/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V V IIIII SSSSS U U AAA L %
% V V I SS U U A A L %
% V V I SSS U U AAAAA L %
% V V I SS U U A A L %
% V IIIII SSSSS UUU A A LLLLL %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT SSSSS %
% E F F E C T SS %
% EEE FFF FFF EEE C T SSS %
% E F F E C T SS %
% EEEEE F F EEEEE CCCC T SSSSS %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/annotate.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/decorate.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/layer.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/opencl-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/resource_.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/utility.h"
#include "magick/visual-effects.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% ExceptionInfo *exception)
% Image *AddNoiseImageChannel(const Image *image,const ChannelType channel,
% const NoiseType noise_type,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o exception: return any errors or warnings in this structure.
%
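%  A minimal usage sketch (assuming an image read with ReadImage() and an
%  exception acquired with AcquireExceptionInfo(); GaussianNoise is one of
%  the NoiseType enumeration values):
%
%      Image *noise_image;
%
%      noise_image=AddNoiseImage(image,GaussianNoise,exception);
%      if (noise_image == (Image *) NULL)
%        CatchException(exception);
%
%  The noise amplitude may be attenuated beforehand with, for example,
%  SetImageArtifact(image,"attenuate","0.5"); this method reads the
%  "attenuate" artifact back from the image.
%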
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
ExceptionInfo *exception)
{
Image
*noise_image;
noise_image=AddNoiseImageChannel(image,DefaultChannels,noise_type,exception);
return(noise_image);
}
MagickExport Image *AddNoiseImageChannel(const Image *image,
const ChannelType channel,const NoiseType noise_type,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"
CacheView
*image_view,
*noise_view;
const char
*option;
double
attenuate;
Image
*noise_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
/*
Initialize noise image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
noise_image=AccelerateAddNoiseImage(image,channel,noise_type,exception);
if (noise_image != (Image *) NULL)
return(noise_image);
#endif
noise_image=CloneImage(image,0,0,MagickTrue,exception);
if (noise_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(noise_image,DirectClass) == MagickFalse)
{
InheritException(exception,&noise_image->exception);
noise_image=DestroyImage(noise_image);
return((Image *) NULL);
}
/*
Add noise in each row.
*/
attenuate=1.0;
option=GetImageArtifact(image,"attenuate");
if (option != (char *) NULL)
attenuate=StringToDouble(option,(char **) NULL);
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireVirtualCacheView(image,exception);
noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,noise_image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict noise_indexes;
ssize_t
x;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
noise_indexes=GetCacheViewAuthenticIndexQueue(noise_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(GenerateDifferentialNoise(random_info[id],
GetPixelRed(p),noise_type,attenuate)));
if (IsGrayColorspace(image->colorspace) != MagickFalse)
{
SetPixelGreen(q,GetPixelRed(q));
SetPixelBlue(q,GetPixelRed(q));
}
else
{
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(GenerateDifferentialNoise(
random_info[id],GetPixelGreen(p),noise_type,attenuate)));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(GenerateDifferentialNoise(
random_info[id],GetPixelBlue(p),noise_type,attenuate)));
}
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(GenerateDifferentialNoise(
random_info[id],GetPixelOpacity(p),noise_type,attenuate)));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(noise_indexes+x,ClampToQuantum(
GenerateDifferentialNoise(random_info[id],GetPixelIndex(
indexes+x),noise_type,attenuate)));
p++;
q++;
}
sync=SyncCacheViewAuthenticPixels(noise_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AddNoiseImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
noise_view=DestroyCacheView(noise_view);
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
noise_image=DestroyImage(noise_image);
return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
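%  A minimal usage sketch (a factor of 1.5 is a reasonable starting point;
%  larger factors mute the colors more strongly):
%
%      Image *shift_image;
%
%      shift_image=BlueShiftImage(image,1.5,exception);
%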
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
ExceptionInfo *exception)
{
#define BlueShiftImageTag "BlueShift/Image"
CacheView
*image_view,
*shift_view;
Image
*shift_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Allocate blue shift image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
shift_image=CloneImage(image,0,0,MagickTrue,exception);
if (shift_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(shift_image,DirectClass) == MagickFalse)
{
InheritException(exception,&shift_image->exception);
shift_image=DestroyImage(shift_image);
return((Image *) NULL);
}
/*
Blue-shift DirectClass image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,shift_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
Quantum
quantum;
const PixelPacket
*magick_restrict p;
ssize_t
x;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
quantum=GetPixelRed(p);
if (GetPixelGreen(p) < quantum)
quantum=GetPixelGreen(p);
if (GetPixelBlue(p) < quantum)
quantum=GetPixelBlue(p);
pixel.red=0.5*(GetPixelRed(p)+factor*quantum);
pixel.green=0.5*(GetPixelGreen(p)+factor*quantum);
pixel.blue=0.5*(GetPixelBlue(p)+factor*quantum);
quantum=GetPixelRed(p);
if (GetPixelGreen(p) > quantum)
quantum=GetPixelGreen(p);
if (GetPixelBlue(p) > quantum)
quantum=GetPixelBlue(p);
pixel.red=0.5*(pixel.red+factor*quantum);
pixel.green=0.5*(pixel.green+factor*quantum);
pixel.blue=0.5*(pixel.blue+factor*quantum);
SetPixelRed(q,ClampToQuantum(pixel.red));
SetPixelGreen(q,ClampToQuantum(pixel.green));
SetPixelBlue(q,ClampToQuantum(pixel.blue));
p++;
q++;
}
sync=SyncCacheViewAuthenticPixels(shift_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,BlueShiftImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
shift_view=DestroyCacheView(shift_view);
if (status == MagickFalse)
shift_image=DestroyImage(shift_image);
return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CharcoalImage() creates a new image that is a copy of an existing one with
%  its edges highlighted, simulating a charcoal drawing.  It allocates the
%  memory necessary for the new Image structure and returns a pointer to the
%  new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
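%  A minimal usage sketch (a radius of 0 lets the underlying edge and blur
%  operators choose a suitable radius from sigma):
%
%      Image *charcoal_image;
%
%      charcoal_image=CharcoalImage(image,0.0,1.0,exception);
%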
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
Image
*charcoal_image,
*edge_image;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
edge_image=EdgeImage(image,radius,exception);
if (edge_image == (Image *) NULL)
return((Image *) NULL);
charcoal_image=(Image *) NULL;
status=ClampImage(edge_image);
if (status != MagickFalse)
charcoal_image=BlurImage(edge_image,radius,sigma,exception);
edge_image=DestroyImage(edge_image);
if (charcoal_image == (Image *) NULL)
return((Image *) NULL);
status=NormalizeImage(charcoal_image);
if (status != MagickFalse)
status=NegateImage(charcoal_image,MagickFalse);
if (status != MagickFalse)
status=GrayscaleImage(charcoal_image,image->intensity);
if (status == MagickFalse)
charcoal_image=DestroyImage(charcoal_image);
return(charcoal_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *opacity,
% const PixelPacket colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: A character string indicating the level of opacity as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
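%  A minimal usage sketch (blends 50% of red into every channel; the fill
%  color is resolved with QueryColorDatabase()):
%
%      Image *colorize_image;
%      PixelPacket fill;
%
%      (void) QueryColorDatabase("red",&fill,exception);
%      colorize_image=ColorizeImage(image,"50",fill,exception);
%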
*/
MagickExport Image *ColorizeImage(const Image *image,const char *opacity,
const PixelPacket colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"
CacheView
*colorize_view,
*image_view;
GeometryInfo
geometry_info;
Image
*colorize_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
pixel;
MagickStatusType
flags;
ssize_t
y;
/*
Allocate colorized image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
colorize_image=CloneImage(image,0,0,MagickTrue,exception);
if (colorize_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(colorize_image,DirectClass) == MagickFalse)
{
InheritException(exception,&colorize_image->exception);
colorize_image=DestroyImage(colorize_image);
return((Image *) NULL);
}
if ((IsGrayColorspace(image->colorspace) != MagickFalse) ||
(IsPixelGray(&colorize) != MagickFalse))
(void) SetImageColorspace(colorize_image,sRGBColorspace);
if ((colorize_image->matte == MagickFalse) &&
(colorize.opacity != OpaqueOpacity))
(void) SetImageAlphaChannel(colorize_image,OpaqueAlphaChannel);
if (opacity == (const char *) NULL)
return(colorize_image);
/*
Determine RGB values of the pen color.
*/
flags=ParseGeometry(opacity,&geometry_info);
pixel.red=geometry_info.rho;
pixel.green=geometry_info.rho;
pixel.blue=geometry_info.rho;
pixel.opacity=(MagickRealType) OpaqueOpacity;
if ((flags & SigmaValue) != 0)
pixel.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
pixel.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
pixel.opacity=geometry_info.psi;
/*
Colorize DirectClass image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
colorize_view=AcquireAuthenticCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,colorize_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
const PixelPacket
*magick_restrict p;
ssize_t
x;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(colorize_view,0,y,colorize_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,((GetPixelRed(p)*(100.0-pixel.red)+
colorize.red*pixel.red)/100.0));
SetPixelGreen(q,((GetPixelGreen(p)*(100.0-pixel.green)+
colorize.green*pixel.green)/100.0));
SetPixelBlue(q,((GetPixelBlue(p)*(100.0-pixel.blue)+
colorize.blue*pixel.blue)/100.0));
if (colorize_image->matte == MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
else
SetPixelOpacity(q,((GetPixelOpacity(p)*(100.0-pixel.opacity)+
colorize.opacity*pixel.opacity)/100.0));
p++;
q++;
}
sync=SyncCacheViewAuthenticPixels(colorize_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ColorizeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
colorize_view=DestroyCacheView(colorize_view);
if (status == MagickFalse)
colorize_image=DestroyImage(colorize_image);
return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
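%  A minimal usage sketch (assuming the "3:" kernel-string shorthand accepted
%  by AcquireKernelInfo(); this 3x3 matrix swaps the red and blue channels):
%
%      Image *color_image;
%      KernelInfo *kernel;
%
%      kernel=AcquireKernelInfo("3: 0,0,1 0,1,0 1,0,0");
%      if (kernel != (KernelInfo *) NULL)
%        {
%          color_image=ColorMatrixImage(image,kernel,exception);
%          kernel=DestroyKernelInfo(kernel);
%        }
%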
*/
MagickExport Image *ColorMatrixImage(const Image *image,
const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"
CacheView
*color_view,
*image_view;
double
ColorMatrix[6][6] =
{
{ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
{ 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
{ 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
{ 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
{ 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
{ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
};
Image
*color_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
i;
ssize_t
u,
v,
y;
/*
Create color matrix.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
i=0;
for (v=0; v < (ssize_t) color_matrix->height; v++)
for (u=0; u < (ssize_t) color_matrix->width; u++)
{
if ((v < 6) && (u < 6))
ColorMatrix[v][u]=color_matrix->values[i];
i++;
}
/*
Initialize color image.
*/
color_image=CloneImage(image,0,0,MagickTrue,exception);
if (color_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(color_image,DirectClass) == MagickFalse)
{
InheritException(exception,&color_image->exception);
color_image=DestroyImage(color_image);
return((Image *) NULL);
}
if (image->debug != MagickFalse)
{
char
format[MaxTextExtent],
*message;
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" ColorMatrix image with color matrix:");
message=AcquireString("");
for (v=0; v < 6; v++)
{
*message='\0';
(void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
(void) ConcatenateString(&message,format);
for (u=0; u < 6; u++)
{
(void) FormatLocaleString(format,MaxTextExtent,"%+f ",
ColorMatrix[v][u]);
(void) ConcatenateString(&message,format);
}
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
}
message=DestroyString(message);
}
/*
ColorMatrix image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,color_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickRealType
pixel;
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
ssize_t
x;
IndexPacket
*magick_restrict color_indexes;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
color_indexes=GetCacheViewAuthenticIndexQueue(color_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
v;
size_t
height;
height=color_matrix->height > 6 ? 6UL : color_matrix->height;
for (v=0; v < (ssize_t) height; v++)
{
pixel=ColorMatrix[v][0]*GetPixelRed(p)+ColorMatrix[v][1]*
GetPixelGreen(p)+ColorMatrix[v][2]*GetPixelBlue(p);
if (image->matte != MagickFalse)
pixel+=ColorMatrix[v][3]*(QuantumRange-GetPixelOpacity(p));
if (image->colorspace == CMYKColorspace)
pixel+=ColorMatrix[v][4]*GetPixelIndex(indexes+x);
pixel+=QuantumRange*ColorMatrix[v][5];
switch (v)
{
case 0: SetPixelRed(q,ClampToQuantum(pixel)); break;
case 1: SetPixelGreen(q,ClampToQuantum(pixel)); break;
case 2: SetPixelBlue(q,ClampToQuantum(pixel)); break;
case 3:
{
if (image->matte != MagickFalse)
SetPixelAlpha(q,ClampToQuantum(pixel));
break;
}
case 4:
{
if (image->colorspace == CMYKColorspace)
SetPixelIndex(color_indexes+x,ClampToQuantum(pixel));
break;
}
}
}
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ColorMatrixImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
color_view=DestroyCacheView(color_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
color_image=DestroyImage(color_image);
return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ImplodeImage() creates a new image that is a copy of an existing one with
%  the image pixels imploded by the specified percentage.  It allocates the
%  memory necessary for the new Image structure and returns a pointer to the
%  new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o implode_image: Method ImplodeImage returns a pointer to the image
%      after it is imploded.  A null image is returned if there is a memory
%      shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o exception: return any errors or warnings in this structure.
%
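%  A minimal usage sketch (a positive amount pulls pixels toward the center;
%  a negative amount pushes them outward, i.e. an "explode"):
%
%      Image *implode_image;
%
%      implode_image=ImplodeImage(image,0.5,exception);
%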
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"
CacheView
*image_view,
*implode_view;
double
radius;
Image
*implode_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
PointInfo
center,
scale;
ssize_t
y;
/*
Initialize implode image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
implode_image=CloneImage(image,0,0,MagickTrue,exception);
if (implode_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(implode_image,DirectClass) == MagickFalse)
{
InheritException(exception,&implode_image->exception);
implode_image=DestroyImage(implode_image);
return((Image *) NULL);
}
if (implode_image->background_color.opacity != OpaqueOpacity)
implode_image->matte=MagickTrue;
/*
Compute scaling factor.
*/
scale.x=1.0;
scale.y=1.0;
center.x=0.5*image->columns;
center.y=0.5*image->rows;
radius=center.x;
if (image->columns > image->rows)
scale.y=(double) image->columns*PerceptibleReciprocal((double)
image->rows);
else
if (image->columns < image->rows)
{
scale.x=(double) image->rows*PerceptibleReciprocal((double)
image->columns);
radius=center.y;
}
/*
Implode image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(implode_image,&zero);
image_view=AcquireVirtualCacheView(image,exception);
implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,implode_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
distance;
MagickPixelPacket
pixel;
PointInfo
delta;
IndexPacket
*magick_restrict implode_indexes;
ssize_t
x;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
implode_indexes=GetCacheViewAuthenticIndexQueue(implode_view);
delta.y=scale.y*(double) (y-center.y);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
/*
Determine if the pixel is within an ellipse.
*/
delta.x=scale.x*(double) (x-center.x);
distance=delta.x*delta.x+delta.y*delta.y;
if (distance < (radius*radius))
{
double
factor;
/*
Implode the pixel.
*/
factor=1.0;
if (distance > 0.0)
factor=pow(sin((double) (MagickPI*sqrt((double) distance)*
PerceptibleReciprocal(radius)/2)),-amount);
            status=InterpolateMagickPixelPacket(image,image_view,
              UndefinedInterpolatePixel,(double) (factor*delta.x*
              PerceptibleReciprocal(scale.x)+center.x),(double) (factor*
              delta.y*PerceptibleReciprocal(scale.y)+center.y),&pixel,
              exception);
if (status == MagickFalse)
break;
SetPixelPacket(implode_image,&pixel,q,implode_indexes+x);
}
q++;
}
if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ImplodeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
implode_view=DestroyCacheView(implode_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
implode_image=DestroyImage(implode_image);
return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  The MorphImages() method requires a minimum of two images.  The first
%  image is transformed into the second by a number of intervening images
%  as specified by number_frames.
%
% The format of the MorphImage method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o number_frames:  Define the number of in-between images to generate.
%      The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
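%  A minimal usage sketch (image_list is a hypothetical list of at least two
%  frames; the result is a new list containing the in-between frames):
%
%      Image *morph_list;
%
%      morph_list=MorphImages(image_list,10,exception);
%      if (morph_list == (Image *) NULL)
%        CatchException(exception);
%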
*/
MagickExport Image *MorphImages(const Image *image,
const size_t number_frames,ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"
double
alpha,
beta;
Image
*morph_image,
*morph_images;
MagickBooleanType
status;
MagickOffsetType
scene;
const Image
*next;
ssize_t
i;
ssize_t
y;
/*
Clone first frame in sequence.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
morph_images=CloneImage(image,0,0,MagickTrue,exception);
if (morph_images == (Image *) NULL)
return((Image *) NULL);
if (GetNextImageInList(image) == (Image *) NULL)
{
/*
Morph single image.
*/
for (i=1; i < (ssize_t) number_frames; i++)
{
morph_image=CloneImage(image,0,0,MagickTrue,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
AppendImageToList(&morph_images,morph_image);
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) i,
number_frames);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(GetFirstImageInList(morph_images));
}
/*
Morph image sequence.
*/
status=MagickTrue;
scene=0;
next=image;
for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
{
for (i=0; i < (ssize_t) number_frames; i++)
{
CacheView
*image_view,
*morph_view;
beta=(double) (i+1.0)/(double) (number_frames+1.0);
alpha=1.0-beta;
morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
GetNextImageInList(next)->columns+0.5),(size_t) (alpha*
next->rows+beta*GetNextImageInList(next)->rows+0.5),
next->filter,next->blur,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
      if (SetImageStorageClass(morph_image,DirectClass) == MagickFalse)
        {
          InheritException(exception,&morph_image->exception);
          morph_image=DestroyImage(morph_image);
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
AppendImageToList(&morph_images,morph_image);
morph_images=GetLastImageInList(morph_images);
morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
morph_images->rows,GetNextImageInList(next)->filter,
GetNextImageInList(next)->blur,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
image_view=AcquireVirtualCacheView(morph_image,exception);
morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(morph_image,morph_image,morph_image->rows,1)
#endif
for (y=0; y < (ssize_t) morph_images->rows; y++)
{
MagickBooleanType
sync;
const PixelPacket
*magick_restrict p;
ssize_t
x;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) morph_images->columns; x++)
{
SetPixelRed(q,ClampToQuantum(alpha*
GetPixelRed(q)+beta*GetPixelRed(p)));
SetPixelGreen(q,ClampToQuantum(alpha*
GetPixelGreen(q)+beta*GetPixelGreen(p)));
SetPixelBlue(q,ClampToQuantum(alpha*
GetPixelBlue(q)+beta*GetPixelBlue(p)));
SetPixelOpacity(q,ClampToQuantum(alpha*
GetPixelOpacity(q)+beta*GetPixelOpacity(p)));
p++;
q++;
}
sync=SyncCacheViewAuthenticPixels(morph_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
morph_view=DestroyCacheView(morph_view);
image_view=DestroyCacheView(image_view);
morph_image=DestroyImage(morph_image);
}
if (i < (ssize_t) number_frames)
break;
/*
Clone last frame in sequence.
*/
morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
AppendImageToList(&morph_images,morph_image);
morph_images=GetLastImageInList(morph_images);
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,MorphImageTag,scene,
GetImageListLength(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
scene++;
}
if (GetNextImageInList(next) != (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
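%  A minimal usage sketch (the segment spans the full canvas; the image is
%  assumed to be pre-filled with a base color and the random-number
%  generator already seeded, as required above; the attenuation of 0 and
%  depth of 16 are illustrative):
%
%      SegmentInfo segment;
%
%      segment.x1=0.0;
%      segment.y1=0.0;
%      segment.x2=(double) image->columns-1.0;
%      segment.y2=(double) image->rows-1.0;
%      (void) PlasmaImage(image,&segment,0,16);
%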
*/
static inline Quantum PlasmaPixel(RandomInfo *magick_restrict random_info,
const MagickRealType pixel,const double noise)
{
MagickRealType
plasma;
plasma=pixel+noise*GetPseudoRandomValue(random_info)-noise/2.0;
return(ClampToQuantum(plasma));
}
MagickExport MagickBooleanType PlasmaImageProxy(Image *image,
CacheView *image_view,CacheView *u_view,CacheView *v_view,
RandomInfo *magick_restrict random_info,
const SegmentInfo *magick_restrict segment,size_t attenuate,size_t depth)
{
ExceptionInfo
*exception;
double
plasma;
MagickStatusType
status;
PixelPacket
u,
v;
ssize_t
x,
x_mid,
y,
y_mid;
if ((fabs(segment->x2-segment->x1) < MagickEpsilon) &&
(fabs(segment->y2-segment->y1) < MagickEpsilon))
return(MagickTrue);
if (depth != 0)
{
SegmentInfo
local_info;
/*
Divide the area into quadrants and recurse.
*/
depth--;
attenuate++;
x_mid=CastDoubleToLong(ceil((segment->x1+segment->x2)/2-0.5));
y_mid=CastDoubleToLong(ceil((segment->y1+segment->y2)/2-0.5));
local_info=(*segment);
local_info.x2=(double) x_mid;
local_info.y2=(double) y_mid;
status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
&local_info,attenuate,depth);
local_info=(*segment);
local_info.y1=(double) y_mid;
local_info.x2=(double) x_mid;
status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
&local_info,attenuate,depth);
local_info=(*segment);
local_info.x1=(double) x_mid;
local_info.y2=(double) y_mid;
status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
&local_info,attenuate,depth);
local_info=(*segment);
local_info.x1=(double) x_mid;
local_info.y1=(double) y_mid;
status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
&local_info,attenuate,depth);
return(status == 0 ? MagickFalse : MagickTrue);
}
x_mid=CastDoubleToLong(ceil((segment->x1+segment->x2)/2-0.5));
y_mid=CastDoubleToLong(ceil((segment->y1+segment->y2)/2-0.5));
if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
(fabs(segment->x2-x_mid) < MagickEpsilon) &&
(fabs(segment->y1-y_mid) < MagickEpsilon) &&
(fabs(segment->y2-y_mid) < MagickEpsilon))
return(MagickFalse);
/*
Average pixels and apply plasma.
*/
status=MagickTrue;
exception=(&image->exception);
plasma=(double) QuantumRange/(2.0*attenuate);
if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
(fabs(segment->x2-x_mid) >= MagickEpsilon))
{
PixelPacket
*magick_restrict q;
/*
Left pixel.
*/
x=CastDoubleToLong(ceil(segment->x1-0.5));
(void) GetOneCacheViewVirtualPixel(u_view,x,CastDoubleToLong(ceil(
segment->y1-0.5)),&u,exception);
(void) GetOneCacheViewVirtualPixel(v_view,x,CastDoubleToLong(ceil(
segment->y2-0.5)),&v,exception);
q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
if (q == (PixelPacket *) NULL)
return(MagickTrue);
SetPixelRed(q,PlasmaPixel(random_info,((MagickRealType) u.red+
v.red)/2.0,plasma));
SetPixelGreen(q,PlasmaPixel(random_info,((MagickRealType) u.green+
v.green)/2.0,plasma));
SetPixelBlue(q,PlasmaPixel(random_info,((MagickRealType) u.blue+
v.blue)/2.0,plasma));
status=SyncCacheViewAuthenticPixels(image_view,exception);
if (fabs(segment->x1-segment->x2) >= MagickEpsilon)
{
/*
Right pixel.
*/
x=CastDoubleToLong(ceil(segment->x2-0.5));
(void) GetOneCacheViewVirtualPixel(u_view,x,CastDoubleToLong(ceil(
segment->y1-0.5)),&u,exception);
(void) GetOneCacheViewVirtualPixel(v_view,x,CastDoubleToLong(ceil(
segment->y2-0.5)),&v,exception);
q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
if (q == (PixelPacket *) NULL)
return(MagickFalse);
SetPixelRed(q,PlasmaPixel(random_info,((MagickRealType) u.red+
v.red)/2.0,plasma));
SetPixelGreen(q,PlasmaPixel(random_info,((MagickRealType) u.green+
v.green)/2.0,plasma));
SetPixelBlue(q,PlasmaPixel(random_info,((MagickRealType) u.blue+
v.blue)/2.0,plasma));
status=SyncCacheViewAuthenticPixels(image_view,exception);
}
}
if ((fabs(segment->y1-y_mid) >= MagickEpsilon) ||
(fabs(segment->y2-y_mid) >= MagickEpsilon))
{
if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
(fabs(segment->y2-y_mid) >= MagickEpsilon))
{
PixelPacket
*magick_restrict q;
/*
Bottom pixel.
*/
y=CastDoubleToLong(ceil(segment->y2-0.5));
(void) GetOneCacheViewVirtualPixel(u_view,CastDoubleToLong(ceil(
segment->x1-0.5)),y,&u,exception);
(void) GetOneCacheViewVirtualPixel(v_view,CastDoubleToLong(ceil(
segment->x2-0.5)),y,&v,exception);
q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
if (q == (PixelPacket *) NULL)
return(MagickTrue);
SetPixelRed(q,PlasmaPixel(random_info,((MagickRealType) u.red+
v.red)/2.0,plasma));
SetPixelGreen(q,PlasmaPixel(random_info,((MagickRealType) u.green+
v.green)/2.0,plasma));
SetPixelBlue(q,PlasmaPixel(random_info,((MagickRealType) u.blue+
v.blue)/2.0,plasma));
status=SyncCacheViewAuthenticPixels(image_view,exception);
}
if (fabs(segment->y1-segment->y2) >= MagickEpsilon)
{
PixelPacket
*magick_restrict q;
/*
Top pixel.
*/
y=CastDoubleToLong(ceil(segment->y1-0.5));
(void) GetOneCacheViewVirtualPixel(u_view,CastDoubleToLong(ceil(
segment->x1-0.5)),y,&u,exception);
(void) GetOneCacheViewVirtualPixel(v_view,CastDoubleToLong(ceil(
segment->x2-0.5)),y,&v,exception);
q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
if (q == (PixelPacket *) NULL)
return(MagickTrue);
SetPixelRed(q,PlasmaPixel(random_info,((MagickRealType) u.red+
v.red)/2.0,plasma));
SetPixelGreen(q,PlasmaPixel(random_info,((MagickRealType) u.green+
v.green)/2.0,plasma));
SetPixelBlue(q,PlasmaPixel(random_info,((MagickRealType) u.blue+
v.blue)/2.0,plasma));
status=SyncCacheViewAuthenticPixels(image_view,exception);
}
}
if ((fabs(segment->x1-segment->x2) >= MagickEpsilon) ||
(fabs(segment->y1-segment->y2) >= MagickEpsilon))
{
PixelPacket
*magick_restrict q;
/*
Middle pixel.
*/
x=CastDoubleToLong(ceil(segment->x1-0.5));
y=CastDoubleToLong(ceil(segment->y1-0.5));
(void) GetOneCacheViewVirtualPixel(u_view,x,y,&u,exception);
x=CastDoubleToLong(ceil(segment->x2-0.5));
y=CastDoubleToLong(ceil(segment->y2-0.5));
(void) GetOneCacheViewVirtualPixel(v_view,x,y,&v,exception);
q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
if (q == (PixelPacket *) NULL)
return(MagickTrue);
SetPixelRed(q,PlasmaPixel(random_info,((MagickRealType) u.red+
v.red)/2.0,plasma));
SetPixelGreen(q,PlasmaPixel(random_info,((MagickRealType) u.green+
v.green)/2.0,plasma));
SetPixelBlue(q,PlasmaPixel(random_info,((MagickRealType) u.blue+
v.blue)/2.0,plasma));
status=SyncCacheViewAuthenticPixels(image_view,exception);
}
if ((fabs(segment->x2-segment->x1) < 3.0) &&
(fabs(segment->y2-segment->y1) < 3.0))
return(status == 0 ? MagickFalse : MagickTrue);
return(MagickFalse);
}
MagickExport MagickBooleanType PlasmaImage(Image *image,
const SegmentInfo *segment,size_t attenuate,size_t depth)
{
CacheView
*image_view,
*u_view,
*v_view;
MagickBooleanType
status;
RandomInfo
*random_info;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,&image->exception);
u_view=AcquireVirtualCacheView(image,&image->exception);
v_view=AcquireVirtualCacheView(image,&image->exception);
random_info=AcquireRandomInfo();
status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
attenuate,depth);
random_info=DestroyRandomInfo(random_info);
v_view=DestroyCacheView(v_view);
u_view=DestroyCacheView(u_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
%  The format of the PolaroidImage method is:
%
%      Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
%        const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
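%  A minimal usage sketch (the caption, if any, is read from the image's
%  "Caption" property; the caption text and 10-degree angle here are
%  illustrative):
%
%      DrawInfo *draw_info;
%      Image *polaroid_image;
%
%      draw_info=CloneDrawInfo((ImageInfo *) NULL,(DrawInfo *) NULL);
%      (void) SetImageProperty(image,"Caption","Paris, 1968");
%      polaroid_image=PolaroidImage(image,draw_info,10.0,exception);
%      draw_info=DestroyDrawInfo(draw_info);
%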
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
const double angle,ExceptionInfo *exception)
{
const char
*value;
Image
*bend_image,
*caption_image,
*flop_image,
*picture_image,
*polaroid_image,
*rotate_image,
*trim_image;
size_t
height;
ssize_t
quantum;
/*
Simulate a Polaroid picture.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
image->rows)/25.0,10.0);
height=image->rows+2*quantum;
caption_image=(Image *) NULL;
value=GetImageProperty(image,"Caption");
if (value != (const char *) NULL)
{
char
*caption;
/*
Generate caption image.
*/
caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
if (caption_image == (Image *) NULL)
return((Image *) NULL);
caption=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,
value);
if (caption != (char *) NULL)
{
char
geometry[MaxTextExtent];
DrawInfo
*annotate_info;
MagickBooleanType
status;
ssize_t
count;
TypeMetric
metrics;
annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
(void) CloneString(&annotate_info->text,caption);
count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,
&metrics,&caption);
status=SetImageExtent(caption_image,image->columns,(size_t)
((count+1)*(metrics.ascent-metrics.descent)+0.5));
if (status == MagickFalse)
caption_image=DestroyImage(caption_image);
else
{
caption_image->background_color=image->border_color;
(void) SetImageBackgroundColor(caption_image);
(void) CloneString(&annotate_info->text,caption);
(void) FormatLocaleString(geometry,MaxTextExtent,"+0+%.20g",
metrics.ascent);
if (annotate_info->gravity == UndefinedGravity)
(void) CloneString(&annotate_info->geometry,AcquireString(
geometry));
(void) AnnotateImage(caption_image,annotate_info);
height+=caption_image->rows;
}
annotate_info=DestroyDrawInfo(annotate_info);
caption=DestroyString(caption);
}
}
picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
exception);
if (picture_image == (Image *) NULL)
{
if (caption_image != (Image *) NULL)
caption_image=DestroyImage(caption_image);
return((Image *) NULL);
}
picture_image->background_color=image->border_color;
(void) SetImageBackgroundColor(picture_image);
(void) CompositeImage(picture_image,OverCompositeOp,image,quantum,quantum);
if (caption_image != (Image *) NULL)
{
(void) CompositeImage(picture_image,OverCompositeOp,caption_image,
quantum,(ssize_t) (image->rows+3*quantum/2));
caption_image=DestroyImage(caption_image);
}
(void) QueryColorDatabase("none",&picture_image->background_color,exception);
(void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel);
rotate_image=RotateImage(picture_image,90.0,exception);
picture_image=DestroyImage(picture_image);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
picture_image=rotate_image;
bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
picture_image->columns,exception);
picture_image=DestroyImage(picture_image);
if (bend_image == (Image *) NULL)
return((Image *) NULL);
InheritException(&bend_image->exception,exception);
picture_image=bend_image;
rotate_image=RotateImage(picture_image,-90.0,exception);
picture_image=DestroyImage(picture_image);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
picture_image=rotate_image;
picture_image->background_color=image->background_color;
polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
exception);
if (polaroid_image == (Image *) NULL)
{
picture_image=DestroyImage(picture_image);
return(picture_image);
}
flop_image=FlopImage(polaroid_image,exception);
polaroid_image=DestroyImage(polaroid_image);
if (flop_image == (Image *) NULL)
{
picture_image=DestroyImage(picture_image);
return(picture_image);
}
polaroid_image=flop_image;
(void) CompositeImage(polaroid_image,OverCompositeOp,picture_image,
(ssize_t) (-0.01*picture_image->columns/2.0),0L);
picture_image=DestroyImage(picture_image);
(void) QueryColorDatabase("none",&polaroid_image->background_color,exception);
rotate_image=RotateImage(polaroid_image,angle,exception);
polaroid_image=DestroyImage(polaroid_image);
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
polaroid_image=rotate_image;
trim_image=TrimImage(polaroid_image,exception);
polaroid_image=DestroyImage(polaroid_image);
if (trim_image == (Image *) NULL)
return((Image *) NULL);
polaroid_image=trim_image;
return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
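%  A minimal usage sketch (80% of QuantumRange, the starting point suggested
%  above):
%
%      Image *sepia_image;
%
%      sepia_image=SepiaToneImage(image,0.80*(double) QuantumRange,exception);
%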
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"
CacheView
*image_view,
*sepia_view;
Image
*sepia_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize sepia-toned image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
sepia_image=CloneImage(image,0,0,MagickTrue,exception);
if (sepia_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(sepia_image,DirectClass) == MagickFalse)
{
InheritException(exception,&sepia_image->exception);
sepia_image=DestroyImage(sepia_image);
return((Image *) NULL);
}
/*
Tone each row of the image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,sepia_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const PixelPacket
*magick_restrict p;
ssize_t
x;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
intensity,
tone;
intensity=GetPixelIntensity(image,p);
tone=intensity > threshold ? (double) QuantumRange : intensity+
(double) QuantumRange-threshold;
SetPixelRed(q,ClampToQuantum(tone));
tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
intensity+(double) QuantumRange-7.0*threshold/6.0;
SetPixelGreen(q,ClampToQuantum(tone));
tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
SetPixelBlue(q,ClampToQuantum(tone));
tone=threshold/7.0;
if ((double) GetPixelGreen(q) < tone)
SetPixelGreen(q,ClampToQuantum(tone));
if ((double) GetPixelBlue(q) < tone)
SetPixelBlue(q,ClampToQuantum(tone));
SetPixelOpacity(q,GetPixelOpacity(p));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SepiaToneImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sepia_view=DestroyCacheView(sepia_view);
image_view=DestroyCacheView(image_view);
(void) NormalizeImage(sepia_image);
(void) ContrastImage(sepia_image,MagickTrue);
if (status == MagickFalse)
sepia_image=DestroyImage(sepia_image);
return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double opacity,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
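%  A minimal usage sketch (an 80% opaque shadow blurred with sigma 2.0 and
%  offset 5 pixels right and down; callers typically composite the original
%  image over the result):
%
%      Image *shadow_image;
%
%      shadow_image=ShadowImage(image,80.0,2.0,5,5,exception);
%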
*/
MagickExport Image *ShadowImage(const Image *image,const double opacity,
const double sigma,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"
CacheView
*image_view;
Image
*border_image,
*clone_image,
*shadow_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
border_info;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(clone_image,sRGBColorspace);
(void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod);
clone_image->compose=OverCompositeOp;
border_info.width=(size_t) floor(2.0*sigma+0.5);
border_info.height=(size_t) floor(2.0*sigma+0.5);
border_info.x=0;
border_info.y=0;
(void) QueryColorDatabase("none",&clone_image->border_color,exception);
border_image=BorderImage(clone_image,&border_info,exception);
clone_image=DestroyImage(clone_image);
if (border_image == (Image *) NULL)
return((Image *) NULL);
if (border_image->matte == MagickFalse)
(void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel);
/*
Shadow image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(border_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(border_image,border_image,border_image->rows,1)
#endif
for (y=0; y < (ssize_t) border_image->rows; y++)
{
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) border_image->columns; x++)
{
SetPixelRed(q,border_image->background_color.red);
SetPixelGreen(q,border_image->background_color.green);
SetPixelBlue(q,border_image->background_color.blue);
if (border_image->matte == MagickFalse)
SetPixelOpacity(q,border_image->background_color.opacity);
else
SetPixelOpacity(q,ClampToQuantum((double) (QuantumRange-
GetPixelAlpha(q)*opacity/100.0)));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ShadowImageTag,progress,
border_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
shadow_image=BlurImageChannel(border_image,AlphaChannel,0.0,sigma,exception);
border_image=DestroyImage(border_image);
if (shadow_image == (Image *) NULL)
return((Image *) NULL);
if (shadow_image->page.width == 0)
shadow_image->page.width=shadow_image->columns;
if (shadow_image->page.height == 0)
shadow_image->page.height=shadow_image->rows;
shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
%  reasonable results, radius should be larger than sigma.  If you use a
%  radius of 0, SketchImage() selects a suitable radius for you.  Angle gives
%  the angle of the pencil strokes.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting
% the center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
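%  A minimal usage sketch (comparable to the command-line option
%  -sketch 0x20+120):
%
%      Image *sketch_image;
%
%      sketch_image=SketchImage(image,0.0,20.0,120.0,exception);
%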
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
const double sigma,const double angle,ExceptionInfo *exception)
{
CacheView
*random_view;
Image
*blend_image,
*blur_image,
*dodge_image,
*random_image,
*sketch_image;
MagickBooleanType
status;
MagickPixelPacket
zero;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
/*
Sketch image.
*/
random_image=CloneImage(image,image->columns << 1,image->rows << 1,
MagickTrue,exception);
if (random_image == (Image *) NULL)
return((Image *) NULL);
status=MagickTrue;
GetMagickPixelPacket(random_image,&zero);
random_info=AcquireRandomInfoThreadSet();
random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) random_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickPixelPacket
pixel;
IndexPacket
*magick_restrict indexes;
ssize_t
x;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(random_view);
pixel=zero;
for (x=0; x < (ssize_t) random_image->columns; x++)
{
pixel.red=(MagickRealType) (QuantumRange*
GetPseudoRandomValue(random_info[id]));
pixel.green=pixel.red;
pixel.blue=pixel.red;
if (image->colorspace == CMYKColorspace)
pixel.index=pixel.red;
SetPixelPacket(random_image,&pixel,q,indexes+x);
q++;
}
if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
status=MagickFalse;
}
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
{
random_view=DestroyCacheView(random_view);
random_image=DestroyImage(random_image);
return(random_image);
}
random_view=DestroyCacheView(random_view);
blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
random_image=DestroyImage(random_image);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
dodge_image=EdgeImage(blur_image,radius,exception);
blur_image=DestroyImage(blur_image);
if (dodge_image == (Image *) NULL)
return((Image *) NULL);
status=ClampImage(dodge_image);
if (status != MagickFalse)
status=NormalizeImage(dodge_image);
if (status != MagickFalse)
status=NegateImage(dodge_image,MagickFalse);
if (status != MagickFalse)
status=TransformImage(&dodge_image,(char *) NULL,"50%");
sketch_image=CloneImage(image,0,0,MagickTrue,exception);
if (sketch_image == (Image *) NULL)
{
dodge_image=DestroyImage(dodge_image);
return((Image *) NULL);
}
(void) CompositeImage(sketch_image,ColorDodgeCompositeOp,dodge_image,0,0);
dodge_image=DestroyImage(dodge_image);
blend_image=CloneImage(image,0,0,MagickTrue,exception);
if (blend_image == (Image *) NULL)
{
sketch_image=DestroyImage(sketch_image);
return((Image *) NULL);
}
(void) SetImageArtifact(blend_image,"compose:args","20x80");
(void) CompositeImage(sketch_image,BlendCompositeOp,blend_image,0,0);
blend_image=DestroyImage(blend_image);
return(sketch_image);
}
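/*
  A minimal usage sketch, not part of MagickCore: read an image and render
  it as a pencil sketch. The helper name and the idea of reading from a
  path are assumptions for illustration; radius 0 lets SketchImage() pick
  a radius for sigma 1.0, with strokes along a 45 degree angle.
*/
#if 0
static Image *PencilSketch(const char *path,ExceptionInfo *exception)
{
  Image
    *image,
    *sketch;

  ImageInfo
    *image_info;

  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,path,MaxTextExtent);
  image=ReadImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  sketch=SketchImage(image,0.0,1.0,45.0,exception);
  image=DestroyImage(image);
  return(sketch);
}
#endif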
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold)
% MagickBooleanType SolarizeImageChannel(Image *image,
% const ChannelType channel,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
const double threshold)
{
MagickBooleanType
status;
status=SolarizeImageChannel(image,DefaultChannels,threshold,
&image->exception);
return(status);
}
MagickExport MagickBooleanType SolarizeImageChannel(Image *image,
const ChannelType channel,const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace);
if (image->storage_class == PseudoClass)
{
ssize_t
i;
/*
Solarize colormap.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((channel & RedChannel) != 0)
if ((double) image->colormap[i].red > threshold)
image->colormap[i].red=QuantumRange-image->colormap[i].red;
if ((channel & GreenChannel) != 0)
if ((double) image->colormap[i].green > threshold)
image->colormap[i].green=QuantumRange-image->colormap[i].green;
if ((channel & BlueChannel) != 0)
if ((double) image->colormap[i].blue > threshold)
image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
}
}
/*
Solarize image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
if ((double) GetPixelRed(q) > threshold)
SetPixelRed(q,QuantumRange-GetPixelRed(q));
if ((channel & GreenChannel) != 0)
if ((double) GetPixelGreen(q) > threshold)
SetPixelGreen(q,QuantumRange-GetPixelGreen(q));
if ((channel & BlueChannel) != 0)
if ((double) GetPixelBlue(q) > threshold)
SetPixelBlue(q,QuantumRange-GetPixelBlue(q));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SolarizeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
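/*
  Illustrative sketch (hypothetical helper, not part of MagickCore) of the
  per-sample transfer applied above: samples brighter than the threshold
  are inverted, all others pass through unchanged.
*/
#if 0
static Quantum SolarizeSample(const Quantum sample,const double threshold)
{
  if ((double) sample > threshold)
    return((Quantum) (QuantumRange-sample));
  return(sample);
}
#endif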
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image. Recover
% the hidden watermark later to prove the authenticity of the image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (alpha)=(Quantum) ((set) != 0 ? (size_t) (alpha) \
| (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"
CacheView
*stegano_view,
*watermark_view;
Image
*stegano_image;
int
c;
MagickBooleanType
status;
PixelPacket
pixel;
PixelPacket
*q;
ssize_t
x;
size_t
depth,
one;
ssize_t
i,
j,
k,
y;
/*
Initialize steganographic image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(watermark != (const Image *) NULL);
assert(watermark->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
one=1UL;
stegano_image=CloneImage(image,0,0,MagickTrue,exception);
if (stegano_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(stegano_image,DirectClass) == MagickFalse)
{
InheritException(exception,&stegano_image->exception);
stegano_image=DestroyImage(stegano_image);
return((Image *) NULL);
}
stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
/*
Hide watermark in low-order bits of image.
*/
c=0;
i=0;
j=0;
depth=stegano_image->depth;
k=image->offset;
status=MagickTrue;
watermark_view=AcquireVirtualCacheView(watermark,exception);
stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
{
for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
{
for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
{
(void) GetOneCacheViewVirtualPixel(watermark_view,x,y,&pixel,exception);
if ((k/(ssize_t) stegano_image->columns) >= (ssize_t) stegano_image->rows)
break;
q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
exception);
if (q == (PixelPacket *) NULL)
break;
switch (c)
{
case 0:
{
SetBit(GetPixelRed(q),j,GetBit(ClampToQuantum(GetPixelIntensity(
image,&pixel)),i));
break;
}
case 1:
{
SetBit(GetPixelGreen(q),j,GetBit(ClampToQuantum(GetPixelIntensity(
image,&pixel)),i));
break;
}
case 2:
{
SetBit(GetPixelBlue(q),j,GetBit(ClampToQuantum(GetPixelIntensity(
image,&pixel)),i));
break;
}
}
if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
break;
c++;
if (c == 3)
c=0;
k++;
if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
k=0;
if (k == image->offset)
j++;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
(depth-i),depth);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
stegano_view=DestroyCacheView(stegano_view);
watermark_view=DestroyCacheView(watermark_view);
if (stegano_image->storage_class == PseudoClass)
(void) SyncImage(stegano_image);
if (status == MagickFalse)
stegano_image=DestroyImage(stegano_image);
return(stegano_image);
}
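/*
  Standalone sketch of the GetBit()/SetBit() masking used above: copy bit i
  of a watermark intensity into bit j of a carrier sample. EmbedBit is a
  hypothetical name introduced only for illustration.
*/
#if 0
static size_t EmbedBit(size_t carrier,const size_t j,const size_t watermark,
  const size_t i)
{
  size_t
    one = 1UL;

  if (((watermark >> i) & 0x01) != 0)
    carrier|=(one << j);   /* watermark bit is 1: set carrier bit j */
  else
    carrier&=~(one << j);  /* watermark bit is 0: clear carrier bit j */
  return(carrier);
}
#endif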
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-cyan (red-green) stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
*/
MagickExport Image *StereoImage(const Image *left_image,
const Image *right_image,ExceptionInfo *exception)
{
return(StereoAnaglyphImage(left_image,right_image,0,0,exception));
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"
const Image
*image;
Image
*stereo_image;
MagickBooleanType
status;
ssize_t
y;
assert(left_image != (const Image *) NULL);
assert(left_image->signature == MagickCoreSignature);
if (left_image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
left_image->filename);
assert(right_image != (const Image *) NULL);
assert(right_image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=left_image;
if ((left_image->columns != right_image->columns) ||
(left_image->rows != right_image->rows))
ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
/*
Initialize stereo image attributes.
*/
stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
MagickTrue,exception);
if (stereo_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(stereo_image,DirectClass) == MagickFalse)
{
InheritException(exception,&stereo_image->exception);
stereo_image=DestroyImage(stereo_image);
return((Image *) NULL);
}
(void) SetImageColorspace(stereo_image,sRGBColorspace);
/*
Copy left image to red channel and right image to blue channel.
*/
status=MagickTrue;
for (y=0; y < (ssize_t) stereo_image->rows; y++)
{
const PixelPacket
*magick_restrict p,
*magick_restrict q;
ssize_t
x;
PixelPacket
*magick_restrict r;
p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
exception);
q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL) ||
(r == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) stereo_image->columns; x++)
{
SetPixelRed(r,GetPixelRed(p));
SetPixelGreen(r,GetPixelGreen(q));
SetPixelBlue(r,GetPixelBlue(q));
SetPixelOpacity(r,(GetPixelOpacity(p)+GetPixelOpacity(q))/2);
p++;
q++;
r++;
}
if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
break;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
stereo_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
if (status == MagickFalse)
stereo_image=DestroyImage(stereo_image);
return(stereo_image);
}
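/*
  Sketch of the per-pixel anaglyph combination above (hypothetical helper):
  the red channel comes from the left eye, green and blue from the right,
  and opacity is averaged.
*/
#if 0
static PixelPacket AnaglyphPixel(const PixelPacket left,
  const PixelPacket right)
{
  PixelPacket
    pixel;

  pixel.red=left.red;        /* left image drives the red channel */
  pixel.green=right.green;   /* right image drives green and blue */
  pixel.blue=right.blue;
  pixel.opacity=(left.opacity+right.opacity)/2;
  return(pixel);
}
#endif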
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"
CacheView
*image_view,
*swirl_view;
double
radius;
Image
*swirl_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
PointInfo
center,
scale;
ssize_t
y;
/*
Initialize swirl image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
swirl_image=CloneImage(image,0,0,MagickTrue,exception);
if (swirl_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(swirl_image,DirectClass) == MagickFalse)
{
InheritException(exception,&swirl_image->exception);
swirl_image=DestroyImage(swirl_image);
return((Image *) NULL);
}
if (swirl_image->background_color.opacity != OpaqueOpacity)
swirl_image->matte=MagickTrue;
/*
Compute scaling factor.
*/
center.x=(double) image->columns/2.0;
center.y=(double) image->rows/2.0;
radius=MagickMax(center.x,center.y);
scale.x=1.0;
scale.y=1.0;
if (image->columns > image->rows)
scale.y=(double) image->columns/(double) image->rows;
else
if (image->columns < image->rows)
scale.x=(double) image->rows/(double) image->columns;
degrees=(double) DegreesToRadians(degrees);
/*
Swirl image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(swirl_image,&zero);
image_view=AcquireVirtualCacheView(image,exception);
swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,swirl_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
distance;
MagickPixelPacket
pixel;
PointInfo
delta;
IndexPacket
*magick_restrict swirl_indexes;
ssize_t
x;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
swirl_indexes=GetCacheViewAuthenticIndexQueue(swirl_view);
delta.y=scale.y*(double) (y-center.y);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
/*
Determine if the pixel is within an ellipse.
*/
delta.x=scale.x*(double) (x-center.x);
distance=delta.x*delta.x+delta.y*delta.y;
if (distance < (radius*radius))
{
double
cosine,
factor,
sine;
/*
Swirl the pixel.
*/
factor=1.0-sqrt(distance)/radius;
sine=sin((double) (degrees*factor*factor));
cosine=cos((double) (degrees*factor*factor));
status=InterpolateMagickPixelPacket(image,image_view,
UndefinedInterpolatePixel,(double) ((cosine*delta.x-sine*delta.y)/
scale.x+center.x),(double) ((sine*delta.x+cosine*delta.y)/scale.y+
center.y),&pixel,exception);
if (status == MagickFalse)
break;
SetPixelPacket(swirl_image,&pixel,q,swirl_indexes+x);
}
q++;
}
if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SwirlImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
swirl_view=DestroyCacheView(swirl_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
swirl_image=DestroyImage(swirl_image);
return(swirl_image);
}
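/*
  Standalone sketch of the inverse mapping computed in the swirl loop:
  given center-relative, aspect-scaled offsets (dx,dy) in the destination,
  return the source point to sample. The rotation angle decays to zero at
  the edge of the ellipse. SwirlSource is a hypothetical helper for
  illustration only.
*/
#if 0
static void SwirlSource(const double dx,const double dy,const double radius,
  const double radians,double *sx,double *sy)
{
  double
    distance;

  distance=dx*dx+dy*dy;
  *sx=dx;
  *sy=dy;
  if (distance < (radius*radius))
    {
      double
        cosine,
        factor,
        sine;

      factor=1.0-sqrt(distance)/radius;
      sine=sin(radians*factor*factor);
      cosine=cos(radians*factor*factor);
      *sx=cosine*dx-sine*dy;
      *sy=sine*dx+cosine*dy;
    }
}
#endif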
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *opacity,
% const PixelPacket tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: a geometry string giving the tint strength as a percentage,
% e.g. "40" or "40,60,80" for separate red, green, and blue values.
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *opacity,
const PixelPacket tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"
CacheView
*image_view,
*tint_view;
GeometryInfo
geometry_info;
Image
*tint_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
color_vector,
pixel;
MagickStatusType
flags;
ssize_t
y;
/*
Allocate tint image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
tint_image=CloneImage(image,0,0,MagickTrue,exception);
if (tint_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(tint_image,DirectClass) == MagickFalse)
{
InheritException(exception,&tint_image->exception);
tint_image=DestroyImage(tint_image);
return((Image *) NULL);
}
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
(IsPixelGray(&tint) == MagickFalse))
(void) SetImageColorspace(tint_image,sRGBColorspace);
if (opacity == (const char *) NULL)
return(tint_image);
/*
Determine RGB values of the tint color.
*/
flags=ParseGeometry(opacity,&geometry_info);
pixel.red=geometry_info.rho;
pixel.green=geometry_info.rho;
pixel.blue=geometry_info.rho;
pixel.opacity=(MagickRealType) OpaqueOpacity;
if ((flags & SigmaValue) != 0)
pixel.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
pixel.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
pixel.opacity=geometry_info.psi;
color_vector.red=(MagickRealType) (pixel.red*tint.red/100.0-
PixelPacketIntensity(&tint));
color_vector.green=(MagickRealType) (pixel.green*tint.green/100.0-
PixelPacketIntensity(&tint));
color_vector.blue=(MagickRealType) (pixel.blue*tint.blue/100.0-
PixelPacketIntensity(&tint));
/*
Tint image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,tint_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const PixelPacket
*magick_restrict p;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
weight;
MagickPixelPacket
pixel;
weight=QuantumScale*GetPixelRed(p)-0.5;
pixel.red=(MagickRealType) GetPixelRed(p)+color_vector.red*(1.0-(4.0*
(weight*weight)));
SetPixelRed(q,ClampToQuantum(pixel.red));
weight=QuantumScale*GetPixelGreen(p)-0.5;
pixel.green=(MagickRealType) GetPixelGreen(p)+color_vector.green*(1.0-
(4.0*(weight*weight)));
SetPixelGreen(q,ClampToQuantum(pixel.green));
weight=QuantumScale*GetPixelBlue(p)-0.5;
pixel.blue=(MagickRealType) GetPixelBlue(p)+color_vector.blue*(1.0-(4.0*
(weight*weight)));
SetPixelBlue(q,ClampToQuantum(pixel.blue));
SetPixelOpacity(q,GetPixelOpacity(p));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TintImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
tint_view=DestroyCacheView(tint_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
tint_image=DestroyImage(tint_image);
return(tint_image);
}
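/*
  Sketch of the midtone weighting used above (hypothetical helper):
  f(x)=1-4*(x-0.5)^2 is 0 at black (x=0) and white (x=1) and peaks at 1
  for midtones (x=0.5), so the tint vector only moves midtones.
*/
#if 0
static double TintWeight(const double x)  /* x = QuantumScale*sample */
{
  double
    weight;

  weight=x-0.5;
  return(1.0-4.0*weight*weight);
}
#endif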
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
char
ellipse[MaxTextExtent];
DrawInfo
*draw_info;
Image
*blur_image,
*canvas_image,
*oval_image,
*vignette_image;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(canvas_image,DirectClass) == MagickFalse)
{
InheritException(exception,&canvas_image->exception);
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
canvas_image->matte=MagickTrue;
oval_image=CloneImage(canvas_image,canvas_image->columns,canvas_image->rows,
MagickTrue,exception);
if (oval_image == (Image *) NULL)
{
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
(void) QueryColorDatabase("#000000",&oval_image->background_color,exception);
(void) SetImageBackgroundColor(oval_image);
draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
(void) QueryColorDatabase("#ffffff",&draw_info->fill,exception);
(void) QueryColorDatabase("#ffffff",&draw_info->stroke,exception);
(void) FormatLocaleString(ellipse,MaxTextExtent,
"ellipse %g,%g,%g,%g,0.0,360.0",image->columns/2.0,
image->rows/2.0,image->columns/2.0-x,image->rows/2.0-y);
draw_info->primitive=AcquireString(ellipse);
(void) DrawImage(oval_image,draw_info);
draw_info=DestroyDrawInfo(draw_info);
blur_image=BlurImage(oval_image,radius,sigma,exception);
oval_image=DestroyImage(oval_image);
if (blur_image == (Image *) NULL)
{
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
blur_image->matte=MagickFalse;
(void) CompositeImage(canvas_image,CopyOpacityCompositeOp,blur_image,0,0);
blur_image=DestroyImage(blur_image);
vignette_image=MergeImageLayers(canvas_image,FlattenLayer,exception);
canvas_image=DestroyImage(canvas_image);
if (vignette_image != (Image *) NULL)
(void) TransformImageColorspace(vignette_image,image->colorspace);
return(vignette_image);
}
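/*
  Usage sketch (not part of MagickCore): a centered soft vignette. Radius
  0.0 lets BlurImage() choose a radius for sigma 10.0; the x and y offsets
  of 0 keep the full-size ellipse centered. SoftVignette is a hypothetical
  wrapper name.
*/
#if 0
static Image *SoftVignette(const Image *image,ExceptionInfo *exception)
{
  return(VignetteImage(image,0.0,10.0,0,0,exception));
}
#endif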
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength are specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
const double wave_length,ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"
CacheView
*image_view,
*wave_view;
float
*sine_map;
Image
*wave_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
ssize_t
i;
ssize_t
y;
/*
Initialize wave image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
wave_image=CloneImage(image,image->columns,(size_t) (image->rows+2.0*
fabs(amplitude)),MagickTrue,exception);
if (wave_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(wave_image,DirectClass) == MagickFalse)
{
InheritException(exception,&wave_image->exception);
wave_image=DestroyImage(wave_image);
return((Image *) NULL);
}
if (wave_image->background_color.opacity != OpaqueOpacity)
wave_image->matte=MagickTrue;
/*
Allocate sine map.
*/
sine_map=(float *) AcquireQuantumMemory((size_t) wave_image->columns,
sizeof(*sine_map));
if (sine_map == (float *) NULL)
{
wave_image=DestroyImage(wave_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
for (i=0; i < (ssize_t) wave_image->columns; i++)
sine_map[i]=(float) fabs(amplitude)+amplitude*sin((double)
((2.0*MagickPI*i)*PerceptibleReciprocal(wave_length)));
/*
Wave image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(wave_image,&zero);
image_view=AcquireVirtualCacheView(image,exception);
wave_view=AcquireAuthenticCacheView(wave_image,exception);
(void) SetCacheViewVirtualPixelMethod(image_view,
BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,wave_image,wave_image->rows,1)
#endif
for (y=0; y < (ssize_t) wave_image->rows; y++)
{
MagickPixelPacket
pixel;
IndexPacket
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(wave_view);
pixel=zero;
for (x=0; x < (ssize_t) wave_image->columns; x++)
{
status=InterpolateMagickPixelPacket(image,image_view,
UndefinedInterpolatePixel,(double) x,(double) (y-sine_map[x]),&pixel,
exception);
if (status == MagickFalse)
break;
SetPixelPacket(wave_image,&pixel,q,indexes+x);
q++;
}
if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,WaveImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
wave_view=DestroyCacheView(wave_view);
image_view=DestroyCacheView(image_view);
sine_map=(float *) RelinquishMagickMemory(sine_map);
if (status == MagickFalse)
wave_image=DestroyImage(wave_image);
return(wave_image);
}
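/*
  Sketch of the displacement map built above (hypothetical helper): column
  x is shifted vertically by fabs(amplitude)+amplitude*sin(2*pi*x/
  wave_length), which keeps every shift non-negative. MagickPI comes from
  the MagickCore private headers already included by this file; the
  production code also guards wave_length == 0 with PerceptibleReciprocal().
*/
#if 0
static double WaveShift(const ssize_t x,const double amplitude,
  const double wave_length)
{
  return(fabs(amplitude)+amplitude*sin((2.0*MagickPI*(double) x)/
    wave_length));
}
#endif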
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e l e t D e n o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveletDenoiseImage() removes noise from the image using a wavelet
% transform. The wavelet transform is a fast hierarchical scheme for
% processing an image using a set of consecutive low-pass and high-pass filters,
% followed by a decimation. This results in a decomposition into different
% scales which can be regarded as different “frequency bands”, determined by
% the mother wavelet. Adapted from dcraw.c by David Coffin.
%
% The format of the WaveletDenoiseImage method is:
%
% Image *WaveletDenoiseImage(const Image *image,const double threshold,
% const double softness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: set the threshold for smoothing.
%
% o softness: attenuate the smoothing threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void HatTransform(const float *magick_restrict pixels,
const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
const float
*magick_restrict p,
*magick_restrict q,
*magick_restrict r;
ssize_t
i;
p=pixels;
q=pixels+scale*stride;
r=pixels+scale*stride;
for (i=0; i < (ssize_t) scale; i++)
{
kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
p+=stride;
q-=stride;
r+=stride;
}
for ( ; i < (ssize_t) (extent-scale); i++)
{
kernel[i]=0.25f*(2.0f*(*p)+*(p-scale*stride)+*(p+scale*stride));
p+=stride;
}
q=p-scale*stride;
r=pixels+stride*(extent-2);
for ( ; i < (ssize_t) extent; i++)
{
kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
p+=stride;
q+=stride;
r-=stride;
}
}
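/*
  Tiny standalone check of the "a trous" hat smoothing above: interior
  outputs are 0.25*(2*center+left+right) with taps 2^level apart and
  reflected edges. HatTransformDemo is a hypothetical driver, shown only
  for illustration.
*/
#if 0
#include <stdio.h>

static void HatTransformDemo(void)
{
  float
    kernel[8],
    pixels[8] = { 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f };

  ssize_t
    i;

  HatTransform(pixels,1,8,1,kernel);
  for (i=0; i < 8; i++)
    (void) fprintf(stdout,"%.2f ",(double) kernel[i]);
  (void) fprintf(stdout,"\n");  /* interior: 0.25*(2*p[i]+p[i-1]+p[i+1]) */
}
#endif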
MagickExport Image *WaveletDenoiseImage(const Image *image,
const double threshold,const double softness,ExceptionInfo *exception)
{
CacheView
*image_view,
*noise_view;
float
*kernel,
*pixels;
Image
*noise_image;
MagickBooleanType
status;
MagickSizeType
number_pixels;
MemoryInfo
*pixels_info;
size_t
max_channels;
ssize_t
channel;
static const double
noise_levels[]= {
0.8002, 0.2735, 0.1202, 0.0585, 0.0291, 0.0152, 0.0080, 0.0044 };
/*
Initialize noise image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
noise_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
if (noise_image != (Image *) NULL)
return(noise_image);
#endif
noise_image=CloneImage(image,0,0,MagickTrue,exception);
if (noise_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(noise_image,DirectClass) == MagickFalse)
{
noise_image=DestroyImage(noise_image);
return((Image *) NULL);
}
if (AcquireMagickResource(WidthResource,3*image->columns) == MagickFalse)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
sizeof(*pixels));
kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns)+1,
GetOpenMPMaximumThreads()*sizeof(*kernel));
if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
{
if (kernel != (float *) NULL)
kernel=(float *) RelinquishMagickMemory(kernel);
if (pixels_info != (MemoryInfo *) NULL)
pixels_info=RelinquishVirtualMemory(pixels_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
pixels=(float *) GetVirtualMemoryBlob(pixels_info);
status=MagickTrue;
number_pixels=image->columns*image->rows;
max_channels=(size_t) (image->colorspace == CMYKColorspace ? 4 : 3);
image_view=AcquireAuthenticCacheView(image,exception);
noise_view=AcquireAuthenticCacheView(noise_image,exception);
for (channel=0; channel < (ssize_t) max_channels; channel++)
{
ssize_t
i;
size_t
high_pass,
low_pass;
ssize_t
level,
y;
if (status == MagickFalse)
continue;
/*
Copy channel from image to wavelet pixel array.
*/
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
ssize_t
x;
p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
switch (channel)
{
case 0: pixels[i]=(float) GetPixelRed(p); break;
case 1: pixels[i]=(float) GetPixelGreen(p); break;
case 2: pixels[i]=(float) GetPixelBlue(p); break;
case 3: pixels[i]=(float) indexes[x]; break;
default: break;
}
i++;
p++;
}
}
/*
Low-pass filter outputs are called the approximation kernel and high-pass
filter outputs the detail kernel; the detail kernel has high values in
the noisy parts of the signal.
*/
high_pass=0;
for (level=0; level < 5; level++)
{
double
magnitude;
ssize_t
x,
y;
low_pass=(size_t) (number_pixels*((level & 0x01)+1));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,1) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
float
*magick_restrict p,
*magick_restrict q;
ssize_t
x;
p=kernel+id*image->columns;
q=pixels+y*image->columns;
HatTransform(q+high_pass,1,image->columns,(size_t) (1UL << level),p);
q+=low_pass;
for (x=0; x < (ssize_t) image->columns; x++)
*q++=(*p++);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,1) \
magick_number_threads(image,image,image->columns,1)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
const int
id = GetOpenMPThreadId();
float
*magick_restrict p,
*magick_restrict q;
ssize_t
y;
p=kernel+id*image->rows;
q=pixels+x+low_pass;
HatTransform(q,image->columns,image->rows,(size_t) (1UL << level),p);
for (y=0; y < (ssize_t) image->rows; y++)
{
*q=(*p++);
q+=image->columns;
}
}
/*
To threshold, each coefficient is compared to a threshold value and
attenuated / shrunk by some factor.
*/
magnitude=threshold*noise_levels[level];
for (i=0; i < (ssize_t) number_pixels; ++i)
{
pixels[high_pass+i]-=pixels[low_pass+i];
if (pixels[high_pass+i] < -magnitude)
pixels[high_pass+i]+=magnitude-softness*magnitude;
else
if (pixels[high_pass+i] > magnitude)
pixels[high_pass+i]-=magnitude-softness*magnitude;
else
pixels[high_pass+i]*=softness;
if (high_pass != 0)
pixels[i]+=pixels[high_pass+i];
}
high_pass=low_pass;
}
/*
Reconstruct image from the thresholded wavelet kernel.
*/
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
IndexPacket
*magick_restrict noise_indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
break;
}
noise_indexes=GetCacheViewAuthenticIndexQueue(noise_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
float
pixel;
pixel=pixels[i]+pixels[low_pass+i];
switch (channel)
{
case 0: SetPixelRed(q,ClampToQuantum(pixel)); break;
case 1: SetPixelGreen(q,ClampToQuantum(pixel)); break;
case 2: SetPixelBlue(q,ClampToQuantum(pixel)); break;
case 3: SetPixelIndex(noise_indexes+x,ClampToQuantum(pixel)); break;
default: break;
}
i++;
q++;
}
sync=SyncCacheViewAuthenticPixels(noise_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
channel,max_channels);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
noise_view=DestroyCacheView(noise_view);
image_view=DestroyCacheView(image_view);
kernel=(float *) RelinquishMagickMemory(kernel);
pixels_info=RelinquishVirtualMemory(pixels_info);
return(noise_image);
}
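/*
  Sketch of the coefficient shrinkage applied in the loop above
  (hypothetical helper): detail coefficients beyond +/-magnitude are pulled
  toward zero by (1-softness)*magnitude, while smaller coefficients are
  scaled down by softness.
*/
#if 0
static float ShrinkCoefficient(const float coefficient,const float magnitude,
  const float softness)
{
  if (coefficient < -magnitude)
    return(coefficient+(magnitude-softness*magnitude));
  if (coefficient > magnitude)
    return(coefficient-(magnitude-softness*magnitude));
  return(softness*coefficient);
}
#endif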
|
omp_strmm_batch.c | /**
* @file omp_strmm_batch.c
*
* @brief BBLAS omp_strmm_batch float routine.
* BBLAS is a software package provided by Univ. of Manchester,
* Univ. of Tennessee.
*
* @version 1.0.0
* @author Samuel D. Relton
* @author Pedro V. Lara
* @author Mawussi Zounon
* @date 2016-02-20
*
**/
#ifndef DOXYGEN_SHOULD_SKIP_THIS
/**
* Code generation
* @generated from ./bblas_omp/omp_ztrmm_batch.c normal z -> s, Mon Jun 6 09:44:14 2016
**/
#endif
#include <cblas.h>
#include "bblas_omp.h"
#include "bblas.h"
#include <omp.h>
#define REAL
/**
Purpose
-------
<b>strmm_batch</b> is an OpenMP implementation of the batched strmm operation.
It performs one of the matrix-matrix operations
arrayB[i] = alpha[i]*op( arrayA[i] )*arrayB[i], or
arrayB[i] = alpha[i]*arrayB[i]*op( arrayA[i] )
where op( X ) is one of
op( X ) = X or
op( X ) = X**T or
op( X ) = X**H,
alpha[i] is a scalar, arrayB[i] is an M[i] by N[i] matrix,
and arrayA[i] is a unit or non-unit, upper or lower triangular matrix.
Fixed and Variable Batch Operations
-----------------------------------
Two types of batch operation are supported depending upon the value of batch_opts.
When <tt>batch_opts = BBLAS_VARIABLE</tt>
- all parameters that are arrays must have length at least batch_count.
- all parameters that are arrays must have all values set.
When <tt>batch_opts = BBLAS_FIXED</tt>
- all parameters that are arrays (except for arrayA, arrayB, and info)
must have length at least one.
- all parameters that are arrays (except for arrayA, arrayB, and info)
need only to have their first value set.
This means that for a <tt>BBLAS_FIXED</tt> batch,
the values of side[0], uplo[0], M[0], N[0], transA[0], diag[0],
alpha[0], lda[0], and ldb[0] are used for all computations.
Parameters
----------
@param[in]
side Array of <tt>enum BBLAS_SIDE</tt>.
Each element side[i] specifies whether op( arrayA[i] )
multiplies arrayB[i] from left or right as follows:
- = 'BblasLeft' arrayB[i] = alpha[i]*op( arrayA[i] )*arrayB[i].
- = 'BblasRight' arrayB[i] = alpha[i]*arrayB[i]*op( arrayA[i] ).
@param[in]
uplo Array of <tt>enum BBLAS_UPLO</tt>.
On entry, uplo[i] specifies whether the matrix arrayA[i] is an
upper or lower triangular matrix as follows:
- = 'BblasUpper' arrayA[i] is an upper triangular matrix.
- = 'BblasLower' arrayA[i] is a lower triangular matrix.
@param[in]
transA Array of <tt>enum BBLAS_TRANS</tt>.
On entry, trans[i] specifies the form of op( arrayA[i] ) to be
used in the matrix multiplication as follows:
- = 'BblasNoTrans' op( arrayA[i] ) = arrayA[i].
- = 'BblasTrans' op( arrayA[i] ) = arrayA[i]**T.
- = 'BblasConjTrans' op( arrayA[i] ) = arrayA[i]**H.
@param[in]
diag Array of <tt>enum BBLAS_DIAG</tt>.
On entry, diag[i] specifies whether or not arrayA[i] is unit
triangular as follows:
- = 'BblasUnit' arrayA[i] is assumed to be unit triangular.
- = 'BblasNonUnit' arrayA[i] is not assumed to be unit triangular.
@param[in]
M Array of <tt>int</tt>.
Each element M[i] specifies the number of rows of the matrix arrayB[i].
M[i] must be at least zero.
@param[in]
N Array of <tt>int</tt>.
Each element N[i] specifies the number of columns of the matrix arrayB[i].
N[i] must be at least zero.
@param[in]
alpha Array of <tt>float</tt>.
@param[in]
arrayA Array of pointers.
Each element arrayA[i] is a pointer to a REAL matrix of
dimension lda[i] by Ka[i],
where Ka[i] = M[i] when side[i] = BblasLeft and is N[i] otherwise.
When using side[i] = BblasLeft the M[i] by M[i] part of arrayA[i]
must contain the triangular matrix:
when uplo[i] = BblasUpper, the upper triangular part of arrayA[i]
must contain the matrix whilst the strictly lower triangular part is not used;
similarly when uplo[i] = BblasLower, the lower triangular part of arrayA[i]
must contain the matrix whilst the strictly upper triangular part is not used.
When using side[i] = BblasRight the N[i] by N[i] part of arrayA[i] must
contain the triangular matrix:
when uplo[i] = BblasUpper, the upper triangular part of arrayA[i]
must contain the matrix whilst the strictly lower triangular part is not used;
similarly when uplo[i] = BblasLower, the lower triangular part of arrayA[i]
must contain the matrix whilst the strictly upper triangular part is not used.
Note that when diag = BblasUnit the diagonal elements of arrayA[i] are
not used either, they are assumed to be equal to one.
@param[in]
lda Array of <tt>int</tt>.
On entry, lda[i] specifies the first dimension of arrayA[i] as declared
in the calling (sub) program. When side[i] = BblasLeft
then lda[i] must be at least max( 1, M[i] ),
otherwise lda[i] must be at least max( 1, N[i] ).
@param[in,out]
arrayB Array of pointers.
Each element arrayB[i] is a pointer to a REAL matrix of
dimension ldb[i] by N[i].
The leading M[i] by N[i] part of arrayB[i] must contain the matrix elements.
On exit, arrayB[i] is overwritten by the updated matrix.
@param[in]
ldb Array of <tt>int</tt>.
Each element ldb[i] specifies the first dimension of arrayB[i] as declared
in the calling (sub) program. Each element ldb[i] must be at least max( 1, M[i] ).
@param[in]
batch_count <tt>int</tt>
The number of matrices to operate on.
@param[in]
batch_opts <tt>enum BBLAS_OPTS</tt>
One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of
batch operation required.
@param[out]
info Array of <tt>int</tt>.
Each element info[i] is the error return code of the ith strmm in the batch;
these need not be set on entry.
The error codes can be found in bblas_macros.h.
**/
void omp_strmm_batch(
const enum BBLAS_SIDE *side, const enum BBLAS_UPLO *uplo,
const enum BBLAS_TRANS *transA, const enum BBLAS_DIAG *diag,
const int *M, const int *N, const float *alpha,
const float **arrayA, const int *lda,
float **arrayB, const int *ldb,
const int batch_count, enum BBLAS_OPTS batch_opts, int *info)
{
/*Local variables */
int first_index = 0;
int batch_iter;
int LDA;
char func_name[15] = "strmm_batch";
/* Check input arguments */
if (batch_count < 0)
{
xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1);
}
if (batch_opts == BBLAS_FIXED)
{
if ((side[first_index] != BblasLeft) &&
(side[first_index] != BblasRight))
{
xerbla_batch(func_name, BBLAS_ERR_SIDE, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_SIDE;
}
return;
}
if ((uplo[first_index] != BblasUpper) &&
(uplo[first_index] != BblasLower))
{
xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_UPLO;
}
return;
}
if ((transA[first_index] != BblasNoTrans) &&
(transA[first_index] != BblasTrans) &&
(transA[first_index] != BblasConjTrans))
{
xerbla_batch(func_name, BBLAS_ERR_TRANSA, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_TRANSA;
}
return;
}
if ((diag[first_index] != BblasNonUnit) &&
(diag[first_index] != BblasUnit))
{
xerbla_batch(func_name, BBLAS_ERR_DIAG, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_DIAG;
}
return;
}
if (M[first_index] < 0)
{
xerbla_batch(func_name, BBLAS_ERR_M, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_M;
}
return;
}
if (N[first_index] < 0)
{
xerbla_batch(func_name, BBLAS_ERR_N, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_N;
}
return;
}
if (side[first_index] == BblasLeft)
{
LDA = M[first_index];
} else
{
LDA = N[first_index];
}
if (lda[first_index] < max(1, LDA))
{
xerbla_batch(func_name, BBLAS_ERR_LDA, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_LDA;
}
return;
}
if (ldb[first_index] < max(1, M[first_index])) {
xerbla_batch(func_name, BBLAS_ERR_LDB, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_LDB;
}
return;
}
/* particular case */
if (min(M[first_index], N[first_index]) == 0)
{
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_SUCCESS;
}
return;
}
#pragma omp parallel for private(batch_iter)
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
/*Call to cblas_strmm */
cblas_strmm(
BblasColMajor,
side[first_index],
uplo[first_index],
transA[first_index],
diag[first_index],
M[first_index],
N[first_index],
(alpha[first_index]),
arrayA[batch_iter],
lda[first_index],
arrayB[batch_iter],
ldb[first_index]);
/* Successful */
info[batch_iter] = BBLAS_SUCCESS;
} /*END FIXED SIZE FOR LOOP */
}else if (batch_opts == BBLAS_VARIABLE)
{
#pragma omp parallel for private(batch_iter, LDA)
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
/* Check input arguments */
if ((side[batch_iter] != BblasLeft) &&
(side[batch_iter] != BblasRight))
{
xerbla_batch(func_name, BBLAS_ERR_SIDE, batch_iter);
info[batch_iter] = BBLAS_ERR_SIDE;
continue;
}
if ((uplo[batch_iter] != BblasUpper) &&
(uplo[batch_iter] != BblasLower))
{
xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter);
info[batch_iter] = BBLAS_ERR_UPLO;
continue;
}
if ((transA[batch_iter] != BblasNoTrans) &&
(transA[batch_iter] != BblasTrans) &&
(transA[batch_iter] != BblasConjTrans))
{
xerbla_batch(func_name, BBLAS_ERR_TRANSA, batch_iter);
info[batch_iter] = BBLAS_ERR_TRANSA;
continue;
}
if ((diag[batch_iter] != BblasNonUnit) &&
(diag[batch_iter] != BblasUnit))
{
xerbla_batch(func_name, BBLAS_ERR_DIAG, batch_iter);
info[batch_iter] = BBLAS_ERR_DIAG;
continue;
}
if (M[batch_iter] < 0)
{
xerbla_batch(func_name, BBLAS_ERR_M, batch_iter);
info[batch_iter] = BBLAS_ERR_M;
continue;
}
if (N[batch_iter] < 0)
{
xerbla_batch(func_name,BBLAS_ERR_N, batch_iter);
info[batch_iter] = BBLAS_ERR_N;
continue;
}
if (side[batch_iter] == BblasLeft)
{
LDA = M[batch_iter];
} else
{
LDA = N[batch_iter];
}
if (lda[batch_iter] < max(1, LDA))
{
xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter);
info[batch_iter] = BBLAS_ERR_LDA;
continue;
}
if (ldb[batch_iter] < max(1, M[batch_iter]))
{
xerbla_batch(func_name, BBLAS_ERR_LDB, batch_iter);
info[batch_iter] = BBLAS_ERR_LDB;
continue;
}
/* particular case */
if (min(M[batch_iter], N[batch_iter]) == 0)
{
info[batch_iter] = BBLAS_SUCCESS;
continue;
}
cblas_strmm(
BblasColMajor,
side[batch_iter],
uplo[batch_iter],
transA[batch_iter],
diag[batch_iter],
M[batch_iter],
N[batch_iter],
(alpha[batch_iter]),
arrayA[batch_iter],
lda[batch_iter],
arrayB[batch_iter],
ldb[batch_iter]);
/* Successful */
info[batch_iter] = BBLAS_SUCCESS;
}
}else
{
xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1);
}
}
#undef REAL
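/**
 * A minimal usage sketch, not part of BBLAS: a FIXED batch of two 2x2
 * left-sided, lower-triangular, non-unit multiplies B := alpha*A*B in
 * column-major storage. Since the batch is FIXED, only element 0 of the
 * scalar parameter arrays is read.
 **/
#if 0
#include <stdio.h>

int main(void)
{
    enum BBLAS_SIDE side[1] = { BblasLeft };
    enum BBLAS_UPLO uplo[1] = { BblasLower };
    enum BBLAS_TRANS transA[1] = { BblasNoTrans };
    enum BBLAS_DIAG diag[1] = { BblasNonUnit };
    int M[1] = { 2 }, N[1] = { 2 }, lda[1] = { 2 }, ldb[1] = { 2 };
    float alpha[1] = { 1.0f };
    /* column-major 2x2; the strictly upper entries are not referenced */
    float A0[4] = { 1.0f, 2.0f, 0.0f, 3.0f };
    float A1[4] = { 2.0f, 1.0f, 0.0f, 2.0f };
    float B0[4] = { 1.0f, 1.0f, 1.0f, 1.0f };
    float B1[4] = { 1.0f, 0.0f, 0.0f, 1.0f };
    const float *arrayA[2] = { A0, A1 };
    float *arrayB[2] = { B0, B1 };
    int info[2];

    omp_strmm_batch(side, uplo, transA, diag, M, N, alpha,
                    arrayA, lda, arrayB, ldb, 2, BBLAS_FIXED, info);
    printf("info = {%d, %d}\n", info[0], info[1]);
    return 0;
}
#endif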
|
macro-3.c | /* PR preprocessor/27746 */
/* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-omplower" } */
/* LLVM LOCAL test not applicable */
/* { dg-require-fdump "" } */
#define omp FOO
#define p parallel
#define omp_parallel _Pragma ("omp parallel")
#define omp_p _Pragma ("omp p")
void bar (void);
void
foo (void)
{
#pragma omp parallel
bar ();
#pragma omp p
bar ();
omp_parallel
bar ();
omp_p
bar ();
}
/* { dg-final { scan-tree-dump-times "#pragma omp parallel" 4 "omplower" } } */
|
GB_unop__identity_uint8_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint8_fc64)
// op(A') function: GB (_unop_tran__identity_uint8_fc64)
// C type: uint8_t
// A type: GxB_FC64_t
// cast: uint8_t cij = GB_cast_to_uint8_t (creal (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = GB_cast_to_uint8_t (creal (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = GB_cast_to_uint8_t (creal (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint8_fc64)
(
uint8_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
uint8_t z = GB_cast_to_uint8_t (creal (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
uint8_t z = GB_cast_to_uint8_t (creal (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint8_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cryptsha512_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* based on rawSHA256_fmt.c code and Drepper's spec at
* http://www.akkadia.org/drepper/SHA-crypt.txt
*
* This software is Copyright (c) 2012 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_cryptsha512;
#elif FMT_REGISTERS_H
john_register_one(&fmt_cryptsha512);
#else
#define _GNU_SOURCE 1
#include <string.h>
#ifdef _OPENMP
#define OMP_SCALE 16
#include <omp.h>
#endif
#include "arch.h"
#include "sha2.h"
#include "params.h"
#include "common.h"
#include "formats.h"
// these MUST be defined prior to loading cryptsha512_valid.h
#define BINARY_SIZE 64
#define SALT_LENGTH 16
#define CIPHERTEXT_LENGTH 86
#include "cryptsha512_common.h"
#include "memdbg.h"
#define FORMAT_LABEL "sha512crypt"
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#define PLAINTEXT_LENGTH 125
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct saltstruct)
#define SALT_ALIGN 4
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests tests[] = {
{"$6$LKO/Ute40T3FNF95$6S/6T2YuOIHY0N3XpLKABJ3soYcXD9mB7uVbtEZDj/LNscVhZoZ9DEH.sBciDrMsHOWOoASbNLTypH/5X26gN0", "U*U*U*U*"},
{"$6$LKO/Ute40T3FNF95$wK80cNqkiAUzFuVGxW6eFe8J.fSVI65MD5yEm8EjYMaJuDrhwe5XXpHDJpwF/kY.afsUs1LlgQAaOapVNbggZ1", "U*U***U"},
{"$6$LKO/Ute40T3FNF95$YS81pp1uhOHTgKLhSMtQCr2cDiUiN03Ud3gyD4ameviK1Zqz.w3oXsMgO6LrqmIEcG3hiqaUqHi/WEE2zrZqa/", "U*U***U*"},
{"$6$OmBOuxFYBZCYAadG$WCckkSZok9xhp4U1shIZEV7CCVwQUwMVea7L3A77th6SaE9jOPupEMJB.z0vIWCDiN9WLh2m9Oszrj5G.gt330", "*U*U*U*U"},
{"$6$ojWH1AiTee9x1peC$QVEnTvRVlPRhcLQCk/HnHaZmlGAAjCfrAN0FtOsOnUk5K5Bn/9eLHHiRzrTzaIKjW9NTLNIBUCtNVOowWS2mN.", ""},
{NULL}
};
static int (*saved_key_length);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static struct saltstruct {
unsigned int len;
unsigned int rounds;
unsigned char salt[SALT_LENGTH];
} *cur_salt;
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt = omp_t * MIN_KEYS_PER_CRYPT;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt = omp_t * MAX_KEYS_PER_CRYPT;
#endif
saved_key_length = mem_calloc_tiny(sizeof(*saved_key_length) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
static void set_key(char *key, int index)
{
int len = strlen(key);
saved_key_length[index] = len;
if (len > PLAINTEXT_LENGTH)
len = saved_key_length[index] = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, len);
}
static char *get_key(int index)
{
saved_key[index][saved_key_length[index]] = 0;
return saved_key[index];
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
// Portably align the temp_result char* pointer to machine-word size.
union xx {
unsigned char c[BINARY_SIZE];
ARCH_WORD a[BINARY_SIZE/sizeof(ARCH_WORD)];
} u;
unsigned char *temp_result = u.c;
SHA512_CTX ctx;
SHA512_CTX alt_ctx;
size_t cnt;
char *cp;
char p_bytes[PLAINTEXT_LENGTH+1];
char s_bytes[PLAINTEXT_LENGTH+1];
/* Prepare for the real work. */
SHA512_Init(&ctx);
/* Add the key string. */
SHA512_Update(&ctx, (unsigned char*)saved_key[index], saved_key_length[index]);
/* The last part is the salt string. This must be at most 16
characters and it ends at the first `$' character (for
compatibility with existing implementations). */
SHA512_Update(&ctx, cur_salt->salt, cur_salt->len);
/* Compute alternate SHA512 sum with input KEY, SALT, and KEY. The
final result will be added to the first context. */
SHA512_Init(&alt_ctx);
/* Add key. */
SHA512_Update(&alt_ctx, (unsigned char*)saved_key[index], saved_key_length[index]);
/* Add salt. */
SHA512_Update(&alt_ctx, cur_salt->salt, cur_salt->len);
/* Add key again. */
SHA512_Update(&alt_ctx, (unsigned char*)saved_key[index], saved_key_length[index]);
/* Now get result of this (64 bytes) and add it to the other
context. */
SHA512_Final((unsigned char*)crypt_out[index], &alt_ctx);
/* For each character of the key, add one byte of the alternate sum. */
for (cnt = saved_key_length[index]; cnt > BINARY_SIZE; cnt -= BINARY_SIZE)
SHA512_Update(&ctx, (unsigned char*)crypt_out[index], BINARY_SIZE);
SHA512_Update(&ctx, (unsigned char*)crypt_out[index], cnt);
/* Take the binary representation of the length of the key and for every
1 add the alternate sum, for every 0 the key. */
for (cnt = saved_key_length[index]; cnt > 0; cnt >>= 1)
if ((cnt & 1) != 0)
SHA512_Update(&ctx, (unsigned char*)crypt_out[index], BINARY_SIZE);
else
SHA512_Update(&ctx, (unsigned char*)saved_key[index], saved_key_length[index]);
/* Create intermediate result. */
SHA512_Final((unsigned char*)crypt_out[index], &ctx);
/* Start computation of P byte sequence. */
SHA512_Init(&alt_ctx);
/* For every character in the password add the entire password. */
for (cnt = 0; cnt < saved_key_length[index]; ++cnt)
SHA512_Update(&alt_ctx, (unsigned char*)saved_key[index], saved_key_length[index]);
/* Finish the digest. */
SHA512_Final(temp_result, &alt_ctx);
/* Create byte sequence P. */
cp = p_bytes;
for (cnt = saved_key_length[index]; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE)
cp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE;
memcpy (cp, temp_result, cnt);
/* Start computation of S byte sequence. */
SHA512_Init(&alt_ctx);
/* Add the salt 16 + (first byte of the intermediate digest) times. */
for (cnt = 0; cnt < 16 + ((unsigned char*)crypt_out[index])[0]; ++cnt)
SHA512_Update(&alt_ctx, cur_salt->salt, cur_salt->len);
/* Finish the digest. */
SHA512_Final(temp_result, &alt_ctx);
/* Create byte sequence S. */
cp = s_bytes;
for (cnt = cur_salt->len; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE)
cp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE;
memcpy (cp, temp_result, cnt);
/* Repeatedly run the collected hash value through SHA512 to
burn CPU cycles. */
for (cnt = 0; cnt < cur_salt->rounds; ++cnt)
{
/* New context. */
SHA512_Init(&ctx);
/* Add key or last result. */
if ((cnt & 1) != 0)
SHA512_Update(&ctx, p_bytes, saved_key_length[index]);
else
SHA512_Update(&ctx, (unsigned char*)crypt_out[index], BINARY_SIZE);
/* Add salt for numbers not divisible by 3. */
if (cnt % 3 != 0)
SHA512_Update(&ctx, s_bytes, cur_salt->len);
/* Add key for numbers not divisible by 7. */
if (cnt % 7 != 0)
SHA512_Update(&ctx, p_bytes, saved_key_length[index]);
/* Add key or last result. */
if ((cnt & 1) != 0)
SHA512_Update(&ctx, (unsigned char*)crypt_out[index], BINARY_SIZE);
else
SHA512_Update(&ctx, p_bytes, saved_key_length[index]);
/* Create intermediate result (on the last round this is the final digest). */
SHA512_Final((unsigned char*)crypt_out[index], &ctx);
}
}
return count;
}
static void set_salt(void *salt)
{
cur_salt = salt;
}
static void *get_salt(char *ciphertext)
{
static struct saltstruct out;
int len;
out.rounds = ROUNDS_DEFAULT;
ciphertext += 3;
if (!strncmp(ciphertext, ROUNDS_PREFIX,
sizeof(ROUNDS_PREFIX) - 1)) {
const char *num = ciphertext + sizeof(ROUNDS_PREFIX) - 1;
char *endp;
unsigned long int srounds = strtoul(num, &endp, 10);
if (*endp == '$')
{
ciphertext = endp + 1;
srounds = srounds < ROUNDS_MIN ?
ROUNDS_MIN : srounds;
out.rounds = srounds > ROUNDS_MAX ?
ROUNDS_MAX : srounds;
}
}
for (len = 0; ciphertext[len] != '$'; len++);
memcpy(out.salt, ciphertext, len);
out.len = len;
return &out;
}
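/*
 * Illustrative parses (values follow from the code above):
 *   "$6$rounds=10000$saltstring$..." -> rounds = 10000, salt "saltstring"
 *   "$6$LKO/Ute40T3FNF95$..."        -> rounds = ROUNDS_DEFAULT, salt length 16
 * Explicit counts are clamped to [ROUNDS_MIN, ROUNDS_MAX].
 */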
static int cmp_all(void *binary, int count)
{
int index = 0;
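	/* Without OpenMP only one key is in flight (MAX_KEYS_PER_CRYPT is 1),
	   so the bare body checks index 0; with OpenMP the same body runs for
	   every index under the loop. */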
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
// Public domain hash function by DJ Bernstein
// We are hashing the entire struct
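// djb2a: (hash << 5) + hash is hash * 33, and each byte is folded in with XOR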
static int salt_hash(void *salt)
{
unsigned char *s = salt;
unsigned int hash = 5381;
unsigned int i;
for (i = 0; i < SALT_SIZE; i++)
hash = ((hash << 5) + hash) ^ s[i];
return hash & (SALT_HASH_SIZE - 1);
}
#if FMT_MAIN_VERSION > 11
/* iteration count as tunable cost parameter */
static unsigned int sha512crypt_iterations(void *salt)
{
struct saltstruct *sha512crypt_salt;
sha512crypt_salt = salt;
return (unsigned int)sha512crypt_salt->rounds;
}
#endif
struct fmt_main fmt_cryptsha512 = {
{
FORMAT_LABEL,
FORMAT_NAME,
"SHA512 " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{
"iteration count",
},
#endif
tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{
sha512crypt_iterations,
},
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
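/*
 * Minimal usage sketch (illustrative only, not part of the format): how the
 * methods above fit together for a single guess. The CRYPTSHA512_DEMO guard
 * and demo_one_guess() are hypothetical; in John proper the cracker core
 * drives these callbacks through fmt_cryptsha512.
 */
#ifdef CRYPTSHA512_DEMO
static int demo_one_guess(void)
{
	char *hash = "$6$LKO/Ute40T3FNF95$6S/6T2YuOIHY0N3XpLKABJ3soYcXD9mB7uVbtEZDj/LNscVhZoZ9DEH.sBciDrMsHOWOoASbNLTypH/5X26gN0";
	int count = 1;

	init(&fmt_cryptsha512);              /* allocate key/result buffers */
	set_salt(get_salt(hash));            /* parse rounds + salt, make current */
	set_key("U*U*U*U*", 0);              /* candidate plaintext for slot 0 */
	crypt_all(&count, NULL);             /* run the rounds loop above */
	return cmp_one(get_binary(hash), 0); /* nonzero on a correct guess */
}
#endif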
#endif /* plugin stanza */
|
potential.c | /* Generated by Cython 0.29.28 */
/* BEGIN: Cython Metadata
{
"distutils": {
"depends": [],
"extra_compile_args": [
"-O3",
"-march=native",
"-ffast-math",
"-fopenmp"
],
"extra_link_args": [
"-fopenmp"
],
"name": "potential",
"sources": [
"gadgetutils/potential.pyx"
]
},
"module_name": "potential"
}
END: Cython Metadata */
#ifndef PY_SSIZE_T_CLEAN
#define PY_SSIZE_T_CLEAN
#endif /* PY_SSIZE_T_CLEAN */
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_28"
#define CYTHON_HEX_VERSION 0x001D1CF0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#if PY_VERSION_HEX >= 0x030B00A4
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#elif !defined(CYTHON_FAST_THREAD_STATE)
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL (PY_VERSION_HEX < 0x030B00A1)
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#if PY_VERSION_HEX >= 0x030B00A4
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif !defined(CYTHON_USE_EXC_INFO_STACK)
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#if PY_MAJOR_VERSION < 3
#include "longintrepr.h"
#endif
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
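/* Compile-time check: the division by zero below is a constant-expression
   error whenever pyconfig.h's SIZEOF_VOID_P disagrees with the compiler's
   sizeof(void*), turning an ABI mismatch into a build failure. */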
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_DefaultClassType PyType_Type
#if PY_VERSION_HEX >= 0x030B00A1
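/* CPython 3.11 changed the layout and constructor of code objects, so this
   shim builds an empty code object with PyCode_NewEmpty and then fills in
   the fields through the Python-level co.replace(**kwds) call. */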
static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int k, int l, int s, int f,
PyObject *code, PyObject *c, PyObject* n, PyObject *v,
PyObject *fv, PyObject *cell, PyObject* fn,
PyObject *name, int fline, PyObject *lnos) {
PyObject *kwds=NULL, *argcount=NULL, *posonlyargcount=NULL, *kwonlyargcount=NULL;
PyObject *nlocals=NULL, *stacksize=NULL, *flags=NULL, *replace=NULL, *call_result=NULL, *empty=NULL;
const char *fn_cstr=NULL;
const char *name_cstr=NULL;
PyCodeObject* co=NULL;
PyObject *type, *value, *traceback;
PyErr_Fetch(&type, &value, &traceback);
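    /* Save any pending exception first: the PyDict/PyObject calls below can
       set their own, and the fetch/restore pair keeps the caller's exception
       state intact. */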
if (!(kwds=PyDict_New())) goto end;
if (!(argcount=PyLong_FromLong(a))) goto end;
if (PyDict_SetItemString(kwds, "co_argcount", argcount) != 0) goto end;
if (!(posonlyargcount=PyLong_FromLong(0))) goto end;
if (PyDict_SetItemString(kwds, "co_posonlyargcount", posonlyargcount) != 0) goto end;
if (!(kwonlyargcount=PyLong_FromLong(k))) goto end;
if (PyDict_SetItemString(kwds, "co_kwonlyargcount", kwonlyargcount) != 0) goto end;
if (!(nlocals=PyLong_FromLong(l))) goto end;
if (PyDict_SetItemString(kwds, "co_nlocals", nlocals) != 0) goto end;
if (!(stacksize=PyLong_FromLong(s))) goto end;
if (PyDict_SetItemString(kwds, "co_stacksize", stacksize) != 0) goto end;
if (!(flags=PyLong_FromLong(f))) goto end;
if (PyDict_SetItemString(kwds, "co_flags", flags) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_code", code) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_consts", c) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_names", n) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_varnames", v) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_freevars", fv) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_cellvars", cell) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_linetable", lnos) != 0) goto end;
if (!(fn_cstr=PyUnicode_AsUTF8AndSize(fn, NULL))) goto end;
if (!(name_cstr=PyUnicode_AsUTF8AndSize(name, NULL))) goto end;
if (!(co = PyCode_NewEmpty(fn_cstr, name_cstr, fline))) goto end;
if (!(replace = PyObject_GetAttrString((PyObject*)co, "replace"))) goto cleanup_code_too;
if (!(empty = PyTuple_New(0))) goto cleanup_code_too; // unfortunately __pyx_empty_tuple isn't available here
if (!(call_result = PyObject_Call(replace, empty, kwds))) goto cleanup_code_too;
Py_XDECREF((PyObject*)co);
co = (PyCodeObject*)call_result;
call_result = NULL;
if (0) {
cleanup_code_too:
Py_XDECREF((PyObject*)co);
co = NULL;
}
end:
Py_XDECREF(kwds);
Py_XDECREF(argcount);
Py_XDECREF(posonlyargcount);
Py_XDECREF(kwonlyargcount);
Py_XDECREF(nlocals);
Py_XDECREF(stacksize);
Py_XDECREF(replace);
Py_XDECREF(call_result);
Py_XDECREF(empty);
if (type) {
PyErr_Restore(type, value, traceback);
}
return co;
}
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#if defined(PyUnicode_IS_READY)
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#else
#define __Pyx_PyUnicode_READY(op) (0)
#endif
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length))
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#endif
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
#endif
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#ifndef PyObject_Unicode
#define PyObject_Unicode PyObject_Str
#endif
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
#define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
#else
#define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsHash_t
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
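    /* an all-ones bit pattern is a quiet (negative) IEEE-754 NaN, so this
       works even without the NAN macro */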
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_MARK_ERR_POS(f_index, lineno) \
{ __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__potential
#define __PYX_HAVE_API__potential
/* Early includes */
#include <math.h>
#include "pythread.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
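/* One unsigned compare covers both bounds below: a negative i wraps to a
   huge size_t, so it fails (size_t)i < (size_t)limit just as i >= limit does. */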
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
return (size_t) i < (size_t) limit;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"gadgetutils/potential.pyx",
"stringsource",
};
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
typedef struct {
struct __pyx_memoryview_obj *memview;
char *data;
Py_ssize_t shape[8];
Py_ssize_t strides[8];
Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
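/* data plus shape/strides describe the buffer per PEP 3118; a non-negative
   suboffset marks an indirect dimension that needs one extra pointer
   dereference after the stride step. */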
#define __Pyx_MemoryView_Len(m) (m.shape[0])
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
(__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) &&\
!defined(__i386__)
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
#include <Windows.h>
#undef __pyx_atomic_int_type
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#pragma message ("Using MSVC atomics")
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview)\
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview)\
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
struct __pyx_defaults;
typedef struct __pyx_defaults __pyx_defaults;
struct __pyx_defaults1;
typedef struct __pyx_defaults1 __pyx_defaults1;
struct __pyx_defaults2;
typedef struct __pyx_defaults2 __pyx_defaults2;
struct __pyx_defaults3;
typedef struct __pyx_defaults3 __pyx_defaults3;
struct __pyx_defaults {
PyObject *__pyx_arg_epsilon;
};
struct __pyx_defaults1 {
PyObject *__pyx_arg_epsilon;
};
struct __pyx_defaults2 {
PyObject *__pyx_arg_epsilon;
};
struct __pyx_defaults3 {
PyObject *__pyx_arg_epsilon;
};
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_array_obj {
PyObject_HEAD
struct __pyx_vtabstruct_array *__pyx_vtab;
char *data;
Py_ssize_t len;
char *format;
int ndim;
Py_ssize_t *_shape;
Py_ssize_t *_strides;
Py_ssize_t itemsize;
PyObject *mode;
PyObject *_format;
void (*callback_free_data)(void *);
int free_data;
int dtype_is_object;
};
/* "View.MemoryView":279
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
struct __pyx_MemviewEnum_obj {
PyObject_HEAD
PyObject *name;
};
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_memoryview_obj {
PyObject_HEAD
struct __pyx_vtabstruct_memoryview *__pyx_vtab;
PyObject *obj;
PyObject *_size;
PyObject *_array_interface;
PyThread_type_lock lock;
__pyx_atomic_int acquisition_count[2];
__pyx_atomic_int *acquisition_count_aligned_p;
Py_buffer view;
int flags;
int dtype_is_object;
__Pyx_TypeInfo *typeinfo;
};
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_memoryviewslice_obj {
struct __pyx_memoryview_obj __pyx_base;
__Pyx_memviewslice from_slice;
PyObject *from_object;
PyObject *(*to_object_func)(char *);
int (*to_dtype_func)(char *, PyObject *);
};
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_vtabstruct_array {
PyObject *(*get_memview)(struct __pyx_array_obj *);
};
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_vtabstruct_memoryview {
char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_vtabstruct__memoryviewslice {
struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* PyDictContains.proto */
static CYTHON_INLINE int __Pyx_PyDict_ContainsTF(PyObject* item, PyObject* dict, int eq) {
int result = PyDict_Contains(dict, item);
return unlikely(result < 0) ? result : (result == (eq == Py_EQ));
}
/* DictGetItem.proto */
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key);
#define __Pyx_PyObject_Dict_GetItem(obj, name)\
(likely(PyDict_CheckExact(obj)) ?\
__Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name))
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name)
#endif
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
(sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
#if CYTHON_FAST_PYCALL
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
#define __Pxy_PyFrame_Initialize_Offsets()\
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame)\
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif // CYTHON_FAST_PYCALL
#endif
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* UnicodeAsUCS4.proto */
static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject*);
/* object_ord.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyObject_Ord(c)\
(likely(PyUnicode_Check(c)) ? (long)__Pyx_PyUnicode_AsPy_UCS4(c) : __Pyx__PyObject_Ord(c))
#else
#define __Pyx_PyObject_Ord(c) __Pyx__PyObject_Ord(c)
#endif
static long __Pyx__PyObject_Ord(PyObject* c);
/* SetItemInt.proto */
#define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\
__Pyx_SetItemInt_Generic(o, to_py_func(i), v)))
static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v);
static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v,
int is_list, int wraparound, int boundscheck);
/* IterFinish.proto */
static CYTHON_INLINE int __Pyx_IterFinish(void);
/* PyObjectCallNoArg.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
#endif
/* PyObjectGetMethod.proto */
static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);
/* PyObjectCallMethod0.proto */
static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* UnpackItemEndCheck.proto */
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* UnpackTupleError.proto */
static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index);
/* UnpackTuple2.proto */
#define __Pyx_unpack_tuple2(tuple, value1, value2, is_tuple, has_known_size, decref_tuple)\
(likely(is_tuple || PyTuple_Check(tuple)) ?\
(likely(has_known_size || PyTuple_GET_SIZE(tuple) == 2) ?\
__Pyx_unpack_tuple2_exact(tuple, value1, value2, decref_tuple) :\
(__Pyx_UnpackTupleError(tuple, 2), -1)) :\
__Pyx_unpack_tuple2_generic(tuple, value1, value2, has_known_size, decref_tuple))
static CYTHON_INLINE int __Pyx_unpack_tuple2_exact(
PyObject* tuple, PyObject** value1, PyObject** value2, int decref_tuple);
static int __Pyx_unpack_tuple2_generic(
PyObject* tuple, PyObject** value1, PyObject** value2, int has_known_size, int decref_tuple);
/* dict_iter.proto */
static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* dict, int is_dict, PyObject* method_name,
Py_ssize_t* p_orig_length, int* p_is_dict);
static CYTHON_INLINE int __Pyx_dict_iter_next(PyObject* dict_or_iter, Py_ssize_t orig_length, Py_ssize_t* ppos,
PyObject** pkey, PyObject** pvalue, PyObject** pitem, int is_dict);
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* ListAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
__Pyx_SET_SIZE(list, len + 1);
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
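/* Fast path: while the list still has spare capacity (len < allocated)
   and is at least half full, the item is stored directly with
   PyList_SET_ITEM and Py_SIZE is bumped, skipping the PyList_Append
   call; anything else falls back to the public API. */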
/* MemviewSliceInit.proto */
#define __Pyx_BUF_MAX_NDIMS 8
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* IncludeStringH.proto */
#include <string.h>
/* BytesEquals.proto */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
/* UnicodeEquals.proto */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
/* StrEquals.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
/* DivInt[Py_ssize_t].proto */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
/* UnaryNegOverflows.proto */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
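/* UNARY_NEG_WOULD_OVERFLOW is true exactly for the most negative long:
   x < 0 while the unsigned negation 0 - (unsigned long)x reproduces the
   same bit pattern, i.e. negating x in signed arithmetic would overflow. */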
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
/* decode_c_string_utf16.proto */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 0;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = -1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
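/* The byteorder flag passed to PyUnicode_DecodeUTF16 selects the variant:
   0 honours a BOM (falling back to native order), -1 forces
   little-endian, and 1 forces big-endian. */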
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetAttr3.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
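/* The lookup cache above keys on the dict's ma_version_tag: while the
   tag is unchanged the cached value is reused, and the LOOKUP expression
   only re-runs after the dict has actually been mutated. */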
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
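/* Module-global lookups reuse the same versioning trick per call site:
   a static cache is validated against __pyx_d's version tag, with
   __Pyx_GetBuiltinName as the fallback when the name is not a module
   global. */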
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* ListCompAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len)) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
__Pyx_SET_SIZE(list, len + 1);
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* ListExtend.proto */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
PyObject* none = _PyList_Extend((PyListObject*)L, v);
if (unlikely(!none))
return -1;
Py_DECREF(none);
return 0;
#else
return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
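/* On CPython this calls the internal _PyList_Extend directly; the
   portable branch appends v by splicing it past the end of L with
   PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v). */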
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* DivInt[long].proto */
static CYTHON_INLINE long __Pyx_div_long(long, long);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* HasAttr.proto */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* SetVTable.proto */
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
/* PyObjectGetAttrStrNoError.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* FetchCommonType.proto */
static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type);
/* CythonFunctionShared.proto */
#define __Pyx_CyFunction_USED 1
#define __Pyx_CYFUNCTION_STATICMETHOD 0x01
#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02
#define __Pyx_CYFUNCTION_CCLASS 0x04
#define __Pyx_CyFunction_GetClosure(f)\
(((__pyx_CyFunctionObject *) (f))->func_closure)
#define __Pyx_CyFunction_GetClassObj(f)\
(((__pyx_CyFunctionObject *) (f))->func_classobj)
#define __Pyx_CyFunction_Defaults(type, f)\
((type *)(((__pyx_CyFunctionObject *) (f))->defaults))
#define __Pyx_CyFunction_SetDefaultsGetter(f, g)\
((__pyx_CyFunctionObject *) (f))->defaults_getter = (g)
typedef struct {
PyCFunctionObject func;
#if PY_VERSION_HEX < 0x030500A0
PyObject *func_weakreflist;
#endif
PyObject *func_dict;
PyObject *func_name;
PyObject *func_qualname;
PyObject *func_doc;
PyObject *func_globals;
PyObject *func_code;
PyObject *func_closure;
PyObject *func_classobj;
void *defaults;
int defaults_pyobjects;
  size_t defaults_size;  /* used by FusedFunction for copying defaults */
int flags;
PyObject *defaults_tuple;
PyObject *defaults_kwdict;
PyObject *(*defaults_getter)(PyObject *);
PyObject *func_annotations;
} __pyx_CyFunctionObject;
static PyTypeObject *__pyx_CyFunctionType = 0;
#define __Pyx_CyFunction_Check(obj) (__Pyx_TypeCheck(obj, __pyx_CyFunctionType))
static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml,
int flags, PyObject* qualname,
PyObject *self,
PyObject *module, PyObject *globals,
PyObject* code);
static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m,
size_t size,
int pyobjects);
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m,
PyObject *tuple);
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m,
PyObject *dict);
static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m,
PyObject *dict);
static int __pyx_CyFunction_init(void);
/* FusedFunction.proto */
typedef struct {
__pyx_CyFunctionObject func;
PyObject *__signatures__;
PyObject *type;
PyObject *self;
} __pyx_FusedFunctionObject;
static PyObject *__pyx_FusedFunction_New(PyMethodDef *ml, int flags,
PyObject *qualname, PyObject *closure,
PyObject *module, PyObject *globals,
PyObject *code);
static int __pyx_FusedFunction_clear(__pyx_FusedFunctionObject *self);
static PyTypeObject *__pyx_FusedFunctionType = NULL;
static int __pyx_FusedFunction_init(void);
#define __Pyx_FusedFunction_USED
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
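/* The code-object cache keeps its entries sorted by C line number so
   that __pyx_find_code_object can binary-search them
   (__pyx_bisect_code_objects) and __Pyx_AddTraceback can reuse the
   PyCodeObject it synthesizes for each line. */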
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
/* MemviewSliceIsContig.proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
/* OverlappingSlices.proto */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
/* Capsule.proto */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* TypeInfoCompare.proto */
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
/* MemviewSliceValidateAndInit.proto */
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_float(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *, int writable_flag);
/* GCCDiagnostics.proto */
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
#define __Pyx_HAS_GCC_DIAGNOSTIC
#endif
/* MemviewSliceCopyTemplate.proto */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* BytesContains.proto */
static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* ImportNumPyArray.proto */
static PyObject *__pyx_numpy_ndarray = NULL;
static PyObject* __Pyx_ImportNumPyArrayTypeIfAvailable(void);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'cython.view' */
/* Module declarations from 'cython' */
/* Module declarations from 'libc.math' */
/* Module declarations from 'potential' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static int __pyx_memoryview_thread_locks_used;
static PyThread_type_lock __pyx_memoryview_thread_locks[8];
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
#define __Pyx_MODULE_NAME "potential"
extern int __pyx_module_is_main_potential;
int __pyx_module_is_main_potential = 0;
/* Implementation of 'potential' */
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
static const char __pyx_k_N[] = "N";
static const char __pyx_k_O[] = "O";
static const char __pyx_k_c[] = "c";
static const char __pyx_k_h[] = "h";
static const char __pyx_k_i[] = "i";
static const char __pyx_k_j[] = "j";
static const char __pyx_k_k[] = "k";
static const char __pyx_k_r[] = "r";
static const char __pyx_k_u[] = "u";
static const char __pyx_k_W2[] = "W2";
static const char __pyx_k__2[] = "";
static const char __pyx_k__3[] = "()";
static const char __pyx_k__4[] = "|";
static const char __pyx_k_id[] = "id";
static const char __pyx_k_u2[] = "u2";
static const char __pyx_k_new[] = "__new__";
static const char __pyx_k_obj[] = "obj";
static const char __pyx_k_pos[] = "pos";
static const char __pyx_k_args[] = "args";
static const char __pyx_k_base[] = "base";
static const char __pyx_k_dict[] = "__dict__";
static const char __pyx_k_dims[] = "dims";
static const char __pyx_k_kind[] = "kind";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mode[] = "mode";
static const char __pyx_k_name[] = "name";
static const char __pyx_k_ndim[] = "ndim";
static const char __pyx_k_pack[] = "pack";
static const char __pyx_k_size[] = "size";
static const char __pyx_k_step[] = "step";
static const char __pyx_k_stop[] = "stop";
static const char __pyx_k_temp[] = "temp";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_ASCII[] = "ASCII";
static const char __pyx_k_class[] = "__class__";
static const char __pyx_k_dist2[] = "dist2";
static const char __pyx_k_dtype[] = "dtype";
static const char __pyx_k_error[] = "error";
static const char __pyx_k_flags[] = "flags";
static const char __pyx_k_float[] = "float";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_split[] = "split";
static const char __pyx_k_start[] = "start";
static const char __pyx_k_strip[] = "strip";
static const char __pyx_k_double[] = "double";
static const char __pyx_k_encode[] = "encode";
static const char __pyx_k_format[] = "format";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_kwargs[] = "kwargs";
static const char __pyx_k_name_2[] = "__name__";
static const char __pyx_k_pickle[] = "pickle";
static const char __pyx_k_reduce[] = "__reduce__";
static const char __pyx_k_struct[] = "struct";
static const char __pyx_k_unpack[] = "unpack";
static const char __pyx_k_update[] = "update";
static const char __pyx_k_epsilon[] = "epsilon";
static const char __pyx_k_fortran[] = "fortran";
static const char __pyx_k_memview[] = "memview";
static const char __pyx_k_Ellipsis[] = "Ellipsis";
static const char __pyx_k_defaults[] = "defaults";
static const char __pyx_k_getstate[] = "__getstate__";
static const char __pyx_k_itemsize[] = "itemsize";
static const char __pyx_k_pyx_type[] = "__pyx_type";
static const char __pyx_k_setstate[] = "__setstate__";
static const char __pyx_k_TypeError[] = "TypeError";
static const char __pyx_k_enumerate[] = "enumerate";
static const char __pyx_k_potential[] = "potential";
static const char __pyx_k_pyx_state[] = "__pyx_state";
static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
static const char __pyx_k_IndexError[] = "IndexError";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_pyx_result[] = "__pyx_result";
static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static const char __pyx_k_signatures[] = "signatures";
static const char __pyx_k_MemoryError[] = "MemoryError";
static const char __pyx_k_PickleError[] = "PickleError";
static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
static const char __pyx_k_stringsource[] = "stringsource";
static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
static const char __pyx_k_View_MemoryView[] = "View.MemoryView";
static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum";
static const char __pyx_k_sum_inv_pairdists[] = "sum_inv_pairdists";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static const char __pyx_k_gadgetutils_potential_pyx[] = "gadgetutils/potential.pyx";
static const char __pyx_k_No_matching_signature_found[] = "No matching signature found";
static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview";
static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview";
static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static const char __pyx_k_Expected_at_least_d_argument_s_g[] = "Expected at least %d argument%s, got %d";
static const char __pyx_k_Function_call_with_ambiguous_arg[] = "Function call with ambiguous argument types";
static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))";
static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
static PyObject *__pyx_n_s_ASCII;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor;
static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_kp_s_Expected_at_least_d_argument_s_g;
static PyObject *__pyx_kp_s_Function_call_with_ambiguous_arg;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_n_s_N;
static PyObject *__pyx_kp_s_No_matching_signature_found;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_PickleError;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_View_MemoryView;
static PyObject *__pyx_n_s_W2;
static PyObject *__pyx_kp_s__2;
static PyObject *__pyx_kp_s__3;
static PyObject *__pyx_kp_s__4;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_args;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_defaults;
static PyObject *__pyx_n_s_dict;
static PyObject *__pyx_n_s_dims;
static PyObject *__pyx_n_s_dist2;
static PyObject *__pyx_n_s_double;
static PyObject *__pyx_n_s_dtype;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_epsilon;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_float;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_n_u_fortran;
static PyObject *__pyx_kp_s_gadgetutils_potential_pyx;
static PyObject *__pyx_n_s_getstate;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_n_s_h;
static PyObject *__pyx_n_s_i;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_j;
static PyObject *__pyx_n_s_k;
static PyObject *__pyx_n_s_kind;
static PyObject *__pyx_n_s_kwargs;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_new;
static PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_pickle;
static PyObject *__pyx_n_s_pos;
static PyObject *__pyx_n_s_potential;
static PyObject *__pyx_n_s_pyx_PickleError;
static PyObject *__pyx_n_s_pyx_checksum;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_result;
static PyObject *__pyx_n_s_pyx_state;
static PyObject *__pyx_n_s_pyx_type;
static PyObject *__pyx_n_s_pyx_unpickle_Enum;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_r;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_reduce;
static PyObject *__pyx_n_s_reduce_cython;
static PyObject *__pyx_n_s_reduce_ex;
static PyObject *__pyx_n_s_setstate;
static PyObject *__pyx_n_s_setstate_cython;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_signatures;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_split;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_kp_s_stringsource;
static PyObject *__pyx_n_s_strip;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_sum_inv_pairdists;
static PyObject *__pyx_n_s_temp;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_u;
static PyObject *__pyx_n_s_u2;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_update;
static PyObject *__pyx_pf_9potential_sum_inv_pairdists(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_signatures, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs, CYTHON_UNUSED PyObject *__pyx_v_defaults); /* proto */
static PyObject *__pyx_pf_9potential_12__defaults__(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_9potential_2sum_inv_pairdists(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_pos, PyObject *__pyx_v_epsilon); /* proto */
static PyObject *__pyx_pf_9potential_14__defaults__(CYTHON_UNUSED PyObject *__pyx_self); /* proto */
static PyObject *__pyx_pf_9potential_4sum_inv_pairdists(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_pos, PyObject *__pyx_v_epsilon); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_float_2_8;
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_184977713;
static PyObject *__pyx_int_neg_1;
static PyObject *__pyx_k_;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__21;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__15;
static PyObject *__pyx_tuple__16;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__20;
static PyObject *__pyx_tuple__22;
static PyObject *__pyx_tuple__23;
static PyObject *__pyx_tuple__24;
static PyObject *__pyx_tuple__25;
static PyObject *__pyx_tuple__27;
static PyObject *__pyx_tuple__28;
static PyObject *__pyx_tuple__29;
static PyObject *__pyx_tuple__30;
static PyObject *__pyx_tuple__31;
static PyObject *__pyx_tuple__32;
static PyObject *__pyx_codeobj__26;
static PyObject *__pyx_codeobj__33;
/* Late includes */
/* "potential.pyx":16
* @cython.cdivision(True)
* @cython.initializedcheck(False)
* def sum_inv_pairdists(my_float [:,::1] pos, epsilon=1e-2): # <<<<<<<<<<<<<<
* """Calculate the sum of inverse pair distances for a collection of particles.
* Can be used to calculate potentials / potential energies.
*/
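/* A minimal usage sketch (hypothetical, not part of the generated file):
   the fused wrapper below dispatches on the dtype of `pos`, so float32
   and float64 arrays each reach their own C specialization, e.g.

       import numpy as np
       import potential                     # module name per __Pyx_MODULE_NAME
       pos = np.random.rand(100, 3).astype(np.float32)  # (N, 3), C-contiguous
       w2 = potential.sum_inv_pairdists(pos, epsilon=1e-2)

   The import path is an assumption inferred from
   "gadgetutils/potential.pyx" above. */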
/* Python wrapper */
static PyObject *__pyx_pw_9potential_1sum_inv_pairdists(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_9potential_sum_inv_pairdists[] = "Calculate the sum of inverse pair distances for a collection of particles.\n Can be used to calculate potentials / potential energies.\n ";
static PyMethodDef __pyx_mdef_9potential_1sum_inv_pairdists = {"sum_inv_pairdists", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_9potential_1sum_inv_pairdists, METH_VARARGS|METH_KEYWORDS, __pyx_doc_9potential_sum_inv_pairdists};
static PyObject *__pyx_pw_9potential_1sum_inv_pairdists(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_signatures = 0;
PyObject *__pyx_v_args = 0;
PyObject *__pyx_v_kwargs = 0;
CYTHON_UNUSED PyObject *__pyx_v_defaults = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_fused_cpdef (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_signatures,&__pyx_n_s_args,&__pyx_n_s_kwargs,&__pyx_n_s_defaults,0};
PyObject* values[4] = {0,0,0,0};
values[1] = __pyx_k_;
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_signatures)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_args);
if (value) { values[1] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_kwargs)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 2); __PYX_ERR(0, 16, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_defaults)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, 3); __PYX_ERR(0, 16, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_fused_cpdef") < 0)) __PYX_ERR(0, 16, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 4) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
}
__pyx_v_signatures = values[0];
__pyx_v_args = values[1];
__pyx_v_kwargs = values[2];
__pyx_v_defaults = values[3];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_fused_cpdef", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 16, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("potential.__pyx_fused_cpdef", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_9potential_sum_inv_pairdists(__pyx_self, __pyx_v_signatures, __pyx_v_args, __pyx_v_kwargs, __pyx_v_defaults);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
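/* The wrapper above only unpacks the dispatcher's four arguments
   (signatures, args, kwargs, defaults); the actual dtype-based signature
   selection happens in __pyx_pf_9potential_sum_inv_pairdists below. */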
static PyObject *__pyx_pf_9potential_sum_inv_pairdists(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_signatures, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs, CYTHON_UNUSED PyObject *__pyx_v_defaults) {
PyObject *__pyx_v_dest_sig = NULL;
Py_ssize_t __pyx_v_i;
PyTypeObject *__pyx_v_ndarray = 0;
__Pyx_memviewslice __pyx_v_memslice;
Py_ssize_t __pyx_v_itemsize;
CYTHON_UNUSED int __pyx_v_dtype_signed;
char __pyx_v_kind;
PyObject *__pyx_v_arg = NULL;
PyObject *__pyx_v_dtype = NULL;
PyObject *__pyx_v_arg_base = NULL;
PyObject *__pyx_v_candidates = NULL;
PyObject *__pyx_v_sig = NULL;
int __pyx_v_match_found;
PyObject *__pyx_v_src_sig = NULL;
PyObject *__pyx_v_dst_type = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
long __pyx_t_7;
__Pyx_memviewslice __pyx_t_8;
Py_ssize_t __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
PyObject *__pyx_t_12 = NULL;
PyObject *__pyx_t_13 = NULL;
PyObject *__pyx_t_14 = NULL;
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
int __pyx_t_18;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("sum_inv_pairdists", 0);
__Pyx_INCREF(__pyx_v_kwargs);
__pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyList_SET_ITEM(__pyx_t_1, 0, Py_None);
__pyx_v_dest_sig = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_3 = (__pyx_v_kwargs != Py_None);
__pyx_t_4 = (__pyx_t_3 != 0);
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_kwargs); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 16, __pyx_L1_error)
__pyx_t_3 = ((!__pyx_t_4) != 0);
__pyx_t_2 = __pyx_t_3;
__pyx_L4_bool_binop_done:;
if (__pyx_t_2) {
__Pyx_INCREF(Py_None);
__Pyx_DECREF_SET(__pyx_v_kwargs, Py_None);
}
__pyx_t_1 = ((PyObject *)__Pyx_ImportNumPyArrayTypeIfAvailable()); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_ndarray = ((PyTypeObject*)__pyx_t_1);
__pyx_t_1 = 0;
__pyx_v_itemsize = -1L;
if (unlikely(__pyx_v_args == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(0, 16, __pyx_L1_error)
}
__pyx_t_5 = PyTuple_GET_SIZE(((PyObject*)__pyx_v_args)); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 16, __pyx_L1_error)
__pyx_t_2 = ((0 < __pyx_t_5) != 0);
if (__pyx_t_2) {
if (unlikely(__pyx_v_args == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(0, 16, __pyx_L1_error)
}
__pyx_t_1 = PyTuple_GET_ITEM(((PyObject*)__pyx_v_args), 0);
__Pyx_INCREF(__pyx_t_1);
__pyx_v_arg = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6;
}
__pyx_t_3 = (__pyx_v_kwargs != Py_None);
__pyx_t_4 = (__pyx_t_3 != 0);
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L7_bool_binop_done;
}
if (unlikely(__pyx_v_kwargs == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
__PYX_ERR(0, 16, __pyx_L1_error)
}
__pyx_t_4 = (__Pyx_PyDict_ContainsTF(__pyx_n_s_pos, ((PyObject*)__pyx_v_kwargs), Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 16, __pyx_L1_error)
__pyx_t_3 = (__pyx_t_4 != 0);
__pyx_t_2 = __pyx_t_3;
__pyx_L7_bool_binop_done:;
if (__pyx_t_2) {
if (unlikely(__pyx_v_kwargs == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(0, 16, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_PyDict_GetItem(((PyObject*)__pyx_v_kwargs), __pyx_n_s_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_arg = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6;
}
/*else*/ {
if (unlikely(__pyx_v_args == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(0, 16, __pyx_L1_error)
}
__pyx_t_5 = PyTuple_GET_SIZE(((PyObject*)__pyx_v_args)); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 16, __pyx_L1_error)
__pyx_t_1 = PyInt_FromSsize_t(__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_INCREF(__pyx_int_1);
__Pyx_GIVEREF(__pyx_int_1);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_int_1);
__Pyx_INCREF(__pyx_kp_s__2);
__Pyx_GIVEREF(__pyx_kp_s__2);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_kp_s__2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_Expected_at_least_d_argument_s_g, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(0, 16, __pyx_L1_error)
}
__pyx_L6:;
while (1) {
__pyx_t_2 = (__pyx_v_ndarray != ((PyTypeObject*)Py_None));
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
__pyx_t_3 = __Pyx_TypeCheck(__pyx_v_arg, __pyx_v_ndarray);
__pyx_t_2 = (__pyx_t_3 != 0);
if (__pyx_t_2) {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_v_dtype = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L12;
}
__pyx_t_2 = __pyx_memoryview_check(__pyx_v_arg);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_base); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_v_arg_base = __pyx_t_6;
__pyx_t_6 = 0;
__pyx_t_3 = __Pyx_TypeCheck(__pyx_v_arg_base, __pyx_v_ndarray);
__pyx_t_2 = (__pyx_t_3 != 0);
if (__pyx_t_2) {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg_base, __pyx_n_s_dtype); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_v_dtype = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L13;
}
/*else*/ {
__Pyx_INCREF(Py_None);
__pyx_v_dtype = Py_None;
}
__pyx_L13:;
goto __pyx_L12;
}
/*else*/ {
__Pyx_INCREF(Py_None);
__pyx_v_dtype = Py_None;
}
__pyx_L12:;
__pyx_v_itemsize = -1L;
__pyx_t_2 = (__pyx_v_dtype != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_dtype, __pyx_n_s_itemsize); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_t_6); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_v_itemsize = __pyx_t_5;
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_dtype, __pyx_n_s_kind); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_Ord(__pyx_t_6); if (unlikely(__pyx_t_7 == ((long)(long)(Py_UCS4)-1))) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_v_kind = __pyx_t_7;
__pyx_v_dtype_signed = (__pyx_v_kind == 'i');
switch (__pyx_v_kind) {
case 'i':
case 'u':
break;
case 'f':
__pyx_t_2 = (((sizeof(float)) == __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_3 = __pyx_t_2;
goto __pyx_L16_bool_binop_done;
}
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_ndim); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_t_6); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_2 = ((((Py_ssize_t)__pyx_t_5) == 2) != 0);
__pyx_t_3 = __pyx_t_2;
__pyx_L16_bool_binop_done:;
if (__pyx_t_3) {
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_float, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 16, __pyx_L1_error)
goto __pyx_L10_break;
}
__pyx_t_2 = (((sizeof(double)) == __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_3 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_arg, __pyx_n_s_ndim); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_t_6); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_2 = ((((Py_ssize_t)__pyx_t_5) == 2) != 0);
__pyx_t_3 = __pyx_t_2;
__pyx_L19_bool_binop_done:;
if (__pyx_t_3) {
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_double, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 16, __pyx_L1_error)
goto __pyx_L10_break;
}
break;
case 'c':
break;
case 'O':
break;
default: break;
}
}
}
__pyx_t_2 = ((__pyx_v_itemsize == -1L) != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_3 = __pyx_t_2;
goto __pyx_L22_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_itemsize == (sizeof(float))) != 0);
__pyx_t_3 = __pyx_t_2;
__pyx_L22_bool_binop_done:;
if (__pyx_t_3) {
__pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float(__pyx_v_arg, 0);
__pyx_v_memslice = __pyx_t_8;
__pyx_t_3 = (__pyx_v_memslice.memview != 0);
if (__pyx_t_3) {
__PYX_XDEC_MEMVIEW((&__pyx_v_memslice), 1);
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_float, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 16, __pyx_L1_error)
goto __pyx_L10_break;
}
/*else*/ {
PyErr_Clear();
}
}
__pyx_t_2 = ((__pyx_v_itemsize == -1L) != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_3 = __pyx_t_2;
goto __pyx_L26_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_itemsize == (sizeof(double))) != 0);
__pyx_t_3 = __pyx_t_2;
__pyx_L26_bool_binop_done:;
if (__pyx_t_3) {
__pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_arg, 0);
__pyx_v_memslice = __pyx_t_8;
__pyx_t_3 = (__pyx_v_memslice.memview != 0);
if (__pyx_t_3) {
__PYX_XDEC_MEMVIEW((&__pyx_v_memslice), 1);
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, __pyx_n_s_double, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 16, __pyx_L1_error)
goto __pyx_L10_break;
}
/*else*/ {
PyErr_Clear();
}
}
if (unlikely(__Pyx_SetItemInt(__pyx_v_dest_sig, 0, Py_None, long, 1, __Pyx_PyInt_From_long, 1, 0, 0) < 0)) __PYX_ERR(0, 16, __pyx_L1_error)
goto __pyx_L10_break;
}
__pyx_L10_break:;
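/* Candidate selection: each key of `signatures` is stripped and split
 * (the separators are the interned literals __pyx_kp_s__3/__pyx_kp_s__4,
 * conventionally "()" and "|" in Cython's fused-signature format) and the
 * pieces are compared element-wise against dest_sig. Exactly one key must
 * survive; otherwise a TypeError is raised from the prebuilt argument
 * tuples __pyx_tuple__5/__pyx_tuple__6 defined elsewhere in the module. */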
__pyx_t_6 = PyList_New(0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_v_candidates = ((PyObject*)__pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_5 = 0;
if (unlikely(__pyx_v_signatures == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
__PYX_ERR(0, 16, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_dict_iterator(((PyObject*)__pyx_v_signatures), 1, ((PyObject *)NULL), (&__pyx_t_9), (&__pyx_t_10)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__pyx_t_6 = __pyx_t_1;
__pyx_t_1 = 0;
while (1) {
__pyx_t_11 = __Pyx_dict_iter_next(__pyx_t_6, __pyx_t_9, &__pyx_t_5, &__pyx_t_1, NULL, NULL, __pyx_t_10);
if (unlikely(__pyx_t_11 == 0)) break;
if (unlikely(__pyx_t_11 == -1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XDECREF_SET(__pyx_v_sig, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_v_match_found = 0;
__pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_v_sig, __pyx_n_s_strip); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_13);
__pyx_t_14 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_13))) {
__pyx_t_14 = PyMethod_GET_SELF(__pyx_t_13);
if (likely(__pyx_t_14)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_13);
__Pyx_INCREF(__pyx_t_14);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_13, function);
}
}
__pyx_t_12 = (__pyx_t_14) ? __Pyx_PyObject_Call2Args(__pyx_t_13, __pyx_t_14, __pyx_kp_s__3) : __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_kp_s__3);
__Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0;
if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_12);
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
__pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_12, __pyx_n_s_split); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_13);
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_t_12 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_13))) {
__pyx_t_12 = PyMethod_GET_SELF(__pyx_t_13);
if (likely(__pyx_t_12)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_13);
__Pyx_INCREF(__pyx_t_12);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_13, function);
}
}
__pyx_t_1 = (__pyx_t_12) ? __Pyx_PyObject_Call2Args(__pyx_t_13, __pyx_t_12, __pyx_kp_s__4) : __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_kp_s__4);
__Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
__Pyx_XDECREF_SET(__pyx_v_src_sig, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_15 = PyList_GET_SIZE(__pyx_v_dest_sig); if (unlikely(__pyx_t_15 == ((Py_ssize_t)-1))) __PYX_ERR(0, 16, __pyx_L1_error)
__pyx_t_16 = __pyx_t_15;
for (__pyx_t_17 = 0; __pyx_t_17 < __pyx_t_16; __pyx_t_17+=1) {
__pyx_v_i = __pyx_t_17;
__pyx_t_1 = PyList_GET_ITEM(__pyx_v_dest_sig, __pyx_v_i);
__Pyx_INCREF(__pyx_t_1);
__Pyx_XDECREF_SET(__pyx_v_dst_type, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_3 = (__pyx_v_dst_type != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
if (__pyx_t_2) {
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_src_sig, __pyx_v_i, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_13 = PyObject_RichCompare(__pyx_t_1, __pyx_v_dst_type, Py_EQ); __Pyx_XGOTREF(__pyx_t_13); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_13); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
if (__pyx_t_2) {
__pyx_v_match_found = 1;
goto __pyx_L34;
}
/*else*/ {
__pyx_v_match_found = 0;
goto __pyx_L32_break;
}
__pyx_L34:;
}
}
__pyx_L32_break:;
__pyx_t_2 = (__pyx_v_match_found != 0);
if (__pyx_t_2) {
__pyx_t_18 = __Pyx_PyList_Append(__pyx_v_candidates, __pyx_v_sig); if (unlikely(__pyx_t_18 == ((int)-1))) __PYX_ERR(0, 16, __pyx_L1_error)
}
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_2 = (PyList_GET_SIZE(__pyx_v_candidates) != 0);
__pyx_t_3 = ((!__pyx_t_2) != 0);
if (__pyx_t_3) {
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(0, 16, __pyx_L1_error)
}
__pyx_t_9 = PyList_GET_SIZE(__pyx_v_candidates); if (unlikely(__pyx_t_9 == ((Py_ssize_t)-1))) __PYX_ERR(0, 16, __pyx_L1_error)
__pyx_t_3 = ((__pyx_t_9 > 1) != 0);
if (__pyx_t_3) {
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(0, 16, __pyx_L1_error)
}
/*else*/ {
__Pyx_XDECREF(__pyx_r);
if (unlikely(__pyx_v_signatures == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(0, 16, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_PyDict_GetItem(((PyObject*)__pyx_v_signatures), PyList_GET_ITEM(__pyx_v_candidates, 0)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
}
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_12);
__Pyx_XDECREF(__pyx_t_13);
__Pyx_XDECREF(__pyx_t_14);
__Pyx_AddTraceback("potential.__pyx_fused_cpdef", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_dest_sig);
__Pyx_XDECREF(__pyx_v_ndarray);
__Pyx_XDECREF(__pyx_v_arg);
__Pyx_XDECREF(__pyx_v_dtype);
__Pyx_XDECREF(__pyx_v_arg_base);
__Pyx_XDECREF(__pyx_v_candidates);
__Pyx_XDECREF(__pyx_v_sig);
__Pyx_XDECREF(__pyx_v_src_sig);
__Pyx_XDECREF(__pyx_v_dst_type);
__Pyx_XDECREF(__pyx_v_kwargs);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
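/* __defaults__ getters: Cython stores the evaluated default for `epsilon`
 * per fused specialization (__pyx_defaults2 for the float variant,
 * __pyx_defaults3 for the double one further down) and rebuilds the
 * Python-visible ((epsilon,), None) pair -- positional defaults plus no
 * keyword-only defaults -- on each access. */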
static PyObject *__pyx_pf_9potential_12__defaults__(CYTHON_UNUSED PyObject *__pyx_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__defaults__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__Pyx_CyFunction_Defaults(__pyx_defaults2, __pyx_self)->__pyx_arg_epsilon);
__Pyx_GIVEREF(__Pyx_CyFunction_Defaults(__pyx_defaults2, __pyx_self)->__pyx_arg_epsilon);
PyTuple_SET_ITEM(__pyx_t_1, 0, __Pyx_CyFunction_Defaults(__pyx_defaults2, __pyx_self)->__pyx_arg_epsilon);
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_2, 1, Py_None);
__pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("potential.__defaults__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
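/* __pyx_fuse_0: the float specialization of sum_inv_pairdists. Apart from
 * the element type of the `pos` memoryview (float here, double in the
 * __pyx_fuse_1 copy further down) the wrapper and body are identical. */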
/* Python wrapper */
static PyObject *__pyx_fuse_0__pyx_pw_9potential_3sum_inv_pairdists(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_fuse_0__pyx_mdef_9potential_3sum_inv_pairdists = {"__pyx_fuse_0sum_inv_pairdists", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_fuse_0__pyx_pw_9potential_3sum_inv_pairdists, METH_VARARGS|METH_KEYWORDS, __pyx_doc_9potential_sum_inv_pairdists};
static PyObject *__pyx_fuse_0__pyx_pw_9potential_3sum_inv_pairdists(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_pos = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_v_epsilon = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("sum_inv_pairdists (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pos,&__pyx_n_s_epsilon,0};
PyObject* values[2] = {0,0};
__pyx_defaults2 *__pyx_dynamic_args = __Pyx_CyFunction_Defaults(__pyx_defaults2, __pyx_self);
values[1] = __pyx_dynamic_args->__pyx_arg_epsilon;
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pos)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_epsilon);
if (value) { values[1] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "sum_inv_pairdists") < 0)) __PYX_ERR(0, 16, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_pos = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_pos.memview)) __PYX_ERR(0, 16, __pyx_L3_error)
__pyx_v_epsilon = values[1];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("sum_inv_pairdists", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 16, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("potential.sum_inv_pairdists", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_9potential_2sum_inv_pairdists(__pyx_self, __pyx_v_pos, __pyx_v_epsilon);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_9potential_2sum_inv_pairdists(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_pos, PyObject *__pyx_v_epsilon) {
CYTHON_UNUSED int __pyx_v_N;
int __pyx_v_dims;
double __pyx_v_potential;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_v_k;
float __pyx_v_r;
float __pyx_v_dist2;
float __pyx_v_temp;
float __pyx_v_h;
float __pyx_v_u;
float __pyx_v_W2;
float __pyx_v_u2;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
float __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
Py_ssize_t __pyx_t_15;
int __pyx_t_16;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_fuse_0sum_inv_pairdists", 0);
/* "potential.pyx":20
* Can be used to calculate potentials / potential energies.
* """
* cdef int N = pos.shape[0] # <<<<<<<<<<<<<<
* cdef int dims = pos.shape[1]
* cdef double potential = 0
*/
__pyx_v_N = (__pyx_v_pos.shape[0]);
/* "potential.pyx":21
* """
* cdef int N = pos.shape[0]
* cdef int dims = pos.shape[1] # <<<<<<<<<<<<<<
* cdef double potential = 0
*
*/
__pyx_v_dims = (__pyx_v_pos.shape[1]);
/* "potential.pyx":22
* cdef int N = pos.shape[0]
* cdef int dims = pos.shape[1]
* cdef double potential = 0 # <<<<<<<<<<<<<<
*
* cdef int i, j, k
*/
__pyx_v_potential = 0.0;
/* "potential.pyx":27
* cdef my_float r, dist2, temp, h, u, W2, u2
*
* h = 2.8 * epsilon # <<<<<<<<<<<<<<
*
* with nogil, parallel():
*/
__pyx_t_1 = PyNumber_Multiply(__pyx_float_2_8, __pyx_v_epsilon); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 27, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __pyx_PyFloat_AsFloat(__pyx_t_1); if (unlikely((__pyx_t_2 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 27, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_h = __pyx_t_2;
/* "potential.pyx":29
* h = 2.8 * epsilon
*
* with nogil, parallel(): # <<<<<<<<<<<<<<
* for i in prange(N, schedule="dynamic"):
* for j in range(i):
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
#ifdef _OPENMP
#pragma omp parallel reduction(-:__pyx_v_potential) private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_3, __pyx_t_4, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_8, __pyx_t_9)
#endif /* _OPENMP */
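/* reduction(-:potential) gives every thread a zero-initialized private
 * copy of the accumulator and combines the copies when the parallel
 * region ends; schedule(dynamic) on the omp-for below balances the
 * triangular j < i workload across threads. */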
{
/* "potential.pyx":30
*
* with nogil, parallel():
* for i in prange(N, schedule="dynamic"): # <<<<<<<<<<<<<<
* for j in range(i):
*
*/
__pyx_t_3 = __pyx_v_N;
if ((1 == 0)) abort();
{
__pyx_t_5 = (__pyx_t_3 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_5 > 0)
{
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_W2) lastprivate(__pyx_v_dist2) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) lastprivate(__pyx_v_r) lastprivate(__pyx_v_temp) lastprivate(__pyx_v_u) lastprivate(__pyx_v_u2) schedule(dynamic)
#endif /* _OPENMP */
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_5; __pyx_t_4++){
{
__pyx_v_i = (int)(0 + 1 * __pyx_t_4);
/* Initialize private variables to invalid values */
__pyx_v_W2 = ((float)__PYX_NAN());
__pyx_v_dist2 = ((float)__PYX_NAN());
__pyx_v_j = ((int)0xbad0bad0);
__pyx_v_k = ((int)0xbad0bad0);
__pyx_v_r = ((float)__PYX_NAN());
__pyx_v_temp = ((float)__PYX_NAN());
__pyx_v_u = ((float)__PYX_NAN());
__pyx_v_u2 = ((float)__PYX_NAN());
/* "potential.pyx":31
* with nogil, parallel():
* for i in prange(N, schedule="dynamic"):
* for j in range(i): # <<<<<<<<<<<<<<
*
* dist2 = 0
*/
__pyx_t_6 = __pyx_v_i;
__pyx_t_7 = __pyx_t_6;
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) {
__pyx_v_j = __pyx_t_8;
/* "potential.pyx":33
* for j in range(i):
*
* dist2 = 0 # <<<<<<<<<<<<<<
* for k in range(dims):
* temp = pos[i,k] - pos[j,k]
*/
__pyx_v_dist2 = 0.0;
/* "potential.pyx":34
*
* dist2 = 0
* for k in range(dims): # <<<<<<<<<<<<<<
* temp = pos[i,k] - pos[j,k]
* dist2 = dist2 + temp * temp
*/
__pyx_t_9 = __pyx_v_dims;
__pyx_t_10 = __pyx_t_9;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
__pyx_v_k = __pyx_t_11;
/* "potential.pyx":35
* dist2 = 0
* for k in range(dims):
* temp = pos[i,k] - pos[j,k] # <<<<<<<<<<<<<<
* dist2 = dist2 + temp * temp
*
*/
__pyx_t_12 = __pyx_v_i;
__pyx_t_13 = __pyx_v_k;
__pyx_t_14 = __pyx_v_j;
__pyx_t_15 = __pyx_v_k;
__pyx_v_temp = ((*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_pos.data + __pyx_t_12 * __pyx_v_pos.strides[0]) )) + __pyx_t_13)) ))) - (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_pos.data + __pyx_t_14 * __pyx_v_pos.strides[0]) )) + __pyx_t_15)) ))));
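/* The expression above is just pos[i,k] - pos[j,k]: dim 0 is indexed via
 * strides[0], while dim 1 is the contiguous axis of the [:,::1] slice,
 * hence plain pointer arithmetic on float*. */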
/* "potential.pyx":36
* for k in range(dims):
* temp = pos[i,k] - pos[j,k]
* dist2 = dist2 + temp * temp # <<<<<<<<<<<<<<
*
* r = sqrt(dist2)
*/
__pyx_v_dist2 = (__pyx_v_dist2 + (__pyx_v_temp * __pyx_v_temp));
}
/* "potential.pyx":38
* dist2 = dist2 + temp * temp
*
* r = sqrt(dist2) # <<<<<<<<<<<<<<
* u = r / h
*
*/
__pyx_v_r = sqrt(__pyx_v_dist2);
/* "potential.pyx":39
*
* r = sqrt(dist2)
* u = r / h # <<<<<<<<<<<<<<
*
* if u < 0.5:
*/
__pyx_v_u = (__pyx_v_r / __pyx_v_h);
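/* Softening branch. For u = r/h >= 1 the bare 1/r from sqrt(dist2)
 * survives; for u < 1 the two polynomial branches overwrite r with -h/W2,
 * so the generic `potential -= 1/r` update below contributes W2/h (a
 * negative quantity near u = 0) instead of -1/r. The coefficients and
 * h = 2.8*epsilon match the cubic-spline softened potential used in
 * GADGET-style N-body codes -- an identification inferred from the
 * constants, not stated in this file. */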
/* "potential.pyx":41
* u = r / h
*
* if u < 0.5: # <<<<<<<<<<<<<<
* u2 = u * u
* W2 = -14/5. + u2 * (16/3. + u2 * (-48/5. + 32/5. * u))
*/
__pyx_t_16 = ((__pyx_v_u < 0.5) != 0);
if (__pyx_t_16) {
/* "potential.pyx":42
*
* if u < 0.5:
* u2 = u * u # <<<<<<<<<<<<<<
* W2 = -14/5. + u2 * (16/3. + u2 * (-48/5. + 32/5. * u))
* r = -h / W2
*/
__pyx_v_u2 = (__pyx_v_u * __pyx_v_u);
/* "potential.pyx":43
* if u < 0.5:
* u2 = u * u
* W2 = -14/5. + u2 * (16/3. + u2 * (-48/5. + 32/5. * u)) # <<<<<<<<<<<<<<
* r = -h / W2
* elif u < 1:
*/
__pyx_v_W2 = ((-14.0 / 5.) + (__pyx_v_u2 * ((16.0 / 3.) + (__pyx_v_u2 * ((-48.0 / 5.) + ((32.0 / 5.) * __pyx_v_u))))));
/* "potential.pyx":44
* u2 = u * u
* W2 = -14/5. + u2 * (16/3. + u2 * (-48/5. + 32/5. * u))
* r = -h / W2 # <<<<<<<<<<<<<<
* elif u < 1:
* W2 = 1/(15*u) - 16/5. + u*u * (32/3. + u * (-16 + u * (48/5. - 32/15. * u)))
*/
__pyx_v_r = ((-__pyx_v_h) / __pyx_v_W2);
/* "potential.pyx":41
* u = r / h
*
* if u < 0.5: # <<<<<<<<<<<<<<
* u2 = u * u
* W2 = -14/5. + u2 * (16/3. + u2 * (-48/5. + 32/5. * u))
*/
goto __pyx_L18;
}
/* "potential.pyx":45
* W2 = -14/5. + u2 * (16/3. + u2 * (-48/5. + 32/5. * u))
* r = -h / W2
* elif u < 1: # <<<<<<<<<<<<<<
* W2 = 1/(15*u) - 16/5. + u*u * (32/3. + u * (-16 + u * (48/5. - 32/15. * u)))
* r = -h / W2
*/
__pyx_t_16 = ((__pyx_v_u < 1.0) != 0);
if (__pyx_t_16) {
/* "potential.pyx":46
* r = -h / W2
* elif u < 1:
* W2 = 1/(15*u) - 16/5. + u*u * (32/3. + u * (-16 + u * (48/5. - 32/15. * u))) # <<<<<<<<<<<<<<
* r = -h / W2
*
*/
__pyx_v_W2 = (((1.0 / (15.0 * __pyx_v_u)) - (16.0 / 5.)) + ((__pyx_v_u * __pyx_v_u) * ((32.0 / 3.) + (__pyx_v_u * (-16.0 + (__pyx_v_u * ((48.0 / 5.) - ((32.0 / 15.) * __pyx_v_u))))))));
/* "potential.pyx":47
* elif u < 1:
* W2 = 1/(15*u) - 16/5. + u*u * (32/3. + u * (-16 + u * (48/5. - 32/15. * u)))
* r = -h / W2 # <<<<<<<<<<<<<<
*
* potential -= 1/r
*/
__pyx_v_r = ((-__pyx_v_h) / __pyx_v_W2);
/* "potential.pyx":45
* W2 = -14/5. + u2 * (16/3. + u2 * (-48/5. + 32/5. * u))
* r = -h / W2
* elif u < 1: # <<<<<<<<<<<<<<
* W2 = 1/(15*u) - 16/5. + u*u * (32/3. + u * (-16 + u * (48/5. - 32/15. * u)))
* r = -h / W2
*/
}
__pyx_L18:;
/* "potential.pyx":49
* r = -h / W2
*
* potential -= 1/r # <<<<<<<<<<<<<<
*
* return potential
*/
__pyx_v_potential = (__pyx_v_potential - (1.0 / __pyx_v_r));
}
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "potential.pyx":29
* h = 2.8 * epsilon
*
* with nogil, parallel(): # <<<<<<<<<<<<<<
* for i in prange(N, schedule="dynamic"):
* for j in range(i):
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "potential.pyx":51
* potential -= 1/r
*
* return potential # <<<<<<<<<<<<<<
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_potential); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 51, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "potential.pyx":16
* @cython.cdivision(True)
* @cython.initializedcheck(False)
* def sum_inv_pairdists(my_float [:,::1] pos, epsilon=1e-2): # <<<<<<<<<<<<<<
* """Calculate the sum of inverse pair distances for a collection of particles.
* Can be used to calculate potentials / potential energies.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("potential.sum_inv_pairdists", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_pos, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
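/* Second __defaults__/wrapper/body triple: the double (__pyx_fuse_1)
 * specialization, generated from the same potential.pyx source with the
 * fused type my_float resolved to double. */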
static PyObject *__pyx_pf_9potential_14__defaults__(CYTHON_UNUSED PyObject *__pyx_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__defaults__", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__Pyx_CyFunction_Defaults(__pyx_defaults3, __pyx_self)->__pyx_arg_epsilon);
__Pyx_GIVEREF(__Pyx_CyFunction_Defaults(__pyx_defaults3, __pyx_self)->__pyx_arg_epsilon);
PyTuple_SET_ITEM(__pyx_t_1, 0, __Pyx_CyFunction_Defaults(__pyx_defaults3, __pyx_self)->__pyx_arg_epsilon);
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_2, 1, Py_None);
__pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("potential.__defaults__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
static PyObject *__pyx_fuse_1__pyx_pw_9potential_5sum_inv_pairdists(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_fuse_1__pyx_mdef_9potential_5sum_inv_pairdists = {"__pyx_fuse_1sum_inv_pairdists", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_fuse_1__pyx_pw_9potential_5sum_inv_pairdists, METH_VARARGS|METH_KEYWORDS, __pyx_doc_9potential_sum_inv_pairdists};
static PyObject *__pyx_fuse_1__pyx_pw_9potential_5sum_inv_pairdists(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_pos = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_v_epsilon = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("sum_inv_pairdists (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pos,&__pyx_n_s_epsilon,0};
PyObject* values[2] = {0,0};
__pyx_defaults3 *__pyx_dynamic_args = __Pyx_CyFunction_Defaults(__pyx_defaults3, __pyx_self);
values[1] = __pyx_dynamic_args->__pyx_arg_epsilon;
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pos)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_epsilon);
if (value) { values[1] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "sum_inv_pairdists") < 0)) __PYX_ERR(0, 16, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_pos = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_pos.memview)) __PYX_ERR(0, 16, __pyx_L3_error)
__pyx_v_epsilon = values[1];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("sum_inv_pairdists", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 16, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("potential.sum_inv_pairdists", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_9potential_4sum_inv_pairdists(__pyx_self, __pyx_v_pos, __pyx_v_epsilon);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_9potential_4sum_inv_pairdists(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_pos, PyObject *__pyx_v_epsilon) {
CYTHON_UNUSED int __pyx_v_N;
int __pyx_v_dims;
double __pyx_v_potential;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_v_k;
double __pyx_v_r;
double __pyx_v_dist2;
double __pyx_v_temp;
double __pyx_v_h;
double __pyx_v_u;
double __pyx_v_W2;
double __pyx_v_u2;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
double __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
Py_ssize_t __pyx_t_15;
int __pyx_t_16;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_fuse_1sum_inv_pairdists", 0);
/* "potential.pyx":20
* Can be used to calculate potentials / potential energies.
* """
* cdef int N = pos.shape[0] # <<<<<<<<<<<<<<
* cdef int dims = pos.shape[1]
* cdef double potential = 0
*/
__pyx_v_N = (__pyx_v_pos.shape[0]);
/* "potential.pyx":21
* """
* cdef int N = pos.shape[0]
* cdef int dims = pos.shape[1] # <<<<<<<<<<<<<<
* cdef double potential = 0
*
*/
__pyx_v_dims = (__pyx_v_pos.shape[1]);
/* "potential.pyx":22
* cdef int N = pos.shape[0]
* cdef int dims = pos.shape[1]
* cdef double potential = 0 # <<<<<<<<<<<<<<
*
* cdef int i, j, k
*/
__pyx_v_potential = 0.0;
/* "potential.pyx":27
* cdef my_float r, dist2, temp, h, u, W2, u2
*
* h = 2.8 * epsilon # <<<<<<<<<<<<<<
*
* with nogil, parallel():
*/
__pyx_t_1 = PyNumber_Multiply(__pyx_float_2_8, __pyx_v_epsilon); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 27, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_2 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 27, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_h = __pyx_t_2;
/* "potential.pyx":29
* h = 2.8 * epsilon
*
* with nogil, parallel(): # <<<<<<<<<<<<<<
* for i in prange(N, schedule="dynamic"):
* for j in range(i):
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
#ifdef _OPENMP
#pragma omp parallel reduction(-:__pyx_v_potential) private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_3, __pyx_t_4, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_8, __pyx_t_9)
#endif /* _OPENMP */
{
/* "potential.pyx":30
*
* with nogil, parallel():
* for i in prange(N, schedule="dynamic"): # <<<<<<<<<<<<<<
* for j in range(i):
*
*/
__pyx_t_3 = __pyx_v_N;
if ((1 == 0)) abort();
{
__pyx_t_5 = (__pyx_t_3 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_5 > 0)
{
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_W2) lastprivate(__pyx_v_dist2) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) lastprivate(__pyx_v_r) lastprivate(__pyx_v_temp) lastprivate(__pyx_v_u) lastprivate(__pyx_v_u2) schedule(dynamic)
#endif /* _OPENMP */
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_5; __pyx_t_4++){
{
__pyx_v_i = (int)(0 + 1 * __pyx_t_4);
/* Initialize private variables to invalid values */
__pyx_v_W2 = ((double)__PYX_NAN());
__pyx_v_dist2 = ((double)__PYX_NAN());
__pyx_v_j = ((int)0xbad0bad0);
__pyx_v_k = ((int)0xbad0bad0);
__pyx_v_r = ((double)__PYX_NAN());
__pyx_v_temp = ((double)__PYX_NAN());
__pyx_v_u = ((double)__PYX_NAN());
__pyx_v_u2 = ((double)__PYX_NAN());
/* "potential.pyx":31
* with nogil, parallel():
* for i in prange(N, schedule="dynamic"):
* for j in range(i): # <<<<<<<<<<<<<<
*
* dist2 = 0
*/
__pyx_t_6 = __pyx_v_i;
__pyx_t_7 = __pyx_t_6;
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) {
__pyx_v_j = __pyx_t_8;
/* "potential.pyx":33
* for j in range(i):
*
* dist2 = 0 # <<<<<<<<<<<<<<
* for k in range(dims):
* temp = pos[i,k] - pos[j,k]
*/
__pyx_v_dist2 = 0.0;
/* "potential.pyx":34
*
* dist2 = 0
* for k in range(dims): # <<<<<<<<<<<<<<
* temp = pos[i,k] - pos[j,k]
* dist2 = dist2 + temp * temp
*/
__pyx_t_9 = __pyx_v_dims;
__pyx_t_10 = __pyx_t_9;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
__pyx_v_k = __pyx_t_11;
/* "potential.pyx":35
* dist2 = 0
* for k in range(dims):
* temp = pos[i,k] - pos[j,k] # <<<<<<<<<<<<<<
* dist2 = dist2 + temp * temp
*
*/
__pyx_t_12 = __pyx_v_i;
__pyx_t_13 = __pyx_v_k;
__pyx_t_14 = __pyx_v_j;
__pyx_t_15 = __pyx_v_k;
__pyx_v_temp = ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_pos.data + __pyx_t_12 * __pyx_v_pos.strides[0]) )) + __pyx_t_13)) ))) - (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_pos.data + __pyx_t_14 * __pyx_v_pos.strides[0]) )) + __pyx_t_15)) ))));
/* "potential.pyx":36
* for k in range(dims):
* temp = pos[i,k] - pos[j,k]
* dist2 = dist2 + temp * temp # <<<<<<<<<<<<<<
*
* r = sqrt(dist2)
*/
__pyx_v_dist2 = (__pyx_v_dist2 + (__pyx_v_temp * __pyx_v_temp));
}
/* "potential.pyx":38
* dist2 = dist2 + temp * temp
*
* r = sqrt(dist2) # <<<<<<<<<<<<<<
* u = r / h
*
*/
__pyx_v_r = sqrt(__pyx_v_dist2);
/* "potential.pyx":39
*
* r = sqrt(dist2)
* u = r / h # <<<<<<<<<<<<<<
*
* if u < 0.5:
*/
__pyx_v_u = (__pyx_v_r / __pyx_v_h);
/* "potential.pyx":41
* u = r / h
*
* if u < 0.5: # <<<<<<<<<<<<<<
* u2 = u * u
* W2 = -14/5. + u2 * (16/3. + u2 * (-48/5. + 32/5. * u))
*/
__pyx_t_16 = ((__pyx_v_u < 0.5) != 0);
if (__pyx_t_16) {
/* "potential.pyx":42
*
* if u < 0.5:
* u2 = u * u # <<<<<<<<<<<<<<
* W2 = -14/5. + u2 * (16/3. + u2 * (-48/5. + 32/5. * u))
* r = -h / W2
*/
__pyx_v_u2 = (__pyx_v_u * __pyx_v_u);
/* "potential.pyx":43
* if u < 0.5:
* u2 = u * u
* W2 = -14/5. + u2 * (16/3. + u2 * (-48/5. + 32/5. * u)) # <<<<<<<<<<<<<<
* r = -h / W2
* elif u < 1:
*/
__pyx_v_W2 = ((-14.0 / 5.) + (__pyx_v_u2 * ((16.0 / 3.) + (__pyx_v_u2 * ((-48.0 / 5.) + ((32.0 / 5.) * __pyx_v_u))))));
/* "potential.pyx":44
* u2 = u * u
* W2 = -14/5. + u2 * (16/3. + u2 * (-48/5. + 32/5. * u))
* r = -h / W2 # <<<<<<<<<<<<<<
* elif u < 1:
* W2 = 1/(15*u) - 16/5. + u*u * (32/3. + u * (-16 + u * (48/5. - 32/15. * u)))
*/
__pyx_v_r = ((-__pyx_v_h) / __pyx_v_W2);
/* "potential.pyx":41
* u = r / h
*
* if u < 0.5: # <<<<<<<<<<<<<<
* u2 = u * u
* W2 = -14/5. + u2 * (16/3. + u2 * (-48/5. + 32/5. * u))
*/
goto __pyx_L18;
}
/* "potential.pyx":45
* W2 = -14/5. + u2 * (16/3. + u2 * (-48/5. + 32/5. * u))
* r = -h / W2
* elif u < 1: # <<<<<<<<<<<<<<
* W2 = 1/(15*u) - 16/5. + u*u * (32/3. + u * (-16 + u * (48/5. - 32/15. * u)))
* r = -h / W2
*/
__pyx_t_16 = ((__pyx_v_u < 1.0) != 0);
if (__pyx_t_16) {
/* "potential.pyx":46
* r = -h / W2
* elif u < 1:
* W2 = 1/(15*u) - 16/5. + u*u * (32/3. + u * (-16 + u * (48/5. - 32/15. * u))) # <<<<<<<<<<<<<<
* r = -h / W2
*
*/
__pyx_v_W2 = (((1.0 / (15.0 * __pyx_v_u)) - (16.0 / 5.)) + ((__pyx_v_u * __pyx_v_u) * ((32.0 / 3.) + (__pyx_v_u * (-16.0 + (__pyx_v_u * ((48.0 / 5.) - ((32.0 / 15.) * __pyx_v_u))))))));
/* "potential.pyx":47
* elif u < 1:
* W2 = 1/(15*u) - 16/5. + u*u * (32/3. + u * (-16 + u * (48/5. - 32/15. * u)))
* r = -h / W2 # <<<<<<<<<<<<<<
*
* potential -= 1/r
*/
__pyx_v_r = ((-__pyx_v_h) / __pyx_v_W2);
/* "potential.pyx":45
* W2 = -14/5. + u2 * (16/3. + u2 * (-48/5. + 32/5. * u))
* r = -h / W2
* elif u < 1: # <<<<<<<<<<<<<<
* W2 = 1/(15*u) - 16/5. + u*u * (32/3. + u * (-16 + u * (48/5. - 32/15. * u)))
* r = -h / W2
*/
}
__pyx_L18:;
/* "potential.pyx":49
* r = -h / W2
*
* potential -= 1/r # <<<<<<<<<<<<<<
*
* return potential
*/
__pyx_v_potential = (__pyx_v_potential - (1.0 / __pyx_v_r));
}
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "potential.pyx":29
* h = 2.8 * epsilon
*
* with nogil, parallel(): # <<<<<<<<<<<<<<
* for i in prange(N, schedule="dynamic"):
* for j in range(i):
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "potential.pyx":51
* potential -= 1/r
*
* return potential # <<<<<<<<<<<<<<
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_potential); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 51, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "potential.pyx":16
* @cython.cdivision(True)
* @cython.initializedcheck(False)
* def sum_inv_pairdists(my_float [:,::1] pos, epsilon=1e-2): # <<<<<<<<<<<<<<
* """Calculate the sum of inverse pair distances for a collection of particles.
* Can be used to calculate potentials / potential energies.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("potential.sum_inv_pairdists", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_pos, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
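/* Usage sketch (an assumption about the Python side, not part of this
 * file): the dispatcher selects the specialization from the dtype of a
 * C-contiguous, writable (N, dims) array, e.g.
 *
 *   import numpy as np
 *   from potential import sum_inv_pairdists
 *   pos = np.random.rand(100, 3)                     # float64 -> __pyx_fuse_1
 *   U = sum_inv_pairdists(pos, epsilon=1e-2)
 *   U32 = sum_inv_pairdists(pos.astype(np.float32))  # float32 -> __pyx_fuse_0
 */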
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
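/* From here on: Cython's bundled View.MemoryView utility code
 * (cython.view.array and the typed-memoryview machinery) that every
 * module using memoryviews carries; it is not specific to potential.pyx. */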
/* Python wrapper */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_shape = 0;
Py_ssize_t __pyx_v_itemsize;
PyObject *__pyx_v_format = 0;
PyObject *__pyx_v_mode = 0;
int __pyx_v_allocate_buffer;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
PyObject* values[5] = {0,0,0,0,0};
values[3] = ((PyObject *)__pyx_n_s_c);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode);
if (value) { values[3] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 4:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer);
if (value) { values[4] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_shape = ((PyObject*)values[0]);
__pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_v_format = values[2];
__pyx_v_mode = values[3];
if (values[4]) {
__pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error)
} else {
/* "View.MemoryView":123
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
* mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
*
* cdef int idx
*/
__pyx_v_allocate_buffer = ((int)1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error)
if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error)
}
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
int __pyx_v_idx;
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_dim;
PyObject **__pyx_v_p;
char __pyx_v_order;
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
int __pyx_t_8;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
Py_ssize_t __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
__Pyx_INCREF(__pyx_v_format);
/* "View.MemoryView":129
* cdef PyObject **p
*
* self.ndim = <int> len(shape) # <<<<<<<<<<<<<<
* self.itemsize = itemsize
*
*/
if (unlikely(__pyx_v_shape == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 129, __pyx_L1_error)
}
__pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error)
__pyx_v_self->ndim = ((int)__pyx_t_1);
/* "View.MemoryView":130
*
* self.ndim = <int> len(shape)
* self.itemsize = itemsize # <<<<<<<<<<<<<<
*
* if not self.ndim:
*/
__pyx_v_self->itemsize = __pyx_v_itemsize;
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
__pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 133, __pyx_L1_error)
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
}
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
__pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 136, __pyx_L1_error)
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
}
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
__pyx_t_2 = PyBytes_Check(__pyx_v_format);
__pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":139
*
* if not isinstance(format, bytes):
* format = format.encode('ASCII') # <<<<<<<<<<<<<<
* self._format = format # keep a reference to the byte string
* self.format = self._format
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII);
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
}
/* "View.MemoryView":140
* if not isinstance(format, bytes):
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
* self.format = self._format
*
*/
if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error)
__pyx_t_3 = __pyx_v_format;
__Pyx_INCREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__Pyx_GOTREF(__pyx_v_self->_format);
__Pyx_DECREF(__pyx_v_self->_format);
__pyx_v_self->_format = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":141
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
* self.format = self._format # <<<<<<<<<<<<<<
*
*
*/
if (unlikely(__pyx_v_self->_format == Py_None)) {
PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
__PYX_ERR(1, 141, __pyx_L1_error)
}
__pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error)
__pyx_v_self->format = __pyx_t_7;
/* "View.MemoryView":144
*
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
* self._strides = self._shape + self.ndim
*
*/
__pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
/* "View.MemoryView":145
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
* self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
*
* if not self._shape:
*/
__pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 148, __pyx_L1_error)
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
}
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
__pyx_t_8 = 0;
__pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
for (;;) {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_9;
__pyx_v_idx = __pyx_t_8;
__pyx_t_8 = (__pyx_t_8 + 1);
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
__pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":153
* for idx, dim in enumerate(shape):
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<<
* self._shape[idx] = dim
*
*/
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6);
__pyx_t_5 = 0;
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 153, __pyx_L1_error)
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
}
/* "View.MemoryView":154
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim # <<<<<<<<<<<<<<
*
* cdef char order
*/
(__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":158
* cdef char order
* if mode == 'fortran':
* order = b'F' # <<<<<<<<<<<<<<
* self.mode = u'fortran'
* elif mode == 'c':
*/
__pyx_v_order = 'F';
/* "View.MemoryView":159
* if mode == 'fortran':
* order = b'F'
* self.mode = u'fortran' # <<<<<<<<<<<<<<
* elif mode == 'c':
* order = b'C'
*/
__Pyx_INCREF(__pyx_n_u_fortran);
__Pyx_GIVEREF(__pyx_n_u_fortran);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_fortran;
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
goto __pyx_L10;
}
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error)
if (likely(__pyx_t_4)) {
/* "View.MemoryView":161
* self.mode = u'fortran'
* elif mode == 'c':
* order = b'C' # <<<<<<<<<<<<<<
* self.mode = u'c'
* else:
*/
__pyx_v_order = 'C';
/* "View.MemoryView":162
* elif mode == 'c':
* order = b'C'
* self.mode = u'c' # <<<<<<<<<<<<<<
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*/
__Pyx_INCREF(__pyx_n_u_c);
__Pyx_GIVEREF(__pyx_n_u_c);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_c;
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
goto __pyx_L10;
}
/* "View.MemoryView":164
* self.mode = u'c'
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<<
*
* self.len = fill_contig_strides_array(self._shape, self._strides,
*/
/*else*/ {
__pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 164, __pyx_L1_error)
}
__pyx_L10:;
/* "View.MemoryView":166
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*
* self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<<
* itemsize, self.ndim, order)
*
*/
__pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
/* "View.MemoryView":169
* itemsize, self.ndim, order)
*
* self.free_data = allocate_buffer # <<<<<<<<<<<<<<
* self.dtype_is_object = format == b'O'
* if allocate_buffer:
*/
__pyx_v_self->free_data = __pyx_v_allocate_buffer;
/* "View.MemoryView":170
*
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
* if allocate_buffer:
*
*/
__pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_4;
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_4 = (__pyx_v_allocate_buffer != 0);
if (__pyx_t_4) {
/* "View.MemoryView":174
*
*
* self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<<
* if not self.data:
* raise MemoryError("unable to allocate array data.")
*/
__pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 176, __pyx_L1_error)
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
}
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
__pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_4) {
/* "View.MemoryView":179
*
* if self.dtype_is_object:
* p = <PyObject **> self.data # <<<<<<<<<<<<<<
* for i in range(self.len / itemsize):
* p[i] = Py_None
*/
__pyx_v_p = ((PyObject **)__pyx_v_self->data);
/* "View.MemoryView":180
* if self.dtype_is_object:
* p = <PyObject **> self.data
* for i in range(self.len / itemsize): # <<<<<<<<<<<<<<
* p[i] = Py_None
* Py_INCREF(Py_None)
*/
/* Guard the Py_ssize_t division self.len / itemsize before performing it. */
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 180, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
/* Signed Py_ssize_t: PY_SSIZE_T_MIN / -1 is the one quotient that overflows. */
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 180, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize);
__pyx_t_9 = __pyx_t_1;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) {
__pyx_v_i = __pyx_t_11;
/* "View.MemoryView":181
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
* p[i] = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
(__pyx_v_p[__pyx_v_i]) = Py_None;
/* "View.MemoryView":182
* for i in range(self.len / itemsize):
* p[i] = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
Py_INCREF(Py_None);
}
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
}
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_format);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
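/* The constructor above reduces to three steps: fill self._strides and
   self.len for the requested C or Fortran layout, malloc the backing buffer
   when allocate_buffer is true, and, for object ('O') format, seed every
   slot with an owned reference to Py_None so later DECREFs during assignment
   or teardown are always safe. A minimal sketch of that seeding idea,
   assuming only <Python.h> (nitems stands in for self.len / itemsize): */
#if 0
static void seed_with_none(PyObject **p, Py_ssize_t nitems)
{
    Py_ssize_t i;
    for (i = 0; i < nitems; i++) {
        p[i] = Py_None;       /* store the pointer... */
        Py_INCREF(Py_None);   /* ...and take the reference the slot now owns */
    }
}
#endif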
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* CPython itself never passes view == NULL here; the guard protects direct C callers. */
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":186
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1 # <<<<<<<<<<<<<<
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = -1;
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error)
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":188
* cdef int bufmode = -1
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
goto __pyx_L3;
}
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error)
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":190
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
*/
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
}
__pyx_L3:;
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 192, __pyx_L1_error)
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
}
/* "View.MemoryView":193
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data # <<<<<<<<<<<<<<
* info.len = self.len
* info.ndim = self.ndim
*/
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":194
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
* info.len = self.len # <<<<<<<<<<<<<<
* info.ndim = self.ndim
* info.shape = self._shape
*/
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":195
* info.buf = self.data
* info.len = self.len
* info.ndim = self.ndim # <<<<<<<<<<<<<<
* info.shape = self._shape
* info.strides = self._strides
*/
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":196
* info.len = self.len
* info.ndim = self.ndim
* info.shape = self._shape # <<<<<<<<<<<<<<
* info.strides = self._strides
* info.suboffsets = NULL
*/
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":197
* info.ndim = self.ndim
* info.shape = self._shape
* info.strides = self._strides # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = self.itemsize
*/
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":198
* info.shape = self._shape
* info.strides = self._strides
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = self.itemsize
* info.readonly = 0
*/
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":199
* info.strides = self._strides
* info.suboffsets = NULL
* info.itemsize = self.itemsize # <<<<<<<<<<<<<<
* info.readonly = 0
*
*/
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":200
* info.suboffsets = NULL
* info.itemsize = self.itemsize
* info.readonly = 0 # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
__pyx_v_info->readonly = 0;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":203
*
* if flags & PyBUF_FORMAT:
* info.format = self.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":205
* info.format = self.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.obj = self
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* "View.MemoryView":207
* info.format = NULL
*
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
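/* __getbuffer__ above implements the exporter half of the PEP 3118 buffer
   protocol: it rejects requests whose contiguity flags the array cannot
   honour, then fills the Py_buffer fields from the array's own bookkeeping.
   A consumer-side sketch using only documented CPython calls (assuming
   <Python.h>; any contiguity is acceptable here, mirroring what this
   exporter supports): */
#if 0
static Py_ssize_t total_bytes(PyObject *exporter)
{
    Py_buffer view;
    Py_ssize_t len;
    if (PyObject_GetBuffer(exporter, &view, PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) < 0)
        return -1;            /* the exporter has set an exception */
    len = view.len;           /* itemsize times the product of shape[] */
    PyBuffer_Release(&view);  /* releases the reference held in view.obj */
    return len;
}
#endif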
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* Python wrapper */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
__pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":213
* def __dealloc__(array self):
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data) # <<<<<<<<<<<<<<
* elif self.free_data:
* if self.dtype_is_object:
*/
__pyx_v_self->callback_free_data(__pyx_v_self->data);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
goto __pyx_L3;
}
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
__pyx_t_1 = (__pyx_v_self->free_data != 0);
if (__pyx_t_1) {
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":216
* elif self.free_data:
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<<
* self._strides, self.ndim, False)
* free(self.data)
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
}
/* "View.MemoryView":218
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
* free(self.data) # <<<<<<<<<<<<<<
* PyObject_Free(self._shape)
*
*/
free(__pyx_v_self->data);
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
}
__pyx_L3:;
/* "View.MemoryView":219
* self._strides, self.ndim, False)
* free(self.data)
* PyObject_Free(self._shape) # <<<<<<<<<<<<<<
*
* @property
*/
PyObject_Free(__pyx_v_self->_shape);
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
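/* __dealloc__ dispatches ownership in strict precedence: a registered
   callback_free_data wins; otherwise free() runs only when free_data is set
   (i.e. __cinit__ malloc'd the buffer); otherwise the data pointer is
   treated as borrowed and left alone. Only _shape is handed to
   PyObject_Free because, in the upstream source, _strides points into the
   same PyObject_Malloc'd block. A sketch of that dispatch, with
   hypothetical names: */
#if 0
typedef void (*free_cb)(void *);
static void release_data(char *data, free_cb cb, int owns_data)
{
    if (cb != NULL)
        cb(data);        /* a foreign allocator supplied the buffer */
    else if (owns_data)
        free(data);      /* the buffer came from malloc() in __cinit__ */
    /* else: wrapped buffer (array_cwrapper with buf != NULL); not ours */
}
#endif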
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":223
* @property
* def memview(self):
* return self.get_memview() # <<<<<<<<<<<<<<
*
* @cname('get_memview')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_memview", 0);
/* "View.MemoryView":227
* @cname('get_memview')
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
* return memoryview(self, flags, self.dtype_is_object)
*
*/
__pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
/* "View.MemoryView":228
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0; /* references now owned by the tuple __pyx_t_3 */
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
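/* get_memview requests PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE
   and then calls the memoryview type with a hand-built 3-tuple. An
   equivalent, shorter spelling of that call, assuming mv_type is the
   View.MemoryView memoryview type object: */
#if 0
static PyObject *make_view(PyObject *mv_type, PyObject *self, int flags, int is_obj)
{
    /* "Oii" builds the (self, flags, dtype_is_object) tuple for us */
    return PyObject_CallFunction(mv_type, "Oii", self, flags, is_obj);
}
#endif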
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* Python wrapper */
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":231
*
* def __len__(self):
* return self._shape[0] # <<<<<<<<<<<<<<
*
* def __getattr__(self, attr):
*/
__pyx_r = (__pyx_v_self->_shape[0]);
goto __pyx_L0;
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getattr__", 0);
/* "View.MemoryView":234
*
* def __getattr__(self, attr):
* return getattr(self.memview, attr) # <<<<<<<<<<<<<<
*
* def __getitem__(self, item):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":237
*
* def __getitem__(self, item):
* return self.memview[item] # <<<<<<<<<<<<<<
*
* def __setitem__(self, item, value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* "View.MemoryView":240
*
* def __setitem__(self, item, value):
* self.memview[item] = value # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
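/* __getattr__, __getitem__ and __setitem__ all follow the same forwarding
   shape: fetch the memview property (which builds a fresh memoryview), run
   the operation on it, then drop the temporary. A sketch of that shared
   pattern, assuming only <Python.h>: */
#if 0
static PyObject *forward_getitem(PyObject *self, PyObject *item)
{
    PyObject *mv = PyObject_GetAttrString(self, "memview");
    PyObject *result;
    if (mv == NULL)
        return NULL;
    result = PyObject_GetItem(mv, item);  /* PyObject_SetItem for __setitem__ */
    Py_DECREF(mv);                        /* release the temporary memoryview */
    return result;
}
#endif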
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
struct __pyx_array_obj *__pyx_v_result = 0;
struct __pyx_array_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("array_cwrapper", 0);
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
__pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":249
*
* if buf == NULL:
* result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<<
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
/*else*/ {
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":252
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False) # <<<<<<<<<<<<<<
* result.data = buf
*
*/
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error)
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":253
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False)
* result.data = buf # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->data = __pyx_v_buf;
}
__pyx_L3:;
/* "View.MemoryView":255
* result.data = buf
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
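/* array_cwrapper takes two construction paths: with buf == NULL it lets
   __cinit__ allocate (the 4-argument call above); otherwise it passes
   allocate_buffer=False and aliases result.data to the caller's memory, so
   free_data stays false and __dealloc__ will not free it. A hypothetical
   helper showing the wrap-don't-copy call from C: */
#if 0
static PyObject *new_wrapping_array(PyObject *array_type, PyObject *shape,
                                    Py_ssize_t itemsize, const char *format)
{
    PyObject *args = Py_BuildValue("(Onys)", shape, itemsize, format, "c");
    PyObject *kwargs = Py_BuildValue("{s:O}", "allocate_buffer", Py_False);
    PyObject *result = NULL;
    if (args != NULL && kwargs != NULL)
        result = PyObject_Call(array_type, args, kwargs);
    Py_XDECREF(args);
    Py_XDECREF(kwargs);
    return result;   /* the caller must still alias ->data, as above */
}
#endif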
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* Python wrapper */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_name = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
PyObject* values[1] = {0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
}
__pyx_v_name = values[0];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__", 0);
/* "View.MemoryView":282
* cdef object name
* def __init__(self, name):
* self.name = name # <<<<<<<<<<<<<<
* def __repr__(self):
* return self.name
*/
__Pyx_INCREF(__pyx_v_name);
__Pyx_GIVEREF(__pyx_v_name);
__Pyx_GOTREF(__pyx_v_self->name);
__Pyx_DECREF(__pyx_v_self->name);
__pyx_v_self->name = __pyx_v_name;
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
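/* The INCREF/GIVEREF/GOTREF/DECREF sequence above is the generated idiom
   for `self.attr = value` on a PyObject* field; GIVEREF and GOTREF are
   refnanny bookkeeping that compiles away, leaving the classic
   safe-assignment order: take the new reference before releasing the old
   one. A sketch: */
#if 0
static void assign_slot(PyObject **slot, PyObject *value)
{
    PyObject *old = *slot;
    Py_INCREF(value);   /* own the new value first */
    *slot = value;
    Py_XDECREF(old);    /* release the old value last; its destructor may run code */
}
#endif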
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* Python wrapper */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":284
* self.name = name
* def __repr__(self):
* return self.name # <<<<<<<<<<<<<<
*
* cdef generic = Enum("<strided and direct or indirect>")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->name);
__pyx_r = __pyx_v_self->name;
goto __pyx_L0;
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_v_state = 0;
PyObject *__pyx_v__dict = 0;
int __pyx_v_use_setstate;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":5
* cdef object _dict
* cdef bint use_setstate
* state = (self.name,) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_self->name);
__Pyx_GIVEREF(__pyx_v_self->name);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name);
__pyx_v_state = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "(tree fragment)":6
* cdef bint use_setstate
* state = (self.name,)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v__dict = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_2 = (__pyx_v__dict != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "(tree fragment)":8
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict);
__pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
__pyx_t_4 = 0;
/* "(tree fragment)":9
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.name is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":11
* use_setstate = True
* else:
* use_setstate = self.name is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_self->name != Py_None);
__pyx_v_use_setstate = __pyx_t_3;
}
__pyx_L3:;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
__pyx_t_3 = (__pyx_v_use_setstate != 0);
if (__pyx_t_3) {
/* "(tree fragment)":13
* use_setstate = self.name is not None
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None);
__pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state);
__pyx_t_4 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
}
/* "(tree fragment)":15
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
__pyx_t_5 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
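/* __reduce_cython__ returns either the 3-tuple
   (__pyx_unpickle_Enum, (type, checksum, None), state), when __setstate__
   must run, or the 2-tuple form with state folded into the arguments. The
   literal __pyx_int_184977713 is the source checksum 0xb068931 rendered in
   decimal: 11*16^6 + 6*16^4 + 8*16^3 + 9*16^2 + 3*16 + 1 = 184977713;
   unpickling compares it so pickles from an incompatible class layout are
   rejected. The equality can be checked at compile time: */
#if 0
#if 0xb068931 != 184977713
#error "checksum spellings disagree"
#endif
#endif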
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":17
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
__pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
Py_intptr_t __pyx_v_aligned_p;
size_t __pyx_v_offset;
void *__pyx_r;
int __pyx_t_1;
/* "View.MemoryView":300
* cdef void *align_pointer(void *memory, size_t alignment) nogil:
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<<
* cdef size_t offset
*
*/
__pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory);
/* "View.MemoryView":304
*
* with cython.cdivision(True):
* offset = aligned_p % alignment # <<<<<<<<<<<<<<
*
* if offset > 0:
*/
__pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment);
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
__pyx_t_1 = ((__pyx_v_offset > 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":307
*
* if offset > 0:
* aligned_p += alignment - offset # <<<<<<<<<<<<<<
*
* return <void *> aligned_p
*/
__pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset));
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
}
/* "View.MemoryView":309
* aligned_p += alignment - offset
*
* return <void *> aligned_p # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = ((void *)__pyx_v_aligned_p);
goto __pyx_L0;
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
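/* Worked example for align_pointer: with memory == 0x1003 and
   alignment == 8, offset = 0x1003 % 8 = 3, so the pointer advances by
   8 - 3 = 5 to 0x1008, the next 8-byte boundary; when offset is already 0
   it is returned unchanged. The same idea as a standalone function,
   assuming <stdint.h>: */
#if 0
static void *align_up(void *p, size_t alignment)
{
    uintptr_t a = (uintptr_t)p;
    uintptr_t offset = a % alignment;   /* distance past the previous boundary */
    if (offset != 0)
        a += alignment - offset;        /* advance to the next boundary */
    return (void *)a;
}
#endif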
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* Python wrapper */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_obj = 0;
int __pyx_v_flags;
int __pyx_v_dtype_is_object;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object);
if (value) { values[2] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_obj = values[0];
__pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
if (values[2]) {
__pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
} else {
__pyx_v_dtype_is_object = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
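/* The wrapper above unpacks arguments with a fallthrough switch: positional
   values are peeled off the tuple from the highest arity down, and the
   kwargs dict is consulted only for parameters not already bound, with
   __Pyx_ParseOptionalKeywords rejecting any leftovers. The same peeling
   idea for one required and one optional parameter (values[] holds
   borrowed references, as in the generated code): */
#if 0
static int unpack_two(PyObject *args, PyObject *values[2])
{
    switch (PyTuple_GET_SIZE(args)) {
        case 2: values[1] = PyTuple_GET_ITEM(args, 1);  /* fall through */
        case 1: values[0] = PyTuple_GET_ITEM(args, 0);  /* fall through */
        case 0: break;                                  /* defaults apply */
        default: return -1;                             /* wrong arity */
    }
    return 0;
}
#endif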
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
/* "View.MemoryView":346
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj # <<<<<<<<<<<<<<
* self.flags = flags
* if type(self) is memoryview or obj is not None:
*/
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
__Pyx_GOTREF(__pyx_v_self->obj);
__Pyx_DECREF(__pyx_v_self->obj);
__pyx_v_self->obj = __pyx_v_obj;
/* "View.MemoryView":347
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj
* self.flags = flags # <<<<<<<<<<<<<<
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
*/
__pyx_v_self->flags = __pyx_v_flags;
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
__pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
__pyx_t_3 = (__pyx_t_2 != 0);
if (!__pyx_t_3) {
} else {
__pyx_t_1 = __pyx_t_3;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_3 = (__pyx_v_obj != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":349
* self.flags = flags
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<<
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
*/
__pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error)
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":351
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
/* "View.MemoryView":352
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* global __pyx_memoryview_thread_locks_used
*/
Py_INCREF(Py_None);
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
}
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
}
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
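  /* 8 here is the inlined value of THREAD_LOCKS_PREALLOCATED. */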
__pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":356
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
*/
__pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
/* "View.MemoryView":357
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<<
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":359
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<<
* if self.lock is NULL:
* raise MemoryError
*/
__pyx_v_self->lock = PyThread_allocate_lock();
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":361
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
* raise MemoryError # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error)
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
}
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":364
*
* if flags & PyBUF_FORMAT:
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<<
* else:
* self.dtype_is_object = dtype_is_object
*/
__pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_self->dtype_is_object = __pyx_t_1;
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
goto __pyx_L10;
}
/* "View.MemoryView":366
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
* self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<<
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
*/
/*else*/ {
__pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
}
__pyx_L10:;
/* "View.MemoryView":368
* self.dtype_is_object = dtype_is_object
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<<
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL
*/
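  /* align_pointer() rounds the address of acquisition_count[] up to a
   * multiple of sizeof(__pyx_atomic_int), presumably so the counter is safe
   * to use with the atomic increment/decrement helpers. */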
__pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
/* "View.MemoryView":370
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL # <<<<<<<<<<<<<<
*
* def __dealloc__(memoryview self):
*/
__pyx_v_self->typeinfo = NULL;
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* Python wrapper */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
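/* memoryview.__dealloc__ body: release the buffer (or drop the Py_None
 * placeholder installed in __cinit__), then return self.lock to the shared
 * pool; locks that did not come from the pool are freed outright. */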
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyThread_type_lock __pyx_t_6;
PyThread_type_lock __pyx_t_7;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
__pyx_t_1 = (__pyx_v_self->obj != Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":374
* def __dealloc__(memoryview self):
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<<
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
*/
__Pyx_ReleaseBuffer((&__pyx_v_self->view));
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
goto __pyx_L3;
}
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
__pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":377
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
* (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<<
* Py_DECREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = NULL;
/* "View.MemoryView":378
*
* (<__pyx_buffer *> &self.view).obj = NULL
* Py_DECREF(Py_None) # <<<<<<<<<<<<<<
*
* cdef int i
*/
Py_DECREF(Py_None);
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
}
__pyx_L3:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
__pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":383
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<<
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
*/
__pyx_t_3 = __pyx_memoryview_thread_locks_used;
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
__pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":385
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<<
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
__pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":388
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<<
* break
* else:
*/
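        /* Swap the released lock with the last in-use slot so the pool
         * stays densely packed after removal. */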
__pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
__pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
/* "View.MemoryView":387
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break
*/
(__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6;
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7;
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
}
/* "View.MemoryView":389
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break # <<<<<<<<<<<<<<
* else:
* PyThread_free_lock(self.lock)
*/
goto __pyx_L6_break;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
}
}
/*else*/ {
/* "View.MemoryView":391
* break
* else:
* PyThread_free_lock(self.lock) # <<<<<<<<<<<<<<
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
*/
PyThread_free_lock(__pyx_v_self->lock);
}
__pyx_L6_break:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
}
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
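/* get_item_pointer: starting from view.buf, applies one index per dimension
 * through __pyx_pybuffer_index(), which handles strides and suboffsets and
 * raises IndexError on out-of-bounds access. */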
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
Py_ssize_t __pyx_v_dim;
char *__pyx_v_itemp;
PyObject *__pyx_v_idx = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t __pyx_t_3;
PyObject *(*__pyx_t_4)(PyObject *);
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
char *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_item_pointer", 0);
/* "View.MemoryView":395
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<<
*
* for dim, idx in enumerate(index):
*/
__pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
__pyx_t_1 = 0;
if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
__pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
__pyx_t_4 = NULL;
} else {
__pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_4)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
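      /* The constant-folded "if (unlikely(0 < 0))" checks above appear to be
       * the remains of an elided error check; the list/tuple fast path
       * cannot fail here. */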
} else {
if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
}
} else {
__pyx_t_5 = __pyx_t_4(__pyx_t_2);
if (unlikely(!__pyx_t_5)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 397, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_5);
}
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_1;
__pyx_t_1 = (__pyx_t_1 + 1);
/* "View.MemoryView":398
*
* for dim, idx in enumerate(index):
* itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<<
*
* return itemp
*/
__pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error)
__pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_7;
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":400
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
* return itemp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_itemp;
goto __pyx_L0;
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":403
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
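/* memoryview.__getitem__ body: mv[...] returns self unchanged; otherwise
 * _unellipsify() expands the index to full dimensionality, slice indices
 * yield a new view via memview_slice(), and all-integer indices resolve to
 * a single element converted to a Python object. */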
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_indices = NULL;
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":404
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":405
* def __getitem__(memoryview self, object index):
* if index is Ellipsis:
* return self # <<<<<<<<<<<<<<
*
* have_slices, indices = _unellipsify(index, self.view.ndim)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__pyx_r = ((PyObject *)__pyx_v_self);
goto __pyx_L0;
/* "View.MemoryView":404
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
}
/* "View.MemoryView":407
* return self
*
* have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* cdef char *itemp
*/
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (likely(__pyx_t_3 != Py_None)) {
PyObject* sequence = __pyx_t_3;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 407, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_v_indices = __pyx_t_5;
__pyx_t_5 = 0;
/* "View.MemoryView":410
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error)
if (__pyx_t_2) {
/* "View.MemoryView":411
* cdef char *itemp
* if have_slices:
* return memview_slice(self, indices) # <<<<<<<<<<<<<<
* else:
* itemp = self.get_item_pointer(indices)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":410
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
}
/* "View.MemoryView":413
* return memview_slice(self, indices)
* else:
* itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<<
* return self.convert_item_to_object(itemp)
*
*/
/*else*/ {
__pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":414
* else:
* itemp = self.get_item_pointer(indices)
* return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<<
*
* def __setitem__(memoryview self, object index, object value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":403
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_indices);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":416
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* Python wrapper */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
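/* memoryview.__setitem__ body: rejects read-only buffers, then dispatches:
 * slice indices assign from another slice (when is_slice coercion succeeds)
 * or broadcast a scalar; all-integer indices assign one element in place. */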
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_obj = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
__Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":417
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
__pyx_t_1 = (__pyx_v_self->view.readonly != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":418
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 418, __pyx_L1_error)
/* "View.MemoryView":417
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
}
/* "View.MemoryView":420
* raise TypeError("Cannot assign to read-only memoryview")
*
* have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* if have_slices:
*/
__pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (likely(__pyx_t_2 != Py_None)) {
PyObject* sequence = __pyx_t_2;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 420, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_3;
__pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":422
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":423
*
* if have_slices:
* obj = self.is_slice(value) # <<<<<<<<<<<<<<
* if obj:
* self.setitem_slice_assignment(self[index], obj)
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_v_obj = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":424
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":425
* obj = self.is_slice(value)
* if obj:
* self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
* else:
* self.setitem_slice_assign_scalar(self[index], value)
*/
__pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "View.MemoryView":424
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":427
* self.setitem_slice_assignment(self[index], obj)
* else:
* self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
* else:
* self.setitem_indexed(index, value)
*/
/*else*/ {
__pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error)
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L5:;
/* "View.MemoryView":422
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
goto __pyx_L4;
}
/* "View.MemoryView":429
* self.setitem_slice_assign_scalar(self[index], value)
* else:
* self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
*
* cdef is_slice(self, obj):
*/
/*else*/ {
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L4:;
/* "View.MemoryView":416
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":431
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
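/* is_slice: coerce obj to a memoryview with PyBUF_WRITABLE cleared and
 * PyBUF_ANY_CONTIGUOUS added, so read-only exporters are accepted as
 * assignment sources; returns None when the conversion raises TypeError. */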
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_slice", 0);
__Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":432
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "View.MemoryView":434
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":435
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object) # <<<<<<<<<<<<<<
* except TypeError:
* return None
*/
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":434
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
__pyx_t_7 = 0;
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L9_try_end;
__pyx_L4_error:;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
/* "View.MemoryView":436
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
* except TypeError: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":437
* self.dtype_is_object)
* except TypeError:
* return None # <<<<<<<<<<<<<<
*
* return obj
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L7_except_return;
}
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L7_except_return:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L0;
__pyx_L9_try_end:;
}
/* "View.MemoryView":432
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
}
/* "View.MemoryView":439
* return None
*
* return obj # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assignment(self, dst, src):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_obj);
__pyx_r = __pyx_v_obj;
goto __pyx_L0;
/* "View.MemoryView":431
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":441
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
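/* setitem_slice_assignment: unwrap both memoryviews into __Pyx_memviewslice
 * structs and copy element-wise with memoryview_copy_contents(), which also
 * handles the refcounting needed when dtype_is_object is set. */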
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
__Pyx_memviewslice __pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_src_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
__Pyx_memviewslice *__pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
/* "View.MemoryView":445
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error)
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error)
/* "View.MemoryView":446
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<<
* src.ndim, dst.ndim, self.dtype_is_object)
*
*/
if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error)
__pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error)
/* "View.MemoryView":447
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":445
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
__pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error)
/* "View.MemoryView":441
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":449
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
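/* setitem_slice_assign_scalar: pack value once into a temporary item buffer,
 * then broadcast it over the destination slice. A 128-int stack array is
 * used unless view.itemsize exceeds it, in which case a heap buffer is
 * allocated and released in the finally clause below. */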
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
int __pyx_v_array[0x80];
void *__pyx_v_tmp;
void *__pyx_v_item;
__Pyx_memviewslice *__pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_tmp_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
char const *__pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
PyObject *__pyx_t_12 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
/* "View.MemoryView":451
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
* cdef int array[128]
* cdef void *tmp = NULL # <<<<<<<<<<<<<<
* cdef void *item
*
*/
__pyx_v_tmp = NULL;
/* "View.MemoryView":456
* cdef __Pyx_memviewslice *dst_slice
* cdef __Pyx_memviewslice tmp_slice
* dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
*
* if <size_t>self.view.itemsize > sizeof(array):
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error)
__pyx_v_dst_slice = __pyx_t_1;
/* "View.MemoryView":458
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
__pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":459
*
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<<
* if tmp == NULL:
* raise MemoryError
*/
__pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
/* "View.MemoryView":460
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
__pyx_t_2 = ((__pyx_v_tmp == NULL) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":461
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
* raise MemoryError # <<<<<<<<<<<<<<
* item = tmp
* else:
*/
PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error)
/* "View.MemoryView":460
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
}
/* "View.MemoryView":462
* if tmp == NULL:
* raise MemoryError
* item = tmp # <<<<<<<<<<<<<<
* else:
* item = <void *> array
*/
__pyx_v_item = __pyx_v_tmp;
/* "View.MemoryView":458
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
goto __pyx_L3;
}
/* "View.MemoryView":464
* item = tmp
* else:
* item = <void *> array # <<<<<<<<<<<<<<
*
* try:
*/
/*else*/ {
__pyx_v_item = ((void *)__pyx_v_array);
}
__pyx_L3:;
/* "View.MemoryView":466
* item = <void *> array
*
* try: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value
*/
/*try:*/ {
/* "View.MemoryView":467
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
__pyx_t_2 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_2) {
/* "View.MemoryView":468
* try:
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<<
* else:
* self.assign_item_from_object(<char *> item, value)
*/
(((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
/* "View.MemoryView":467
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":470
* (<PyObject **> item)[0] = <PyObject *> value
* else:
* self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L8:;
/* "View.MemoryView":474
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
__pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":475
*
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
* item, self.dtype_is_object)
*/
__pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":474
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
}
/* "View.MemoryView":476
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
* item, self.dtype_is_object)
* finally:
*/
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
}
/* "View.MemoryView":479
* item, self.dtype_is_object)
* finally:
* PyMem_Free(tmp) # <<<<<<<<<<<<<<
*
* cdef setitem_indexed(self, index, value):
*/
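  /* finally-clause expansion: the normal path frees tmp and falls through;
   * the error path stashes any pending exception, frees tmp, restores the
   * exception state and jumps to __pyx_L1_error. */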
/*finally:*/ {
/*normal exit:*/{
PyMem_Free(__pyx_v_tmp);
goto __pyx_L7;
}
__pyx_L6_error:;
/*exception exit:*/{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_7);
__Pyx_XGOTREF(__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_10);
__Pyx_XGOTREF(__pyx_t_11);
__Pyx_XGOTREF(__pyx_t_12);
__pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename;
{
PyMem_Free(__pyx_v_tmp);
}
if (PY_MAJOR_VERSION >= 3) {
__Pyx_XGIVEREF(__pyx_t_10);
__Pyx_XGIVEREF(__pyx_t_11);
__Pyx_XGIVEREF(__pyx_t_12);
__Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12);
}
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_XGIVEREF(__pyx_t_9);
__Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9);
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6;
goto __pyx_L1_error;
}
__pyx_L7:;
}
/* "View.MemoryView":449
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
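/* setitem_indexed: locate the element with get_item_pointer() and pack
 * value into it via assign_item_from_object(). */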
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
char *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_indexed", 0);
/* "View.MemoryView":482
*
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
* self.assign_item_from_object(itemp, value)
*
*/
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_1;
/* "View.MemoryView":483
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
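/* convert_item_to_object: generic fallback that round-trips the raw item
 * bytes through struct.unpack(); since unpack() always returns a tuple,
 * single-field formats are unwrapped to the bare value. */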
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
size_t __pyx_t_10;
int __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":488
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef bytes bytesitem
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":491
* cdef bytes bytesitem
*
* bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
* try:
* result = struct.unpack(self.view.format, bytesitem)
*/
__pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":493
* bytesitem = itemp[:self.view.itemsize]
* try:
* result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
* except struct.error:
* raise ValueError("Unable to convert item to object")
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6);
__Pyx_INCREF(__pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
}
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
/*else:*/ {
__pyx_t_10 = strlen(__pyx_v_self->view.format);
__pyx_t_11 = ((__pyx_t_10 == 1) != 0);
if (__pyx_t_11) {
/* "View.MemoryView":498
* else:
* if len(self.view.format) == 1:
* return result[0] # <<<<<<<<<<<<<<
* return result
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6_except_return;
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
}
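/* struct.unpack() always returns a tuple, so a single-character format such
 * as 'b' yields a 1-tuple; result[0] unwraps it here, e.g.
 * struct.unpack('b', b'\x07') == (7,). */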
/* "View.MemoryView":499
* if len(self.view.format) == 1:
* return result[0]
* return result # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
__pyx_L3_error:;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "View.MemoryView":494
* try:
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error: # <<<<<<<<<<<<<<
* raise ValueError("Unable to convert item to object")
* else:
*/
__Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9);
__pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0;
if (__pyx_t_8) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_1);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(1, 495, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
}
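/* Label map for the try/except/else above: __pyx_L3_error catches exceptions
 * raised in the try body, __pyx_L5_except_error propagates failures out of
 * the handler, and __pyx_L6_except_return services the `return` statements
 * in the else branch; both terminal paths restore the saved exception state
 * via __Pyx_ExceptionReset before leaving the block. */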
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
char *__pyx_t_11;
char *__pyx_t_12;
char *__pyx_t_13;
char *__pyx_t_14;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":504
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef char c
* cdef bytes bytesvalue
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":510
*
* if isinstance(value, tuple):
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
* else:
* bytesvalue = struct.pack(self.view.format, value)
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
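/* The splat call struct.pack(self.view.format, *value) is lowered to tuple
 * concatenation: (format,) + tuple(value), built with PyTuple_New /
 * __Pyx_PySequence_Tuple / PyNumber_Add and passed to pack through
 * __Pyx_PyObject_Call. */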
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":512
* bytesvalue = struct.pack(self.view.format, *value)
* else:
* bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
*
* for i, c in enumerate(bytesvalue):
*/
/*else*/ {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
__pyx_t_7 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_7 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_5) {
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
}
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
__Pyx_INCREF(__pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
}
__pyx_L3:;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
__PYX_ERR(1, 514, __pyx_L1_error)
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_10 = __pyx_v_bytesvalue;
__pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10);
__pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10));
for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
__pyx_t_11 = __pyx_t_14;
__pyx_v_c = (__pyx_t_11[0]);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
__pyx_v_i = __pyx_t_9;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = (__pyx_t_9 + 1);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
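/* `for i, c in enumerate(bytesvalue)` compiles to a raw pointer walk over
 * PyBytes_AS_STRING(bytesvalue); each packed byte is copied into itemp,
 * writing the element's binary representation directly into the buffer. */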
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
char *__pyx_t_5;
void *__pyx_t_6;
int __pyx_t_7;
Py_ssize_t __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
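/* info->obj is pre-set to Py_None so the __pyx_L1_error cleanup can release
 * it unconditionally; on success it is replaced with a strong reference to
 * the exporting memoryview, as the buffer protocol requires. */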
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
__pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_2 = (__pyx_v_self->view.readonly != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 520, __pyx_L1_error)
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
}
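/* Consumer-side view (illustrative, not part of this file): a request such
 * as PyObject_GetBuffer(obj, &view, PyBUF_WRITABLE) on a read-only exporter
 * is rejected here with ValueError, per the buffer protocol. */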
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":523
*
* if flags & PyBUF_ND:
* info.shape = self.view.shape # <<<<<<<<<<<<<<
* else:
* info.shape = NULL
*/
__pyx_t_4 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_4;
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":525
* info.shape = self.view.shape
* else:
* info.shape = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_STRIDES:
*/
/*else*/ {
__pyx_v_info->shape = NULL;
}
__pyx_L6:;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":528
*
* if flags & PyBUF_STRIDES:
* info.strides = self.view.strides # <<<<<<<<<<<<<<
* else:
* info.strides = NULL
*/
__pyx_t_4 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_4;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
goto __pyx_L7;
}
/* "View.MemoryView":530
* info.strides = self.view.strides
* else:
* info.strides = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_INDIRECT:
*/
/*else*/ {
__pyx_v_info->strides = NULL;
}
__pyx_L7:;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":533
*
* if flags & PyBUF_INDIRECT:
* info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
* else:
* info.suboffsets = NULL
*/
__pyx_t_4 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_4;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":535
* info.suboffsets = self.view.suboffsets
* else:
* info.suboffsets = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
/*else*/ {
__pyx_v_info->suboffsets = NULL;
}
__pyx_L8:;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":538
*
* if flags & PyBUF_FORMAT:
* info.format = self.view.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_5 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_5;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":540
* info.format = self.view.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.buf = self.view.buf
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L9:;
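/* shape/strides/suboffsets/format above are exported only when the
 * consumer's flags request them (PyBUF_ND, PyBUF_STRIDES, PyBUF_INDIRECT,
 * PyBUF_FORMAT); otherwise the corresponding Py_buffer pointers are set to
 * NULL, the protocol's way of saying "not available at this level of
 * detail". */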
/* "View.MemoryView":542
* info.format = NULL
*
* info.buf = self.view.buf # <<<<<<<<<<<<<<
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
*/
__pyx_t_6 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_6;
/* "View.MemoryView":543
*
* info.buf = self.view.buf
* info.ndim = self.view.ndim # <<<<<<<<<<<<<<
* info.itemsize = self.view.itemsize
* info.len = self.view.len
*/
__pyx_t_7 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_7;
/* "View.MemoryView":544
* info.buf = self.view.buf
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
* info.len = self.view.len
* info.readonly = self.view.readonly
*/
__pyx_t_8 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_8;
/* "View.MemoryView":545
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
* info.len = self.view.len # <<<<<<<<<<<<<<
* info.readonly = self.view.readonly
* info.obj = self
*/
__pyx_t_8 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_8;
/* "View.MemoryView":546
* info.itemsize = self.view.itemsize
* info.len = self.view.len
* info.readonly = self.view.readonly # <<<<<<<<<<<<<<
* info.obj = self
*
*/
__pyx_t_1 = __pyx_v_self->view.readonly;
__pyx_v_info->readonly = __pyx_t_1;
/* "View.MemoryView":547
* info.len = self.view.len
* info.readonly = self.view.readonly
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
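/* Illustrative consumer sketch (assumptions: `obj` is any buffer exporter,
 * e.g. this memoryview; the names below are CPython API, not defined here):
 *
 *     Py_buffer view;
 *     if (PyObject_GetBuffer(obj, &view, PyBUF_FULL_RO) == 0) {
 *       // view.buf and view.shape[0..ndim-1] are now valid
 *       PyBuffer_Release(&view);  // must balance every successful GetBuffer
 *     }
 */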
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":554
* @property
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
* transpose_memslice(&result.from_slice)
* return result
*/
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error)
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":555
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error)
/* "View.MemoryView":556
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
* return result # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
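/* The T property returns a transposed *copy object*: memoryview_copy_object
 * wraps the slice in a new _memoryviewslice, and __pyx_memslice_transpose
 * reverses its shape/strides in place (returning 0 on failure, hence the
 * `== 0` error check above). */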
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":560
* @property
* def base(self):
* return self.obj # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj);
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_length;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":564
* @property
* def shape(self):
* return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_length = (__pyx_t_2[0]);
__pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
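/* shape materializes view.shape[0:ndim] into a Python tuple via a pointer
 * loop; e.g. a 2x3 view reports (2, 3). The strides and suboffsets
 * properties below follow the same pattern. */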
/* "View.MemoryView":567
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_stride;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":568
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":570
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 570, __pyx_L1_error)
/* "View.MemoryView":568
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
}
/* "View.MemoryView":572
* raise ValueError("Buffer view does not expose strides")
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_v_stride = (__pyx_t_3[0]);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "View.MemoryView":567
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":575
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
Py_ssize_t *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":576
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
__pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":577
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_tuple__18, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":576
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
}
/* "View.MemoryView":579
* return (-1,) * self.view.ndim
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
__pyx_t_4 = __pyx_t_6;
__pyx_v_suboffset = (__pyx_t_4[0]);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":575
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
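/* When the exporter supplies no suboffsets, the property synthesizes
 * (-1,) * ndim with PyNumber_Multiply on the cached constant tuple (-1,);
 * -1 is the PEP 3118 sentinel meaning "no indirection in this dimension". */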
/* "View.MemoryView":582
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":583
* @property
* def ndim(self):
* return self.view.ndim # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":582
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":586
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":587
* @property
* def itemsize(self):
* return self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":586
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":590
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":591
* @property
* def nbytes(self):
* return self.size * self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":590
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":594
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":595
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
__pyx_t_1 = (__pyx_v_self->_size == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":596
* def size(self):
* if self._size is None:
* result = 1 # <<<<<<<<<<<<<<
*
* for length in self.view.shape[:self.view.ndim]:
*/
__Pyx_INCREF(__pyx_int_1);
__pyx_v_result = __pyx_int_1;
/* "View.MemoryView":598
* result = 1
*
* for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<<
* result *= length
*
*/
__pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
__pyx_t_6 = 0;
/* "View.MemoryView":599
*
* for length in self.view.shape[:self.view.ndim]:
* result *= length # <<<<<<<<<<<<<<
*
* self._size = result
*/
__pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
__pyx_t_6 = 0;
}
/* "View.MemoryView":601
* result *= length
*
* self._size = result # <<<<<<<<<<<<<<
*
* return self._size
*/
__Pyx_INCREF(__pyx_v_result);
__Pyx_GIVEREF(__pyx_v_result);
__Pyx_GOTREF(__pyx_v_self->_size);
__Pyx_DECREF(__pyx_v_self->_size);
__pyx_v_self->_size = __pyx_v_result;
/* "View.MemoryView":595
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
}
/* "View.MemoryView":603
* self._size = result
*
* return self._size # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->_size);
__pyx_r = __pyx_v_self->_size;
goto __pyx_L0;
/* "View.MemoryView":594
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
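/* size is the product of all extents, computed lazily with Python integers
 * (PyNumber_InPlaceMultiply, so it cannot overflow) and cached in
 * self._size; the nbytes property above is simply size * itemsize. */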
/* "View.MemoryView":605
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* Python wrapper */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":606
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
__pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":607
* def __len__(self):
* if self.view.ndim >= 1:
* return self.view.shape[0] # <<<<<<<<<<<<<<
*
* return 0
*/
__pyx_r = (__pyx_v_self->view.shape[0]);
goto __pyx_L0;
/* "View.MemoryView":606
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
}
/* "View.MemoryView":609
* return self.view.shape[0]
*
* return 0 # <<<<<<<<<<<<<<
*
* def __repr__(self):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":605
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
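/* len(m) is the extent of the first dimension, or 0 for a 0-dim view; note
 * that it is not the total element count, which is the `size` property
 * above. */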
/* "View.MemoryView":611
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":612
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":613
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self)) # <<<<<<<<<<<<<<
*
* def __str__(self):
*/
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* "View.MemoryView":612
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":611
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":615
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__str__", 0);
/* "View.MemoryView":616
*
* def __str__(self):
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":615
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":619
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_c_contig", 0);
/* "View.MemoryView":622
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":623
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<<
*
* def is_f_contig(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":619
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":625
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_f_contig", 0);
/* "View.MemoryView":628
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":629
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<<
*
* def copy(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":625
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
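/* is_c_contig()/is_f_contig() fetch a __Pyx_memviewslice for self (building
 * a temporary one if needed) and test whether the strides describe a dense
 * 'C' (row-major) or 'F' (column-major) layout for the view's ndim and
 * itemsize. */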
/* "View.MemoryView":631
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_mslice;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy", 0);
/* "View.MemoryView":633
* def copy(self):
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &mslice)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
/* "View.MemoryView":635
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*
* slice_copy(self, &mslice) # <<<<<<<<<<<<<<
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
/* "View.MemoryView":636
*
* slice_copy(self, &mslice)
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_C_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":641
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
*
* def copy_fortran(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":631
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
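/*
 * copy() above always materialises a C-contiguous duplicate: the
 * PyBUF_F_CONTIGUOUS bit is masked out of the stored flags,
 * slice_copy() snapshots shape/strides/suboffsets into a stack-local
 * slice, and __pyx_memoryview_copy_new_contig allocates and fills a
 * new C-ordered buffer that is then rewrapped as a memoryview object.
 * Cython-level sketch (view name and dtype are assumptions):
 *
 *   cdef int[:, ::1] c_copy = some_view.copy()   # C-contiguous result
 */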
/* "View.MemoryView":643
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy_fortran", 0);
/* "View.MemoryView":645
* def copy_fortran(self):
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &src)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
/* "View.MemoryView":647
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*
* slice_copy(self, &src) # <<<<<<<<<<<<<<
* dst = slice_copy_contig(&src, "fortran", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
/* "View.MemoryView":648
*
* slice_copy(self, &src)
* dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_F_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error)
__pyx_v_dst = __pyx_t_1;
/* "View.MemoryView":653
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":643
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
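/*
 * copy_fortran() above is the mirror image of copy(): it masks out
 * PyBUF_C_CONTIGUOUS, passes "fortran" as the target order, and ORs in
 * PyBUF_F_CONTIGUOUS, so the duplicate is laid out first-axis-fastest.
 * Cython-level sketch (view name and dtype are assumptions):
 *
 *   cdef int[::1, :] f_copy = some_view.copy_fortran()
 */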
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
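/*
 * Both tree-fragment methods above exist only to disable pickling:
 * a memoryview holds a borrowed Py_buffer acquired in a non-trivial
 * __cinit__, so there is no meaningful default reduce/setstate. With
 * Cython's usual __reduce__ wiring in place (an assumption about the
 * surrounding module setup), the observable behaviour is simply:
 *
 *   import pickle
 *   pickle.dumps(mv)   # TypeError: no default __reduce__ due to
 *                      # non-trivial __cinit__
 */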
/* "View.MemoryView":657
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
struct __pyx_memoryview_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
/* "View.MemoryView":658
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
* result.typeinfo = typeinfo
* return result
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_o);
__Pyx_GIVEREF(__pyx_v_o);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":659
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_v_result->typeinfo = __pyx_v_typeinfo;
/* "View.MemoryView":660
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_check')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":657
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
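/*
 * __pyx_memoryview_new above is the C-level constructor the rest of
 * the module uses: it boxes `flags` into a Python int and
 * `dtype_is_object` into a bool, packs them with the exporting object
 * into a 3-tuple, calls the memoryview type, and finally stores the
 * __Pyx_TypeInfo pointer directly on the result, since that field is
 * C-only and cannot travel through the Python-level constructor.
 * Sketch of a call site (the typeinfo symbol is an assumed generated
 * name for a double dtype):
 *
 *   PyObject *mv = __pyx_memoryview_new(
 *       obj, PyBUF_RECORDS, 0, &__Pyx_TypeInfo_double);
 */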
/* "View.MemoryView":663
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("memoryview_check", 0);
/* "View.MemoryView":664
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o):
* return isinstance(o, memoryview) # <<<<<<<<<<<<<<
*
* cdef tuple _unellipsify(object index, int ndim):
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type);
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "View.MemoryView":663
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
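/*
 * Note that isinstance(o, memoryview) compiles to a direct
 * __Pyx_TypeCheck against the module's own __pyx_memoryview_type (the
 * Cython-internal memoryview class, not builtins.memoryview), so the
 * test is a plain C subtype comparison with no attribute lookup or
 * Python-level call.
 */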
/* "View.MemoryView":666
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
PyObject *__pyx_v_tup = NULL;
PyObject *__pyx_v_result = NULL;
int __pyx_v_have_slices;
int __pyx_v_seen_ellipsis;
CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_item = NULL;
Py_ssize_t __pyx_v_nslices;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
PyObject *__pyx_t_11 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_unellipsify", 0);
/* "View.MemoryView":671
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
__pyx_t_1 = PyTuple_Check(__pyx_v_index);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":672
* """
* if not isinstance(index, tuple):
* tup = (index,) # <<<<<<<<<<<<<<
* else:
* tup = index
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_index);
__Pyx_GIVEREF(__pyx_v_index);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
__pyx_v_tup = __pyx_t_3;
__pyx_t_3 = 0;
/* "View.MemoryView":671
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":674
* tup = (index,)
* else:
* tup = index # <<<<<<<<<<<<<<
*
* result = []
*/
/*else*/ {
__Pyx_INCREF(__pyx_v_index);
__pyx_v_tup = __pyx_v_index;
}
__pyx_L3:;
/* "View.MemoryView":676
* tup = index
*
* result = [] # <<<<<<<<<<<<<<
* have_slices = False
* seen_ellipsis = False
*/
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_result = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":677
*
* result = []
* have_slices = False # <<<<<<<<<<<<<<
* seen_ellipsis = False
* for idx, item in enumerate(tup):
*/
__pyx_v_have_slices = 0;
/* "View.MemoryView":678
* result = []
* have_slices = False
* seen_ellipsis = False # <<<<<<<<<<<<<<
* for idx, item in enumerate(tup):
* if item is Ellipsis:
*/
__pyx_v_seen_ellipsis = 0;
/* "View.MemoryView":679
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
__Pyx_INCREF(__pyx_int_0);
__pyx_t_3 = __pyx_int_0;
if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) {
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_6)) {
if (likely(PyList_CheckExact(__pyx_t_4))) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
} else {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
}
} else {
__pyx_t_7 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_7)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 679, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_7);
}
__Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
__pyx_t_7 = 0;
__Pyx_INCREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
__pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3);
__pyx_t_3 = __pyx_t_7;
__pyx_t_7 = 0;
/* "View.MemoryView":680
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
__pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":681
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
__pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":682
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error)
__pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__21);
__Pyx_GIVEREF(__pyx_slice__21);
PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__21);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":683
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True # <<<<<<<<<<<<<<
* else:
* result.append(slice(None))
*/
__pyx_v_seen_ellipsis = 1;
/* "View.MemoryView":681
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
goto __pyx_L7;
}
/* "View.MemoryView":685
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__21); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":686
* else:
* result.append(slice(None))
* have_slices = True # <<<<<<<<<<<<<<
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
*/
__pyx_v_have_slices = 1;
/* "View.MemoryView":680
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
goto __pyx_L6;
}
/* "View.MemoryView":688
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
/*else*/ {
__pyx_t_2 = PySlice_Check(__pyx_v_item);
__pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0);
__pyx_t_1 = __pyx_t_10;
__pyx_L9_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":689
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
* raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
*
* have_slices = have_slices or isinstance(item, slice)
*/
__pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_Raise(__pyx_t_11, 0, 0, 0);
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__PYX_ERR(1, 689, __pyx_L1_error)
/* "View.MemoryView":688
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
}
/* "View.MemoryView":691
* raise TypeError("Cannot index with type '%s'" % type(item))
*
* have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
* result.append(item)
*
*/
__pyx_t_10 = (__pyx_v_have_slices != 0);
if (!__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = (__pyx_t_10 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_have_slices = __pyx_t_1;
/* "View.MemoryView":692
*
* have_slices = have_slices or isinstance(item, slice)
* result.append(item) # <<<<<<<<<<<<<<
*
* nslices = ndim - len(result)
*/
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error)
}
__pyx_L6:;
/* "View.MemoryView":679
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":694
* result.append(item)
*
* nslices = ndim - len(result) # <<<<<<<<<<<<<<
* if nslices:
* result.extend([slice(None)] * nslices)
*/
__pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error)
__pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
/* "View.MemoryView":695
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
__pyx_t_1 = (__pyx_v_nslices != 0);
if (__pyx_t_1) {
/* "View.MemoryView":696
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__21);
__Pyx_GIVEREF(__pyx_slice__21);
PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__21);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":695
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
}
/* "View.MemoryView":698
* result.extend([slice(None)] * nslices)
*
* return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
*/
__Pyx_XDECREF(__pyx_r);
if (!__pyx_v_have_slices) {
} else {
__pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_L14_bool_binop_done:;
__pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_11);
__pyx_t_11 = 0;
goto __pyx_L0;
/* "View.MemoryView":666
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
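/*
 * _unellipsify above normalises an indexing expression for an
 * ndim-dimensional view: a bare index is wrapped in a 1-tuple, the
 * first Ellipsis expands to enough slice(None) entries to pad the
 * tuple out to ndim items (any later Ellipsis counts as one full
 * slice), anything that is neither a slice nor an integer index raises
 * TypeError, and missing trailing dimensions are padded with full
 * slices. The first element of the returned pair is truthy whenever
 * slicing (rather than pure integer indexing) is involved:
 *
 *   _unellipsify((Ellipsis, 0), 3)  ->  (True, (slice(None), slice(None), 0))
 *   _unellipsify(1, 2)              ->  (1, (1, slice(None)))
 */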
/* "View.MemoryView":700
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
/* "View.MemoryView":701
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported")
*/
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
__pyx_t_1 = __pyx_t_3;
__pyx_v_suboffset = (__pyx_t_1[0]);
/* "View.MemoryView":702
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
__pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":703
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(1, 703, __pyx_L1_error)
/* "View.MemoryView":702
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
}
}
/* "View.MemoryView":700
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
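/*
 * In the PEP 3118 buffer model a suboffset >= 0 marks an indirect
 * dimension: the stride walk lands on a pointer that must be
 * dereferenced and offset rather than on the element itself. The
 * generic copy and slicing paths in this module only handle direct,
 * strided memory, so callers use assert_direct_dimensions to reject
 * buffers that still carry an indirect dimension.
 */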
/* "View.MemoryView":710
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":711
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices):
* cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
* cdef bint negative_step
* cdef __Pyx_memviewslice src, dst
*/
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":718
*
*
* memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
*
* cdef _memoryviewslice memviewsliceobj
*/
(void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))));
/* "View.MemoryView":722
* cdef _memoryviewslice memviewsliceobj
*
* assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(1, 722, __pyx_L1_error)
}
}
#endif
/* "View.MemoryView":724
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":725
*
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview # <<<<<<<<<<<<<<
* p_src = &memviewsliceobj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":726
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, &src)
*/
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
/* "View.MemoryView":724
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
goto __pyx_L3;
}
/* "View.MemoryView":728
* p_src = &memviewsliceobj.from_slice
* else:
* slice_copy(memview, &src) # <<<<<<<<<<<<<<
* p_src = &src
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":729
* else:
* slice_copy(memview, &src)
* p_src = &src # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":735
*
*
* dst.memview = p_src.memview # <<<<<<<<<<<<<<
* dst.data = p_src.data
*
*/
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":736
*
* dst.memview = p_src.memview
* dst.data = p_src.data # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":741
*
*
* cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
* cdef int *p_suboffset_dim = &suboffset_dim
* cdef Py_ssize_t start, stop, step
*/
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":742
*
* cdef __Pyx_memviewslice *p_dst = &dst
* cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
* cdef Py_ssize_t start, stop, step
* cdef bint have_start, have_stop, have_step
*/
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":746
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
__pyx_t_6 = 0;
if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_8)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
} else {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
}
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 746, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":747
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
__pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":751
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
* index, 0, 0, # start, stop, step # <<<<<<<<<<<<<<
* 0, 0, 0, # have_{start,stop,step}
* False)
*/
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error)
/* "View.MemoryView":748
* for dim, index in enumerate(indices):
* if PyIndex_Check(index):
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error)
/* "View.MemoryView":747
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
goto __pyx_L6;
}
/* "View.MemoryView":754
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":755
* False)
* elif index is None:
* p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
*/
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":756
* elif index is None:
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1
*/
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":757
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
* new_ndim += 1
* else:
*/
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;
/* "View.MemoryView":758
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1 # <<<<<<<<<<<<<<
* else:
* start = index.start or 0
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
/* "View.MemoryView":754
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
goto __pyx_L6;
}
/* "View.MemoryView":760
* new_ndim += 1
* else:
* start = index.start or 0 # <<<<<<<<<<<<<<
* stop = index.stop or 0
* step = index.step or 0
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L7_bool_binop_done:;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":761
* else:
* start = index.start or 0
* stop = index.stop or 0 # <<<<<<<<<<<<<<
* step = index.step or 0
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L9_bool_binop_done:;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":762
* start = index.start or 0
* stop = index.stop or 0
* step = index.step or 0 # <<<<<<<<<<<<<<
*
* have_start = index.start is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L11_bool_binop_done:;
__pyx_v_step = __pyx_t_10;
/* "View.MemoryView":764
* step = index.step or 0
*
* have_start = index.start is not None # <<<<<<<<<<<<<<
* have_stop = index.stop is not None
* have_step = index.step is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":765
*
* have_start = index.start is not None
* have_stop = index.stop is not None # <<<<<<<<<<<<<<
* have_step = index.step is not None
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":766
* have_start = index.start is not None
* have_stop = index.stop is not None
* have_step = index.step is not None # <<<<<<<<<<<<<<
*
* slice_memviewslice(
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":768
* have_step = index.step is not None
*
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error)
/* "View.MemoryView":774
* have_start, have_stop, have_step,
* True)
* new_ndim += 1 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
/* "View.MemoryView":746
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":776
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":777
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":778
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
* memviewsliceobj.to_dtype_func,
* memview.dtype_is_object)
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) }
/* "View.MemoryView":779
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
* else:
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) }
/* "View.MemoryView":777
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":776
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
}
/* "View.MemoryView":782
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
/*else*/ {
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":783
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":782
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":710
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
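/*
 * memview_slice above performs one __getitem__-style slicing pass over
 * indices that (in Cython's View.MemoryView) have already been
 * expanded by _unellipsify: dst starts as a zeroed copy of the source
 * slice header, then each index is folded in. An integer consumes a
 * source dimension without producing one, None injects a new length-1
 * dimension (shape 1, stride 0, suboffset -1), and a slice object is
 * decomposed into start/stop/step plus have_* flags before
 * slice_memviewslice writes the corresponding output dimension. The
 * result is rewrapped with memoryview_fromslice, propagating the
 * to_object_func/to_dtype_func callbacks when the source was itself a
 * _memoryviewslice. Cython-level sketch (view name hypothetical):
 *
 *   sub = mv3d[1, ::2, None]   # drop dim 0, stride dim 1, add an axis
 */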
/* "View.MemoryView":807
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
Py_ssize_t __pyx_v_new_shape;
int __pyx_v_negative_step;
int __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":827
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
__pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":829
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
__pyx_t_1 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":830
*
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":829
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
}
/* "View.MemoryView":831
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
__pyx_t_1 = (0 <= __pyx_v_start);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
}
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":832
* start += shape
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<<
* else:
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error)
/* "View.MemoryView":831
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
}
/* "View.MemoryView":827
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
goto __pyx_L3;
}
/* "View.MemoryView":835
* else:
*
* negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<<
*
* if have_step and step == 0:
*/
/*else*/ {
__pyx_t_1 = ((__pyx_v_have_step != 0) != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step < 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L6_bool_binop_done:;
__pyx_v_negative_step = __pyx_t_2;
/* "View.MemoryView":837
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
__pyx_t_1 = (__pyx_v_have_step != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step == 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L9_bool_binop_done:;
if (__pyx_t_2) {
/* "View.MemoryView":838
*
* if have_step and step == 0:
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error)
/* "View.MemoryView":837
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
}
/* "View.MemoryView":841
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
__pyx_t_2 = (__pyx_v_have_start != 0);
if (__pyx_t_2) {
/* "View.MemoryView":842
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":843
* if have_start:
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if start < 0:
* start = 0
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":844
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":845
* start += shape
* if start < 0:
* start = 0 # <<<<<<<<<<<<<<
* elif start >= shape:
* if negative_step:
*/
__pyx_v_start = 0;
/* "View.MemoryView":844
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
}
/* "View.MemoryView":842
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
goto __pyx_L12;
}
/* "View.MemoryView":846
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
__pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":847
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":848
* elif start >= shape:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = shape
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":847
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L14;
}
/* "View.MemoryView":850
* start = shape - 1
* else:
* start = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
/*else*/ {
__pyx_v_start = __pyx_v_shape;
}
__pyx_L14:;
/* "View.MemoryView":846
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
}
__pyx_L12:;
/* "View.MemoryView":841
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
goto __pyx_L11;
}
/* "View.MemoryView":852
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":853
* else:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = 0
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":852
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L15;
}
/* "View.MemoryView":855
* start = shape - 1
* else:
* start = 0 # <<<<<<<<<<<<<<
*
* if have_stop:
*/
/*else*/ {
__pyx_v_start = 0;
}
__pyx_L15:;
}
__pyx_L11:;
/* "View.MemoryView":857
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
__pyx_t_2 = (__pyx_v_have_stop != 0);
if (__pyx_t_2) {
/* "View.MemoryView":858
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":859
* if have_stop:
* if stop < 0:
* stop += shape # <<<<<<<<<<<<<<
* if stop < 0:
* stop = 0
*/
__pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
/* "View.MemoryView":860
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":861
* stop += shape
* if stop < 0:
* stop = 0 # <<<<<<<<<<<<<<
* elif stop > shape:
* stop = shape
*/
__pyx_v_stop = 0;
/* "View.MemoryView":860
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
}
/* "View.MemoryView":858
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
goto __pyx_L17;
}
/* "View.MemoryView":862
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
__pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":863
* stop = 0
* elif stop > shape:
* stop = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
__pyx_v_stop = __pyx_v_shape;
/* "View.MemoryView":862
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
}
__pyx_L17:;
/* "View.MemoryView":857
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
goto __pyx_L16;
}
/* "View.MemoryView":865
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":866
* else:
* if negative_step:
* stop = -1 # <<<<<<<<<<<<<<
* else:
* stop = shape
*/
__pyx_v_stop = -1L;
/* "View.MemoryView":865
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
goto __pyx_L19;
}
/* "View.MemoryView":868
* stop = -1
* else:
* stop = shape # <<<<<<<<<<<<<<
*
* if not have_step:
*/
/*else*/ {
__pyx_v_stop = __pyx_v_shape;
}
__pyx_L19:;
}
__pyx_L16:;
/* "View.MemoryView":870
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
__pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":871
*
* if not have_step:
* step = 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_step = 1;
/* "View.MemoryView":870
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
}
/* "View.MemoryView":875
*
* with cython.cdivision(True):
* new_shape = (stop - start) // step # <<<<<<<<<<<<<<
*
* if (stop - start) - step * new_shape:
*/
__pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
/* "View.MemoryView":877
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
__pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":878
*
* if (stop - start) - step * new_shape:
* new_shape += 1 # <<<<<<<<<<<<<<
*
* if new_shape < 0:
*/
__pyx_v_new_shape = (__pyx_v_new_shape + 1);
/* "View.MemoryView":877
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
}
/* "View.MemoryView":880
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
__pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":881
*
* if new_shape < 0:
* new_shape = 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_new_shape = 0;
/* "View.MemoryView":880
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
}
/* "View.MemoryView":884
*
*
* dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<<
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset
*/
(__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
/* "View.MemoryView":885
*
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<<
* dst.suboffsets[new_ndim] = suboffset
*
*/
(__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
/* "View.MemoryView":886
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
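/* Record the sliced dimension in the destination slice: the stride is
   the source stride scaled by the step, the extent is the slice length
   computed above, and the suboffset is carried over so indirect
   (PEP 3118 suboffset) buffers keep working. */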
}
__pyx_L3:;
/* "View.MemoryView":889
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
__pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":890
*
* if suboffset_dim[0] < 0:
* dst.data += start * stride # <<<<<<<<<<<<<<
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride
*/
__pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
/* "View.MemoryView":889
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
goto __pyx_L23;
}
/* "View.MemoryView":892
* dst.data += start * stride
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<<
*
* if suboffset >= 0:
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_suboffset_dim[0]);
(__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
}
__pyx_L23:;
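/* For direct buffers the start offset is folded into the base data
   pointer.  Once an earlier dimension is indirect (suboffset_dim[0] >= 0),
   the offset is instead added to that dimension's suboffset, because the
   data pointer is only dereferenced at that level. */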
/* "View.MemoryView":894
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":895
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
__pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":896
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
__pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":897
* if not is_slice:
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<<
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
*/
__pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":896
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
goto __pyx_L26;
}
/* "View.MemoryView":899
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<<
* "must be indexed and not sliced", dim)
* else:
*/
/*else*/ {
/* "View.MemoryView":900
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
* "must be indexed and not sliced", dim) # <<<<<<<<<<<<<<
* else:
* suboffset_dim[0] = new_ndim
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error)
}
__pyx_L26:;
/* "View.MemoryView":895
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
goto __pyx_L25;
}
/* "View.MemoryView":902
* "must be indexed and not sliced", dim)
* else:
* suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<<
*
* return 0
*/
/*else*/ {
(__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
}
__pyx_L25:;
/* "View.MemoryView":894
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
}
/* "View.MemoryView":904
* suboffset_dim[0] = new_ndim
*
* return 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":807
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/* function exit code */
__pyx_L1_error:;
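/* slice_memviewslice runs without the GIL, so it is re-acquired here
   before AddTraceback touches the Python error state. */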
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_suboffset;
Py_ssize_t __pyx_v_itemsize;
char *__pyx_v_resultp;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("pybuffer_index", 0);
/* "View.MemoryView":912
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
* cdef Py_ssize_t itemsize = view.itemsize
* cdef char *resultp
*/
__pyx_v_suboffset = -1L;
/* "View.MemoryView":913
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
* cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
* cdef char *resultp
*
*/
__pyx_t_1 = __pyx_v_view->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
__pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":917
*
* if view.ndim == 0:
* shape = view.len / itemsize # <<<<<<<<<<<<<<
* stride = itemsize
* else:
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 917, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 917, __pyx_L1_error)
}
__pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize);
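/* A zero-dimensional exporter is treated as a flat array of
   view.len / itemsize items with a stride of one item. */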
/* "View.MemoryView":918
* if view.ndim == 0:
* shape = view.len / itemsize
* stride = itemsize # <<<<<<<<<<<<<<
* else:
* shape = view.shape[dim]
*/
__pyx_v_stride = __pyx_v_itemsize;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
goto __pyx_L3;
}
/* "View.MemoryView":920
* stride = itemsize
* else:
* shape = view.shape[dim] # <<<<<<<<<<<<<<
* stride = view.strides[dim]
* if view.suboffsets != NULL:
*/
/*else*/ {
__pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
/* "View.MemoryView":921
* else:
* shape = view.shape[dim]
* stride = view.strides[dim] # <<<<<<<<<<<<<<
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim]
*/
__pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
__pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":923
* stride = view.strides[dim]
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<<
*
* if index < 0:
*/
__pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
}
}
__pyx_L3:;
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":926
*
* if index < 0:
* index += view.shape[dim] # <<<<<<<<<<<<<<
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*/
__pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
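/* Python-style negative indices count from the end of the axis, so
   index -1 maps to shape - 1; if the index is still negative after
   wrapping, it was out of bounds and the check below raises. */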
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":928
* index += view.shape[dim]
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* if index >= shape:
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 928, __pyx_L1_error)
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
}
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":931
*
* if index >= shape:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* resultp = bufp + index * stride
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 931, __pyx_L1_error)
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":933
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* resultp = bufp + index * stride # <<<<<<<<<<<<<<
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset
*/
__pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":935
* resultp = bufp + index * stride
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
*
* return resultp
*/
__pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
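/* For an indirect dimension the computed address holds a pointer to the
   actual data block, which is dereferenced and then offset by the
   suboffset, per the PEP 3118 suboffsets protocol. */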
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
}
/* "View.MemoryView":937
* resultp = (<char **> resultp)[0] + suboffset
*
* return resultp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_resultp;
goto __pyx_L0;
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
int __pyx_v_ndim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_r;
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
long __pyx_t_3;
long __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":944
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
* cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<<
*
* cdef Py_ssize_t *shape = memslice.shape
*/
__pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
__pyx_v_ndim = __pyx_t_1;
/* "View.MemoryView":946
* cdef int ndim = memslice.memview.view.ndim
*
* cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<<
* cdef Py_ssize_t *strides = memslice.strides
*
*/
__pyx_t_2 = __pyx_v_memslice->shape;
__pyx_v_shape = __pyx_t_2;
/* "View.MemoryView":947
*
* cdef Py_ssize_t *shape = memslice.shape
* cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_v_memslice->strides;
__pyx_v_strides = __pyx_t_2;
/* "View.MemoryView":951
*
* cdef int i, j
* for i in range(ndim / 2): # <<<<<<<<<<<<<<
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
*/
__pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2);
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":952
* cdef int i, j
* for i in range(ndim / 2):
* j = ndim - 1 - i # <<<<<<<<<<<<<<
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i]
*/
__pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
/* "View.MemoryView":953
* for i in range(ndim / 2):
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<<
* shape[i], shape[j] = shape[j], shape[i]
*
*/
__pyx_t_5 = (__pyx_v_strides[__pyx_v_j]);
__pyx_t_6 = (__pyx_v_strides[__pyx_v_i]);
(__pyx_v_strides[__pyx_v_i]) = __pyx_t_5;
(__pyx_v_strides[__pyx_v_j]) = __pyx_t_6;
/* "View.MemoryView":954
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<<
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
*/
__pyx_t_6 = (__pyx_v_shape[__pyx_v_j]);
__pyx_t_5 = (__pyx_v_shape[__pyx_v_i]);
(__pyx_v_shape[__pyx_v_i]) = __pyx_t_6;
(__pyx_v_shape[__pyx_v_j]) = __pyx_t_5;
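/* Swapping strides[i]/strides[j] and shape[i]/shape[j] for mirrored
   index pairs reverses the axis order in place, producing the transposed
   view without copying any data.  E.g. a C-contiguous 3x4 slice of
   4-byte items (shape {3, 4}, strides {16, 4}) becomes shape {4, 3},
   strides {4, 16}. */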
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
if (!__pyx_t_8) {
} else {
__pyx_t_7 = __pyx_t_8;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
__pyx_t_7 = __pyx_t_8;
__pyx_L6_bool_binop_done:;
if (__pyx_t_7) {
/* "View.MemoryView":957
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<<
*
* return 1
*/
__pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error)
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
}
}
/* "View.MemoryView":959
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
* return 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 1;
goto __pyx_L0;
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* Python wrapper */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":977
*
* def __dealloc__(self):
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":981
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL:
* return self.to_object_func(itemp) # <<<<<<<<<<<<<<
* else:
* return memoryview.convert_item_to_object(self, itemp)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
}
/* "View.MemoryView":983
* return self.to_object_func(itemp)
* else:
* return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":987
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<<
* else:
* memoryview.assign_item_from_object(self, itemp, value)
*/
__pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error)
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":989
* self.to_dtype_func(itemp, value)
* else:
* memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<<
*
* @property
*/
/*else*/ {
__pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L3:;
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":993
* @property
* def base(self):
* return self.from_object # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->from_object);
__pyx_r = __pyx_v_self->from_object;
goto __pyx_L0;
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_TypeInfo *__pyx_t_4;
Py_buffer __pyx_t_5;
Py_ssize_t *__pyx_t_6;
Py_ssize_t *__pyx_t_7;
Py_ssize_t *__pyx_t_8;
Py_ssize_t __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_fromslice", 0);
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1008
*
* if <PyObject *> memviewslice.memview == Py_None:
* return None # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
}
/* "View.MemoryView":1013
*
*
* result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<<
*
* result.from_slice = memviewslice
*/
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1015
* result = _memoryviewslice(None, 0, dtype_is_object)
*
* result.from_slice = memviewslice # <<<<<<<<<<<<<<
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
*/
__pyx_v_result->from_slice = __pyx_v_memviewslice;
/* "View.MemoryView":1016
*
* result.from_slice = memviewslice
* __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<<
*
* result.from_object = (<memoryview> memviewslice.memview).base
*/
__PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
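/* Bump the slice's acquisition count so the underlying memoryview stays
   alive for the lifetime of the new wrapper object. */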
/* "View.MemoryView":1018
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
* result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<<
* result.typeinfo = memviewslice.memview.typeinfo
*
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_result->from_object);
__Pyx_DECREF(__pyx_v_result->from_object);
__pyx_v_result->from_object = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":1019
*
* result.from_object = (<memoryview> memviewslice.memview).base
* result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<<
*
* result.view = memviewslice.memview.view
*/
__pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
__pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
/* "View.MemoryView":1021
* result.typeinfo = memviewslice.memview.typeinfo
*
* result.view = memviewslice.memview.view # <<<<<<<<<<<<<<
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
*/
__pyx_t_5 = __pyx_v_memviewslice.memview->view;
__pyx_v_result->__pyx_base.view = __pyx_t_5;
/* "View.MemoryView":1022
*
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<<
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
*/
__pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
/* "View.MemoryView":1023
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
/* "View.MemoryView":1024
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
/* "View.MemoryView":1025
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
*/
Py_INCREF(Py_None);
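/* The Py_buffer obj field is pointed at Py_None (with a matching
   INCREF) so that releasing this view does not drop a reference to the
   real exporter; that reference is held via from_slice/from_object. */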
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
__pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1028
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
* result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<<
* else:
* result.flags = PyBUF_RECORDS_RO
*/
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
goto __pyx_L4;
}
/* "View.MemoryView":1030
* result.flags = PyBUF_RECORDS
* else:
* result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<<
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
*/
/*else*/ {
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO;
}
__pyx_L4:;
/* "View.MemoryView":1032
* result.flags = PyBUF_RECORDS_RO
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<<
* result.view.strides = <Py_ssize_t *> result.from_slice.strides
*
*/
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
/* "View.MemoryView":1033
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
* result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
/* "View.MemoryView":1036
*
*
* result.view.suboffsets = NULL # <<<<<<<<<<<<<<
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
*/
__pyx_v_result->__pyx_base.view.suboffsets = NULL;
/* "View.MemoryView":1037
*
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
*/
__pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_v_suboffset = (__pyx_t_6[0]);
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
__pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1039
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
/* "View.MemoryView":1040
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break # <<<<<<<<<<<<<<
*
* result.view.len = result.view.itemsize
*/
goto __pyx_L6_break;
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
}
}
__pyx_L6_break:;
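/* view.suboffsets is only exposed when at least one dimension is
   indirect; PEP 3118 allows NULL suboffsets for fully direct buffers,
   and consumers rely on that. */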
/* "View.MemoryView":1042
* break
*
* result.view.len = result.view.itemsize # <<<<<<<<<<<<<<
* for length in result.view.shape[:ndim]:
* result.view.len *= length
*/
__pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
/* "View.MemoryView":1043
*
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<<
* result.view.len *= length
*
*/
__pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1044
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]:
* result.view.len *= length # <<<<<<<<<<<<<<
*
* result.to_object_func = to_object_func
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
}
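/* view.len ends up as itemsize * product(shape[:ndim]), the total size
   of the exposed buffer in bytes. */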
/* "View.MemoryView":1046
* result.view.len *= length
*
* result.to_object_func = to_object_func # <<<<<<<<<<<<<<
* result.to_dtype_func = to_dtype_func
*
*/
__pyx_v_result->to_object_func = __pyx_v_to_object_func;
/* "View.MemoryView":1047
*
* result.to_object_func = to_object_func
* result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
/* "View.MemoryView":1049
* result.to_dtype_func = to_dtype_func
*
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1052
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
__Pyx_memviewslice *__pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_slice_from_memview", 0);
/* "View.MemoryView":1055
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1056
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice):
* obj = memview # <<<<<<<<<<<<<<
* return &obj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":1057
* if isinstance(memview, _memoryviewslice):
* obj = memview
* return &obj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, mslice)
*/
__pyx_r = (&__pyx_v_obj->from_slice);
goto __pyx_L0;
/* "View.MemoryView":1055
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
}
/* "View.MemoryView":1059
* return &obj.from_slice
* else:
* slice_copy(memview, mslice) # <<<<<<<<<<<<<<
* return mslice
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
/* "View.MemoryView":1060
* else:
* slice_copy(memview, mslice)
* return mslice # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_slice_copy')
*/
__pyx_r = __pyx_v_mslice;
goto __pyx_L0;
}
/* "View.MemoryView":1052
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_obj);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
int __pyx_v_dim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
Py_ssize_t *__pyx_v_suboffsets;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
__Pyx_RefNannySetupContext("slice_copy", 0);
/* "View.MemoryView":1067
* cdef (Py_ssize_t*) shape, strides, suboffsets
*
* shape = memview.view.shape # <<<<<<<<<<<<<<
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets
*/
__pyx_t_1 = __pyx_v_memview->view.shape;
__pyx_v_shape = __pyx_t_1;
/* "View.MemoryView":1068
*
* shape = memview.view.shape
* strides = memview.view.strides # <<<<<<<<<<<<<<
* suboffsets = memview.view.suboffsets
*
*/
__pyx_t_1 = __pyx_v_memview->view.strides;
__pyx_v_strides = __pyx_t_1;
/* "View.MemoryView":1069
* shape = memview.view.shape
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<<
*
* dst.memview = <__pyx_memoryview *> memview
*/
__pyx_t_1 = __pyx_v_memview->view.suboffsets;
__pyx_v_suboffsets = __pyx_t_1;
/* "View.MemoryView":1071
* suboffsets = memview.view.suboffsets
*
* dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<<
* dst.data = <char *> memview.view.buf
*
*/
__pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
/* "View.MemoryView":1072
*
* dst.memview = <__pyx_memoryview *> memview
* dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<<
*
* for dim in range(memview.view.ndim):
*/
__pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);
/* "View.MemoryView":1074
* dst.data = <char *> memview.view.buf
*
* for dim in range(memview.view.ndim): # <<<<<<<<<<<<<<
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
*/
__pyx_t_2 = __pyx_v_memview->view.ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_dim = __pyx_t_4;
/* "View.MemoryView":1075
*
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<<
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*/
(__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]);
/* "View.MemoryView":1076
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<<
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*
*/
(__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]);
/* "View.MemoryView":1077
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object')
*/
if ((__pyx_v_suboffsets != 0)) {
__pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]);
} else {
__pyx_t_5 = -1L;
}
(__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5;
}
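/* A suboffset of -1 marks a direct dimension, matching the PEP 3118
   convention used when the exporter supplies no suboffsets array. */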
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":1080
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
__Pyx_memviewslice __pyx_v_memviewslice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy", 0);
/* "View.MemoryView":1083
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<<
* return memoryview_copy_from_slice(memview, &memviewslice)
*
*/
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
/* "View.MemoryView":1084
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice)
* return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object_from_slice')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":1080
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1087
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
PyObject *(*__pyx_v_to_object_func)(char *);
int (*__pyx_v_to_dtype_func)(char *, PyObject *);
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *(*__pyx_t_3)(char *);
int (*__pyx_t_4)(char *, PyObject *);
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
/* "View.MemoryView":1094
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1095
*
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<<
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
*/
__pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
__pyx_v_to_object_func = __pyx_t_3;
/* "View.MemoryView":1096
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<<
* else:
* to_object_func = NULL
*/
__pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
__pyx_v_to_dtype_func = __pyx_t_4;
/* "View.MemoryView":1094
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
goto __pyx_L3;
}
/* "View.MemoryView":1098
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
* to_object_func = NULL # <<<<<<<<<<<<<<
* to_dtype_func = NULL
*
*/
/*else*/ {
__pyx_v_to_object_func = NULL;
/* "View.MemoryView":1099
* else:
* to_object_func = NULL
* to_dtype_func = NULL # <<<<<<<<<<<<<<
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
*/
__pyx_v_to_dtype_func = NULL;
}
__pyx_L3:;
/* "View.MemoryView":1101
* to_dtype_func = NULL
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<<
* to_object_func, to_dtype_func,
* memview.dtype_is_object)
*/
__Pyx_XDECREF(__pyx_r);
/* "View.MemoryView":1103
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
* to_object_func, to_dtype_func,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":1087
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1109
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
Py_ssize_t __pyx_r;
int __pyx_t_1;
/* "View.MemoryView":1110
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
__pyx_t_1 = ((__pyx_v_arg < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1111
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0:
* return -arg # <<<<<<<<<<<<<<
* else:
* return arg
*/
__pyx_r = (-__pyx_v_arg);
goto __pyx_L0;
/* "View.MemoryView":1110
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
}
/* "View.MemoryView":1113
* return -arg
* else:
* return arg # <<<<<<<<<<<<<<
*
* @cname('__pyx_get_best_slice_order')
*/
/*else*/ {
__pyx_r = __pyx_v_arg;
goto __pyx_L0;
}
/* "View.MemoryView":1109
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1116
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
int __pyx_v_i;
Py_ssize_t __pyx_v_c_stride;
Py_ssize_t __pyx_v_f_stride;
char __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1121
* """
* cdef int i
* cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<<
* cdef Py_ssize_t f_stride = 0
*
*/
__pyx_v_c_stride = 0;
/* "View.MemoryView":1122
* cdef int i
* cdef Py_ssize_t c_stride = 0
* cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_f_stride = 0;
/* "View.MemoryView":1124
* cdef Py_ssize_t f_stride = 0
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1125
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1126
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1127
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
goto __pyx_L4_break;
/* "View.MemoryView":1125
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L4_break:;
/* "View.MemoryView":1129
* break
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
*/
__pyx_t_1 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_1;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1130
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1131
* for i in range(ndim):
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1132
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
*/
goto __pyx_L7_break;
/* "View.MemoryView":1130
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L7_break:;
/* "View.MemoryView":1134
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
__pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1135
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
* return 'C' # <<<<<<<<<<<<<<
* else:
* return 'F'
*/
__pyx_r = 'C';
goto __pyx_L0;
/* "View.MemoryView":1134
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
}
/* "View.MemoryView":1137
* return 'C'
* else:
* return 'F' # <<<<<<<<<<<<<<
*
* @cython.cdivision(True)
*/
/*else*/ {
__pyx_r = 'F';
goto __pyx_L0;
}
/* "View.MemoryView":1116
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1140
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
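/* Recursive element-wise copy between two strided buffers. The 1-D base case
   collapses to a single memcpy when both strides are positive and equal to
   the item size (both sides contiguous); otherwise it copies item by item,
   advancing each pointer by its own stride. For ndim > 1 it recurses on the
   trailing ndim-1 dimensions for every index of the leading dimension. */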
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
Py_ssize_t __pyx_v_dst_extent;
Py_ssize_t __pyx_v_src_stride;
Py_ssize_t __pyx_v_dst_stride;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
/* "View.MemoryView":1147
*
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
*/
__pyx_v_src_extent = (__pyx_v_src_shape[0]);
/* "View.MemoryView":1148
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0]
*/
__pyx_v_dst_extent = (__pyx_v_dst_shape[0]);
/* "View.MemoryView":1149
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
*/
__pyx_v_src_stride = (__pyx_v_src_strides[0]);
/* "View.MemoryView":1150
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_dst_stride = (__pyx_v_dst_strides[0]);
/* "View.MemoryView":1152
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
__pyx_t_2 = ((__pyx_v_src_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
/* "View.MemoryView":1154
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
*/
__pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
if (__pyx_t_2) {
__pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
}
__pyx_t_3 = (__pyx_t_2 != 0);
__pyx_t_1 = __pyx_t_3;
__pyx_L5_bool_binop_done:;
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
if (__pyx_t_1) {
/* "View.MemoryView":1155
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)));
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
goto __pyx_L4;
}
/* "View.MemoryView":1157
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1158
* else:
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<<
* src_data += src_stride
* dst_data += dst_stride
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize));
/* "View.MemoryView":1159
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
* else:
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1160
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L4:;
/* "View.MemoryView":1152
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
goto __pyx_L3;
}
/* "View.MemoryView":1162
* dst_data += dst_stride
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* _copy_strided_to_strided(src_data, src_strides + 1,
* dst_data, dst_strides + 1,
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1163
* else:
* for i in range(dst_extent):
* _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<<
* dst_data, dst_strides + 1,
* src_shape + 1, dst_shape + 1,
*/
_copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize);
/* "View.MemoryView":1167
* src_shape + 1, dst_shape + 1,
* ndim - 1, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
*
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1168
* ndim - 1, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src,
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1140
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
/* function exit code */
}
/* "View.MemoryView":1170
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
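/* Thin wrapper: unpacks the data/strides/shape fields of the two memoryview
   slices and forwards them to _copy_strided_to_strided() above. */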
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
/* "View.MemoryView":1173
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
* _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<<
* src.shape, dst.shape, ndim, itemsize)
*
*/
_copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1170
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
/* function exit code */
}
/* "View.MemoryView":1177
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*/
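/* Size in bytes of the memory spanned by the slice: the item size multiplied
   by every extent in shape[0..ndim). */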
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_r;
Py_ssize_t __pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
/* "View.MemoryView":1179
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil:
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<<
*
* for shape in src.shape[:ndim]:
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_size = __pyx_t_1;
/* "View.MemoryView":1181
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*
* for shape in src.shape[:ndim]: # <<<<<<<<<<<<<<
* size *= shape
*
*/
__pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim);
for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_shape = (__pyx_t_2[0]);
/* "View.MemoryView":1182
*
* for shape in src.shape[:ndim]:
* size *= shape # <<<<<<<<<<<<<<
*
* return size
*/
__pyx_v_size = (__pyx_v_size * __pyx_v_shape);
}
/* "View.MemoryView":1184
* size *= shape
*
* return size # <<<<<<<<<<<<<<
*
* @cname('__pyx_fill_contig_strides_array')
*/
__pyx_r = __pyx_v_size;
goto __pyx_L0;
/* "View.MemoryView":1177
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1187
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
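/* Fills `strides` with the strides of a contiguous array of the given shape:
   ascending from `stride` (normally the item size) for Fortran order,
   descending (computed right to left) for C order. Returns the total extent
   in bytes, i.e. stride * prod(shape). For shape {3, 4}, stride 8 and
   order 'C' this yields strides {32, 8} and returns 96. */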
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
int __pyx_v_idx;
Py_ssize_t __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1196
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
__pyx_t_1 = ((__pyx_v_order == 'F') != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1197
*
* if order == 'F':
* for idx in range(ndim): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride *= shape[idx]
*/
__pyx_t_2 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_idx = __pyx_t_4;
/* "View.MemoryView":1198
* if order == 'F':
* for idx in range(ndim):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride *= shape[idx]
* else:
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1199
* for idx in range(ndim):
* strides[idx] = stride
* stride *= shape[idx] # <<<<<<<<<<<<<<
* else:
* for idx in range(ndim - 1, -1, -1):
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
/* "View.MemoryView":1196
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
goto __pyx_L3;
}
/* "View.MemoryView":1201
* stride *= shape[idx]
* else:
* for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride *= shape[idx]
*/
/*else*/ {
for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) {
__pyx_v_idx = __pyx_t_2;
/* "View.MemoryView":1202
* else:
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride *= shape[idx]
*
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1203
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride
* stride *= shape[idx] # <<<<<<<<<<<<<<
*
* return stride
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
}
__pyx_L3:;
/* "View.MemoryView":1205
* stride *= shape[idx]
*
* return stride # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_data_to_temp')
*/
__pyx_r = __pyx_v_stride;
goto __pyx_L0;
/* "View.MemoryView":1187
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1208
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
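/* Copies the data of `src` into a freshly malloc()ed contiguous buffer and
   initialises `tmpslice` to describe it: same shape, contiguous strides in
   the requested order, and no indirect dimensions (all suboffsets -1).
   Strides of length-1 dimensions are zeroed so broadcasting keeps working.
   Returns the buffer, which the caller must free(), or NULL after raising
   MemoryError. Used by memoryview_copy_contents() below to decouple
   overlapping source and destination slices. */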
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
int __pyx_v_i;
void *__pyx_v_result;
size_t __pyx_v_itemsize;
size_t __pyx_v_size;
void *__pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
struct __pyx_memoryview_obj *__pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":1219
* cdef void *result
*
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef size_t size = slice_get_size(src, ndim)
*
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1220
*
* cdef size_t itemsize = src.memview.view.itemsize
* cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<<
*
* result = malloc(size)
*/
__pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
/* "View.MemoryView":1222
* cdef size_t size = slice_get_size(src, ndim)
*
* result = malloc(size) # <<<<<<<<<<<<<<
* if not result:
* _err(MemoryError, NULL)
*/
__pyx_v_result = malloc(__pyx_v_size);
/* "View.MemoryView":1223
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
__pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1224
* result = malloc(size)
* if not result:
* _err(MemoryError, NULL) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error)
/* "View.MemoryView":1223
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
}
/* "View.MemoryView":1227
*
*
* tmpslice.data = <char *> result # <<<<<<<<<<<<<<
* tmpslice.memview = src.memview
* for i in range(ndim):
*/
__pyx_v_tmpslice->data = ((char *)__pyx_v_result);
/* "View.MemoryView":1228
*
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview # <<<<<<<<<<<<<<
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
*/
__pyx_t_4 = __pyx_v_src->memview;
__pyx_v_tmpslice->memview = __pyx_t_4;
/* "View.MemoryView":1229
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview
* for i in range(ndim): # <<<<<<<<<<<<<<
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1230
* tmpslice.memview = src.memview
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<<
* tmpslice.suboffsets[i] = -1
*
*/
(__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
/* "View.MemoryView":1231
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,
*/
(__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1233
* tmpslice.suboffsets[i] = -1
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<<
* ndim, order)
*
*/
(void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));
/* "View.MemoryView":1237
*
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1238
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
__pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1239
* for i in range(ndim):
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0 # <<<<<<<<<<<<<<
*
* if slice_is_contig(src[0], order, ndim):
*/
(__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1238
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
}
}
/* "View.MemoryView":1241
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1242
*
* if slice_is_contig(src[0], order, ndim):
* memcpy(result, src.data, size) # <<<<<<<<<<<<<<
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*/
(void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size));
/* "View.MemoryView":1241
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":1244
* memcpy(result, src.data, size)
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<<
*
* return result
*/
/*else*/ {
copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
}
__pyx_L9:;
/* "View.MemoryView":1246
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":1208
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = NULL;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1251
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
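/* Error helper: acquires the GIL and raises ValueError reporting a shape
   mismatch in dimension i. Always returns -1 so nogil callers can propagate
   the exception as an error code. */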
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_extents", 0);
/* "View.MemoryView":1254
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
* (i, extent1, extent2)) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err_dim')
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":1253
* cdef int _err_extents(int i, Py_ssize_t extent1,
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<<
* (i, extent1, extent2))
*
*/
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 1253, __pyx_L1_error)
/* "View.MemoryView":1251
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
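/* Error helper: acquires the GIL and raises `error` with a message built by
   %-formatting the ASCII template `msg` with the dimension number.
   Always returns -1. */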
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_dim", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1258
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
* raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err')
*/
__pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_INCREF(__pyx_v_error);
__pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 1258, __pyx_L1_error)
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1261
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
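/* Generic error helper: acquires the GIL and raises `error`, passing
   msg.decode('ascii') as its argument when msg is non-NULL and raising the
   bare error object otherwise. Always returns -1. */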
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1262
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
__pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":1263
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL:
* raise error(msg.decode('ascii')) # <<<<<<<<<<<<<<
* else:
* raise error
*/
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_error);
__pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
__pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 1263, __pyx_L1_error)
/* "View.MemoryView":1262
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
}
/* "View.MemoryView":1265
* raise error(msg.decode('ascii'))
* else:
* raise error # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_contents')
*/
/*else*/ {
__Pyx_Raise(__pyx_v_error, 0, 0, 0);
__PYX_ERR(1, 1265, __pyx_L1_error)
}
/* "View.MemoryView":1261
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1268
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
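/* Top-level slice assignment. The steps, in order:
     1. pad the lower-dimensional operand with leading length-1 axes
        (broadcast_leading);
     2. validate shapes, marking broadcast axes by zeroing the corresponding
        src strides, and reject indirect (suboffset >= 0) dimensions;
     3. if src and dst overlap in memory, snapshot src into a temporary
        contiguous buffer via copy_data_to_temp();
     4. with no broadcasting and both sides contiguous in the same order,
        perform one direct memcpy();
     5. otherwise fall back to the recursive strided copy, first transposing
        both operands when Fortran order suits them better so the innermost
        loop stays as contiguous as possible.
   Returns 0 on success, -1 after raising an exception. */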
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
void *__pyx_v_tmpdata;
size_t __pyx_v_itemsize;
int __pyx_v_i;
char __pyx_v_order;
int __pyx_v_broadcasting;
int __pyx_v_direct_copy;
__Pyx_memviewslice __pyx_v_tmp;
int __pyx_v_ndim;
int __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
void *__pyx_t_7;
int __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":1276
* Check for overlapping memory and verify the shapes.
* """
* cdef void *tmpdata = NULL # <<<<<<<<<<<<<<
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
*/
__pyx_v_tmpdata = NULL;
/* "View.MemoryView":1277
* """
* cdef void *tmpdata = NULL
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
*/
__pyx_t_1 = __pyx_v_src.memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1279
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
* cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<<
* cdef bint broadcasting = False
* cdef bint direct_copy = False
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
/* "View.MemoryView":1280
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False # <<<<<<<<<<<<<<
* cdef bint direct_copy = False
* cdef __Pyx_memviewslice tmp
*/
__pyx_v_broadcasting = 0;
/* "View.MemoryView":1281
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False
* cdef bint direct_copy = False # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice tmp
*
*/
__pyx_v_direct_copy = 0;
/* "View.MemoryView":1284
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
__pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1285
*
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<<
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
/* "View.MemoryView":1284
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
goto __pyx_L3;
}
/* "View.MemoryView":1286
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
__pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1287
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<<
*
* cdef int ndim = max(src_ndim, dst_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
/* "View.MemoryView":1286
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
}
__pyx_L3:;
/* "View.MemoryView":1289
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
* cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
__pyx_t_3 = __pyx_v_dst_ndim;
__pyx_t_4 = __pyx_v_src_ndim;
if (((__pyx_t_3 > __pyx_t_4) != 0)) {
__pyx_t_5 = __pyx_t_3;
} else {
__pyx_t_5 = __pyx_t_4;
}
__pyx_v_ndim = __pyx_t_5;
/* "View.MemoryView":1291
* cdef int ndim = max(src_ndim, dst_ndim)
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
*/
__pyx_t_5 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_5;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1292
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1293
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1294
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
* broadcasting = True # <<<<<<<<<<<<<<
* src.strides[i] = 0
* else:
*/
__pyx_v_broadcasting = 1;
/* "View.MemoryView":1295
* if src.shape[i] == 1:
* broadcasting = True
* src.strides[i] = 0 # <<<<<<<<<<<<<<
* else:
* _err_extents(i, dst.shape[i], src.shape[i])
*/
(__pyx_v_src.strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1293
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
goto __pyx_L7;
}
/* "View.MemoryView":1297
* src.strides[i] = 0
* else:
* _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<<
*
* if src.suboffsets[i] >= 0:
*/
/*else*/ {
__pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":1292
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
}
/* "View.MemoryView":1299
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
__pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1300
*
* if src.suboffsets[i] >= 0:
* _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<<
*
* if slices_overlap(&src, &dst, ndim, itemsize):
*/
__pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error)
/* "View.MemoryView":1299
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
}
}
/* "View.MemoryView":1302
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
__pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1304
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
__pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1305
*
* if not slice_is_contig(src, order, ndim):
* order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<<
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
/* "View.MemoryView":1304
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
}
/* "View.MemoryView":1307
* order = get_best_order(&dst, ndim)
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<<
* src = tmp
*
*/
__pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1307, __pyx_L1_error)
__pyx_v_tmpdata = __pyx_t_7;
/* "View.MemoryView":1308
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
* src = tmp # <<<<<<<<<<<<<<
*
* if not broadcasting:
*/
__pyx_v_src = __pyx_v_tmp;
/* "View.MemoryView":1302
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
}
/* "View.MemoryView":1310
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1313
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1314
*
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<<
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim)
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim);
/* "View.MemoryView":1313
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
goto __pyx_L12;
}
/* "View.MemoryView":1315
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1316
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<<
*
* if direct_copy:
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim);
/* "View.MemoryView":1315
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
}
__pyx_L12:;
/* "View.MemoryView":1318
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_2 = (__pyx_v_direct_copy != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1320
* if direct_copy:
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1321
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
*/
(void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)));
/* "View.MemoryView":1322
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
* free(tmpdata)
* return 0
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1323
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1324
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* if order == 'F' == get_best_order(&dst, ndim):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1318
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
}
/* "View.MemoryView":1310
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1326
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = (__pyx_v_order == 'F');
if (__pyx_t_2) {
__pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
}
__pyx_t_8 = (__pyx_t_2 != 0);
if (__pyx_t_8) {
/* "View.MemoryView":1329
*
*
* transpose_memslice(&src) # <<<<<<<<<<<<<<
* transpose_memslice(&dst)
*
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error)
/* "View.MemoryView":1330
*
* transpose_memslice(&src)
* transpose_memslice(&dst) # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error)
/* "View.MemoryView":1326
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1332
* transpose_memslice(&dst)
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1333
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
*/
copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1334
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
* free(tmpdata)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1336
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1337
*
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_broadcast_leading')
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1268
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1340
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
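/* Shifts the slice's shape/strides/suboffsets towards the high dimensions
   and prepends `ndim_other - ndim` broadcast axes: extent 1, suboffset -1,
   and the stride copied from the original leading axis (its value is
   irrelevant for an extent-1 axis). */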
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
int __pyx_v_i;
int __pyx_v_offset;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":1344
* int ndim_other) nogil:
* cdef int i
* cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim);
/* "View.MemoryView":1346
* cdef int offset = ndim_other - ndim
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1347
*
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<<
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*/
(__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]);
/* "View.MemoryView":1348
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<<
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
*/
(__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1349
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<<
*
* for i in range(offset):
*/
(__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]);
}
/* "View.MemoryView":1351
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
* for i in range(offset): # <<<<<<<<<<<<<<
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
*/
__pyx_t_1 = __pyx_v_offset;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1352
*
* for i in range(offset):
* mslice.shape[i] = 1 # <<<<<<<<<<<<<<
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1
*/
(__pyx_v_mslice->shape[__pyx_v_i]) = 1;
/* "View.MemoryView":1353
* for i in range(offset):
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<<
* mslice.suboffsets[i] = -1
*
*/
(__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]);
/* "View.MemoryView":1354
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1340
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
/* function exit code */
}
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
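/* Before overwriting (inc=False) or after writing (inc=True) a slice whose
   dtype is object, adjusts the reference count of every element. A no-op for
   non-object dtypes, which is what makes it safe to call from nogil code. */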
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
int __pyx_t_1;
/* "View.MemoryView":1366
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
__pyx_t_1 = (__pyx_v_dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1367
*
* if dtype_is_object:
* refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<<
* dst.strides, ndim, inc)
*
*/
__pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1366
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
}
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
/* function exit code */
}
/* "View.MemoryView":1371
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
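/* GIL-acquiring wrapper around refcount_objects_in_slice() below. */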
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
__Pyx_RefNannyDeclarations
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);
/* "View.MemoryView":1374
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
* refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1371
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
/* "View.MemoryView":1377
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
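/* Walks the slice recursively; in the 1-D base case it Py_INCREFs or
   Py_DECREFs each stored PyObject* in place, advancing by strides[0]. */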
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
/* "View.MemoryView":1381
* cdef Py_ssize_t i
*
* for i in range(shape[0]): # <<<<<<<<<<<<<<
* if ndim == 1:
* if inc:
*/
__pyx_t_1 = (__pyx_v_shape[0]);
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1382
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
__pyx_t_4 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1383
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
__pyx_t_4 = (__pyx_v_inc != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1384
* if ndim == 1:
* if inc:
* Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* Py_DECREF((<PyObject **> data)[0])
*/
Py_INCREF((((PyObject **)__pyx_v_data)[0]));
/* "View.MemoryView":1383
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":1386
* Py_INCREF((<PyObject **> data)[0])
* else:
* Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
*/
/*else*/ {
Py_DECREF((((PyObject **)__pyx_v_data)[0]));
}
__pyx_L6:;
/* "View.MemoryView":1382
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
goto __pyx_L5;
}
/* "View.MemoryView":1388
* Py_DECREF((<PyObject **> data)[0])
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, inc)
*
*/
/*else*/ {
/* "View.MemoryView":1389
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
* ndim - 1, inc) # <<<<<<<<<<<<<<
*
* data += strides[0]
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
}
__pyx_L5:;
/* "View.MemoryView":1391
* ndim - 1, inc)
*
* data += strides[0] # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0]));
}
/* "View.MemoryView":1377
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":1397
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
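/* Assigns one item to every element of the slice. For object dtypes the
   existing elements are DECREFed before the raw copy and the newly stored
   objects INCREFed afterwards, via refcount_copying(). */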
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
/* "View.MemoryView":1400
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1401
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False)
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<<
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1403
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
*
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1397
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
/* function exit code */
}
/* "View.MemoryView":1407
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
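/* Recursive worker for slice_assign_scalar(): in the 1-D base case it
   memcpy()s `item` into each element, stepping by the stride; for ndim > 1
   it recurses over the trailing dimensions for every leading index. */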
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_extent;
int __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
Py_ssize_t __pyx_t_4;
/* "View.MemoryView":1411
* size_t itemsize, void *item) nogil:
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t extent = shape[0]
*
*/
__pyx_v_stride = (__pyx_v_strides[0]);
/* "View.MemoryView":1412
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0]
* cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_extent = (__pyx_v_shape[0]);
/* "View.MemoryView":1414
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1415
*
* if ndim == 1:
* for i in range(extent): # <<<<<<<<<<<<<<
* memcpy(data, item, itemsize)
* data += stride
*/
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1416
* if ndim == 1:
* for i in range(extent):
* memcpy(data, item, itemsize) # <<<<<<<<<<<<<<
* data += stride
* else:
*/
(void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize));
/* "View.MemoryView":1417
* for i in range(extent):
* memcpy(data, item, itemsize)
* data += stride # <<<<<<<<<<<<<<
* else:
* for i in range(extent):
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
/* "View.MemoryView":1414
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
goto __pyx_L3;
}
/* "View.MemoryView":1419
* data += stride
* else:
* for i in range(extent): # <<<<<<<<<<<<<<
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
*/
/*else*/ {
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1420
* else:
* for i in range(extent):
* _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, itemsize, item)
* data += stride
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1422
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
* data += stride # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1407
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/* function exit code */
}
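/*
 * Exposition: _slice_assign_scalar() above peels one dimension per
 * recursive call.  Once ndim == 1 the fill is a strided memcpy() of
 * `itemsize` bytes per element; for higher ranks the outer loop walks
 * axis 0 and recurses with shape+1/strides+1.  The following is a minimal
 * plain-C sketch of the same idea (hypothetical helper name, not part of
 * Cython's emitted code; relies only on this file's existing includes for
 * Py_ssize_t and memcpy):
 */
static CYTHON_UNUSED void __pyx_demo_slice_fill_scalar(char *data,
    const Py_ssize_t *shape, const Py_ssize_t *strides, int ndim,
    size_t itemsize, const void *item) {
  Py_ssize_t i;
  for (i = 0; i < shape[0]; i++) {
    if (ndim == 1)
      memcpy(data, item, itemsize);   /* innermost: copy one element */
    else
      __pyx_demo_slice_fill_scalar(data, shape + 1, strides + 1,
                                   ndim - 1, itemsize, item);
    data += strides[0];               /* stride along the current axis */
  }
}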
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
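/*
 * Exposition: the wrapper above parses arguments in two passes.  Positional
 * arguments are copied via a fall-through switch on the tuple size; any
 * keyword arguments are then matched against __pyx_pyargnames in
 * declaration order, with __Pyx_RaiseArgtupleInvalid() reporting the first
 * missing required argument and __Pyx_ParseOptionalKeywords() rejecting
 * unknown keywords.
 */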
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0);
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<<
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
*/
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 6, __pyx_L1_error)
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
}
/* "(tree fragment)":7
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<<
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
}
/* "(tree fragment)":10
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result # <<<<<<<<<<<<<<
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
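/*
 * Exposition: __pyx_unpickle_Enum() first compares the pickled checksum
 * against 0xb068931, a constant Cython derives from the Enum type's
 * attribute layout (here just `name`); a mismatch raises pickle.PickleError
 * rather than silently restoring incompatible state.  On success it
 * allocates via Enum.__new__(type) and defers attribute restoration to
 * __pyx_unpickle_Enum__set_state() below.
 */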
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0);
/* "(tree fragment)":12
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->name);
__Pyx_DECREF(__pyx_v___pyx_result->name);
__pyx_v___pyx_result->name = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 13, __pyx_L1_error)
}
__pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_4 = ((__pyx_t_3 > 1) != 0);
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_5 = (__pyx_t_4 != 0);
__pyx_t_2 = __pyx_t_5;
__pyx_L4_bool_binop_done:;
if (__pyx_t_2) {
/* "(tree fragment)":14
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<<
*/
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 14, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
}
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
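/*
 * Exposition: __set_state() restores state[0] into the C-level `name` slot
 * and, when the tuple carries a second entry and the instance has a
 * __dict__, merges state[1] into it via dict.update().  The explicit
 * Py_None checks mirror the errors Python itself would raise for a None
 * state tuple.
 */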
static struct __pyx_vtabstruct_array __pyx_vtable_array;
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_array_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_array_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_array;
p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_array(PyObject *o) {
struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_array___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->mode);
Py_CLEAR(p->_format);
(*Py_TYPE(o)->tp_free)(o);
}
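/*
 * Exposition: the dealloc above temporarily bumps the refcount from 0 back
 * to 1 so that Python code reached from __dealloc__ cannot trigger a second
 * destruction of a half-torn-down object, and brackets the call in
 * PyErr_Fetch()/PyErr_Restore() so any pending exception survives the
 * destructor unchanged.  CPython's finalizer hook is honoured first, which
 * also allows the object to be resurrected before teardown proceeds.
 */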
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i);
if (!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_array___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
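/*
 * Exposition: in the mp_ass_subscript slot CPython passes v == NULL for
 * `del obj[i]`; since cython.array does not support item deletion, that
 * path raises NotImplementedError instead of forwarding to __setitem__.
 */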
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n);
if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
v = __pyx_array___getattr__(o, n);
}
return v;
}
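/*
 * Exposition: attribute lookup tries the generic path first and falls back
 * to the class's __getattr__ only when the failure is specifically an
 * AttributeError, mirroring Python-level semantics; any other exception
 * propagates untouched.
 */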
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o);
}
static PyMethodDef __pyx_methods_array[] = {
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_array[] = {
{(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_array = {
__pyx_array___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_array, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_array = {
__pyx_array___len__, /*mp_length*/
__pyx_array___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_array = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_array_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_array = {
PyVarObject_HEAD_INIT(0, 0)
"potential.array", /*tp_name*/
sizeof(struct __pyx_array_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_array, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_array, /*tp_as_sequence*/
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
__pyx_tp_getattro_array, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_array, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_array, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_array, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_array, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
struct __pyx_MemviewEnum_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_MemviewEnum_obj *)o);
p->name = Py_None; Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_Enum(PyObject *o) {
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->name);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
if (p->name) {
e = (*v)(p->name, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_Enum(PyObject *o) {
PyObject* tmp;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
tmp = ((PyObject*)p->name);
p->name = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
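/*
 * Exposition: the traverse/clear pair implements CPython's cyclic-GC
 * contract.  tp_clear swaps `name` to Py_None *before* dropping the old
 * reference, so re-entrant access during collection still sees a valid
 * object rather than a dangling pointer.
 */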
static PyMethodDef __pyx_methods_Enum[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
PyVarObject_HEAD_INIT(0, 0)
"potential.Enum", /*tp_name*/
sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_Enum, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_MemviewEnum___repr__, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_Enum, /*tp_traverse*/
__pyx_tp_clear_Enum, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_Enum, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_MemviewEnum___init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_Enum, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryview_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryview_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_memoryview;
p->obj = Py_None; Py_INCREF(Py_None);
p->_size = Py_None; Py_INCREF(Py_None);
p->_array_interface = Py_None; Py_INCREF(Py_None);
p->view.obj = NULL;
if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_memoryview___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->obj);
Py_CLEAR(p->_size);
Py_CLEAR(p->_array_interface);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
if (p->obj) {
e = (*v)(p->obj, a); if (e) return e;
}
if (p->_size) {
e = (*v)(p->_size, a); if (e) return e;
}
if (p->_array_interface) {
e = (*v)(p->_array_interface, a); if (e) return e;
}
if (p->view.obj) {
e = (*v)(p->view.obj, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_memoryview(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
tmp = ((PyObject*)p->obj);
p->obj = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_size);
p->_size = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_array_interface);
p->_array_interface = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
Py_CLEAR(p->view.obj);
return 0;
}
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i);
if (!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_memoryview___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o);
}
static PyMethodDef __pyx_methods_memoryview[] = {
{"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
{"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
{"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
{"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
{(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0},
{(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0},
{(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0},
{(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0},
{(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0},
{(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0},
{(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0},
{(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0},
{(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
__pyx_memoryview___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_memoryview, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
__pyx_memoryview___len__, /*mp_length*/
__pyx_memoryview___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_memoryview_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_memoryview = {
PyVarObject_HEAD_INIT(0, 0)
"potential.memoryview", /*tp_name*/
sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_memoryview, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_memoryview___repr__, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
&__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
__pyx_memoryview___str__, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_memoryview, /*tp_traverse*/
__pyx_tp_clear_memoryview, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_memoryview, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_memoryview, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_memoryview, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryviewslice_obj *p;
PyObject *o = __pyx_tp_new_memoryview(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryviewslice_obj *)o);
p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
p->from_object = Py_None; Py_INCREF(Py_None);
p->from_slice.memview = NULL;
return o;
}
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_memoryviewslice___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->from_object);
PyObject_GC_Track(o);
__pyx_tp_dealloc_memoryview(o);
}
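/*
 * Exposition: a subclass dealloc must untrack the object from the GC before
 * touching its fields, release only the fields it owns (from_object here),
 * then re-track and delegate to the base dealloc, which untracks again and
 * frees.  The re-track keeps the GC's bookkeeping balanced across the
 * chained destructors.
 */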
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e;
if (p->from_object) {
e = (*v)(p->from_object, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
__pyx_tp_clear_memoryview(o);
tmp = ((PyObject*)p->from_object);
p->from_object = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
__PYX_XDEC_MEMVIEW(&p->from_slice, 1);
return 0;
}
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o);
}
static PyMethodDef __pyx_methods__memoryviewslice[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
{(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
PyVarObject_HEAD_INIT(0, 0)
"potential._memoryviewslice", /*tp_name*/
sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___repr__, /*tp_repr*/
#else
0, /*tp_repr*/
#endif
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___str__, /*tp_str*/
#else
0, /*tp_str*/
#endif
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
"Internal class for passing memoryview slices to Python", /*tp_doc*/
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
__pyx_tp_clear__memoryviewslice, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods__memoryviewslice, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets__memoryviewslice, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new__memoryviewslice, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_potential(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec_potential},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"potential",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods, /* m_methods */
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
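/*
 * Exposition: with CYTHON_PEP489_MULTI_PHASE_INIT the module is created and
 * executed in two phases via the Py_mod_create/Py_mod_exec slots (PEP 489),
 * so m_size is 0 and per-interpreter re-initialisation is possible;
 * otherwise Cython falls back to single-phase init with m_size == -1.
 */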
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
{&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
{&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
{&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
{&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
{&__pyx_kp_s_Expected_at_least_d_argument_s_g, __pyx_k_Expected_at_least_d_argument_s_g, sizeof(__pyx_k_Expected_at_least_d_argument_s_g), 0, 0, 1, 0},
{&__pyx_kp_s_Function_call_with_ambiguous_arg, __pyx_k_Function_call_with_ambiguous_arg, sizeof(__pyx_k_Function_call_with_ambiguous_arg), 0, 0, 1, 0},
{&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0},
{&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
{&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
{&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
{&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
{&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
{&__pyx_n_s_N, __pyx_k_N, sizeof(__pyx_k_N), 0, 0, 1, 1},
{&__pyx_kp_s_No_matching_signature_found, __pyx_k_No_matching_signature_found, sizeof(__pyx_k_No_matching_signature_found), 0, 0, 1, 0},
{&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
{&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
{&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
{&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1},
{&__pyx_n_s_W2, __pyx_k_W2, sizeof(__pyx_k_W2), 0, 0, 1, 1},
{&__pyx_kp_s__2, __pyx_k__2, sizeof(__pyx_k__2), 0, 0, 1, 0},
{&__pyx_kp_s__3, __pyx_k__3, sizeof(__pyx_k__3), 0, 0, 1, 0},
{&__pyx_kp_s__4, __pyx_k__4, sizeof(__pyx_k__4), 0, 0, 1, 0},
{&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
{&__pyx_n_s_args, __pyx_k_args, sizeof(__pyx_k_args), 0, 0, 1, 1},
{&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
{&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
{&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
{&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_defaults, __pyx_k_defaults, sizeof(__pyx_k_defaults), 0, 0, 1, 1},
{&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
{&__pyx_n_s_dims, __pyx_k_dims, sizeof(__pyx_k_dims), 0, 0, 1, 1},
{&__pyx_n_s_dist2, __pyx_k_dist2, sizeof(__pyx_k_dist2), 0, 0, 1, 1},
{&__pyx_n_s_double, __pyx_k_double, sizeof(__pyx_k_double), 0, 0, 1, 1},
{&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
{&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
{&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
{&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
{&__pyx_n_s_epsilon, __pyx_k_epsilon, sizeof(__pyx_k_epsilon), 0, 0, 1, 1},
{&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
{&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
{&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1},
{&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
{&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
{&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
{&__pyx_kp_s_gadgetutils_potential_pyx, __pyx_k_gadgetutils_potential_pyx, sizeof(__pyx_k_gadgetutils_potential_pyx), 0, 0, 1, 0},
{&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
{&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
{&__pyx_n_s_h, __pyx_k_h, sizeof(__pyx_k_h), 0, 0, 1, 1},
{&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
{&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
{&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
{&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1},
{&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1},
{&__pyx_n_s_kind, __pyx_k_kind, sizeof(__pyx_k_kind), 0, 0, 1, 1},
{&__pyx_n_s_kwargs, __pyx_k_kwargs, sizeof(__pyx_k_kwargs), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
{&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
{&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
{&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
{&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0},
{&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
{&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
{&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
{&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
{&__pyx_n_s_pos, __pyx_k_pos, sizeof(__pyx_k_pos), 0, 0, 1, 1},
{&__pyx_n_s_potential, __pyx_k_potential, sizeof(__pyx_k_potential), 0, 0, 1, 1},
{&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
{&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
{&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
{&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
{&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
{&__pyx_n_s_r, __pyx_k_r, sizeof(__pyx_k_r), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
{&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
{&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
{&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
{&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
{&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
{&__pyx_n_s_signatures, __pyx_k_signatures, sizeof(__pyx_k_signatures), 0, 0, 1, 1},
{&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
{&__pyx_n_s_split, __pyx_k_split, sizeof(__pyx_k_split), 0, 0, 1, 1},
{&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
{&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
{&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
{&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
{&__pyx_n_s_strip, __pyx_k_strip, sizeof(__pyx_k_strip), 0, 0, 1, 1},
{&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
{&__pyx_n_s_sum_inv_pairdists, __pyx_k_sum_inv_pairdists, sizeof(__pyx_k_sum_inv_pairdists), 0, 0, 1, 1},
{&__pyx_n_s_temp, __pyx_k_temp, sizeof(__pyx_k_temp), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_n_s_u, __pyx_k_u, sizeof(__pyx_k_u), 0, 0, 1, 1},
{&__pyx_n_s_u2, __pyx_k_u2, sizeof(__pyx_k_u2), 0, 0, 1, 1},
{&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
{&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
{&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
{&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
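/*
 * Exposition (field meanings inferred from Cython's runtime, stated here as
 * an assumption): each __Pyx_StringTabEntry pairs a cached PyObject* slot
 * with its C literal and length; the trailing flags select byte-string vs
 * unicode construction and whether the result is interned (identifier
 * names, __pyx_n_*) or left uninterned (message text, __pyx_kp_*).  All
 * entries are materialised once at module init.
 */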
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(0, 16, __pyx_L1_error)
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 16, __pyx_L1_error)
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error)
__pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error)
__pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error)
__pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error)
__pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error)
__pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
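/*
 * Exposition: builtins used by the module are resolved once here and
 * cached.  In the __PYX_ERR(f, line, ...) triples the first index selects
 * the source file (0 appears to map to the .pyx itself, 1 to the
 * View.MemoryView/stringsource utility code), and the line number refers to
 * that original source, not to this generated C.
 */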
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "potential.pyx":16
* @cython.cdivision(True)
* @cython.initializedcheck(False)
* def sum_inv_pairdists(my_float [:,::1] pos, epsilon=1e-2): # <<<<<<<<<<<<<<
* """Calculate the sum of inverse pair distances for a collection of particles.
* Can be used to calculate potentials / potential energies.
*/
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_No_matching_signature_found); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Function_call_with_ambiguous_arg); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_GIVEREF(__pyx_tuple__13);
/* "View.MemoryView":418
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__14);
__Pyx_GIVEREF(__pyx_tuple__14);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 495, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__15);
__Pyx_GIVEREF(__pyx_tuple__15);
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__16);
__Pyx_GIVEREF(__pyx_tuple__16);
/* "View.MemoryView":570
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__17);
__Pyx_GIVEREF(__pyx_tuple__17);
/* "View.MemoryView":577
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__pyx_tuple__18 = PyTuple_New(1); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__18);
__Pyx_INCREF(__pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_int_neg_1);
PyTuple_SET_ITEM(__pyx_tuple__18, 0, __pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_tuple__18);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__19);
__Pyx_GIVEREF(__pyx_tuple__19);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__20);
__Pyx_GIVEREF(__pyx_tuple__20);
/* "View.MemoryView":682
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_slice__21 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__21)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__21);
__Pyx_GIVEREF(__pyx_slice__21);
/* "View.MemoryView":703
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 703, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__22);
__Pyx_GIVEREF(__pyx_tuple__22);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__23);
__Pyx_GIVEREF(__pyx_tuple__23);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__24);
__Pyx_GIVEREF(__pyx_tuple__24);
/* "potential.pyx":16
* @cython.cdivision(True)
* @cython.initializedcheck(False)
* def sum_inv_pairdists(my_float [:,::1] pos, epsilon=1e-2): # <<<<<<<<<<<<<<
* """Calculate the sum of inverse pair distances for a collection of particles.
* Can be used to calculate potentials / potential energies.
*/
__pyx_tuple__25 = PyTuple_Pack(15, __pyx_n_s_pos, __pyx_n_s_epsilon, __pyx_n_s_N, __pyx_n_s_dims, __pyx_n_s_potential, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_r, __pyx_n_s_dist2, __pyx_n_s_temp, __pyx_n_s_h, __pyx_n_s_u, __pyx_n_s_W2, __pyx_n_s_u2); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__25);
__Pyx_GIVEREF(__pyx_tuple__25);
__pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(2, 0, 15, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_gadgetutils_potential_pyx, __pyx_n_s_sum_inv_pairdists, 16, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(0, 16, __pyx_L1_error)
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_tuple__27 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__27);
__Pyx_GIVEREF(__pyx_tuple__27);
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_tuple__28 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__28);
__Pyx_GIVEREF(__pyx_tuple__28);
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__29 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__29);
__Pyx_GIVEREF(__pyx_tuple__29);
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_tuple__30 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__30);
__Pyx_GIVEREF(__pyx_tuple__30);
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__31 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(1, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__31);
__Pyx_GIVEREF(__pyx_tuple__31);
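/* Layout sentinels: the five Enum singletons built from __pyx_tuple__27
   through __pyx_tuple__31 tag how a memoryview axis is addressed. A
   C-contiguous "my_float[:, ::1]" argument, for example, is described by
   <contiguous and direct> on its last axis and <strided and direct> on
   the first. */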
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_tuple__32 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__32)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__32);
__Pyx_GIVEREF(__pyx_tuple__32);
__pyx_codeobj__33 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__32, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__33)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
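/* Illustrative sketch, not Cython output: the sum_inv_pairdists
   specialization whose constants are built above boils down to an O(N^2)
   reduction over unordered particle pairs. The spline-softening variables
   named in the .pyx source (h, u, W2, u2) are not reproduced here; a
   simple Plummer-style epsilon softening stands in for the real kernel.
   Assumes <math.h> is available, as it is whenever libc.math is
   cimported. */
static CYTHON_UNUSED double __pyx_sketch_sum_inv_pairdists(
    const double *pos, size_t N, size_t dims, double epsilon)
{
    double potential = 0.0;
    size_t i, j, k;
    for (i = 0; i < N; i++) {
        for (j = i + 1; j < N; j++) { /* visit each unordered pair once */
            double dist2 = 0.0;
            for (k = 0; k < dims; k++) {
                double r = pos[i*dims + k] - pos[j*dims + k];
                dist2 += r * r;
            }
            potential += 1.0 / sqrt(dist2 + epsilon * epsilon);
        }
    }
    return potential;
}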
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_float_2_8 = PyFloat_FromDouble(2.8); if (unlikely(!__pyx_float_2_8)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__pyx_vtabptr_array = &__pyx_vtable_array;
__pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_array.tp_print = 0;
#endif
if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_MemviewEnum.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryview.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryviewslice.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
#ifndef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#elif PY_MAJOR_VERSION < 3
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
#define __Pyx_PyMODINIT_FUNC void
#endif
#else
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
#endif
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initpotential(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initpotential(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_potential(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_potential(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
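/* This guard exists because the module keeps its state (globals, the
   fused-function signature table, the memoryview type objects) in
   process-wide C statics, so a second interpreter in the same process
   would share, and corrupt, that state. */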
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
if (allow_none || value != Py_None) {
result = PyDict_SetItemString(moddict, to_name, value);
}
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
static CYTHON_SMALL_CODE int __pyx_pymod_exec_potential(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
static PyThread_type_lock __pyx_t_5[8];
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module 'potential' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_potential(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
PyEval_InitThreads();
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("potential", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_potential) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "potential")) {
if (unlikely(PyDict_SetItemString(modules, "potential", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
(void)__Pyx_modinit_type_import_code();
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "potential.pyx":16
* @cython.cdivision(True)
* @cython.initializedcheck(False)
* def sum_inv_pairdists(my_float [:,::1] pos, epsilon=1e-2): # <<<<<<<<<<<<<<
* """Calculate the sum of inverse pair distances for a collection of particles.
* Can be used to calculate potentials / potential energies.
*/
__pyx_t_1 = PyFloat_FromDouble(1e-2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = PyFloat_FromDouble(1e-2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_k_ = __pyx_t_1;
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __pyx_FusedFunction_New(&__pyx_fuse_0__pyx_mdef_9potential_3sum_inv_pairdists, 0, __pyx_n_s_sum_inv_pairdists, NULL, __pyx_n_s_potential, __pyx_d, ((PyObject *)__pyx_codeobj__26)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!__Pyx_CyFunction_InitDefaults(__pyx_t_3, sizeof(__pyx_defaults2), 1)) __PYX_ERR(0, 16, __pyx_L1_error)
__pyx_t_4 = PyFloat_FromDouble(1e-2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_CyFunction_Defaults(__pyx_defaults2, __pyx_t_3)->__pyx_arg_epsilon = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
__Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_t_2);
__Pyx_CyFunction_SetDefaultsGetter(__pyx_t_3, __pyx_pf_9potential_12__defaults__);
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_float, __pyx_t_3) < 0) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __pyx_FusedFunction_New(&__pyx_fuse_1__pyx_mdef_9potential_5sum_inv_pairdists, 0, __pyx_n_s_sum_inv_pairdists, NULL, __pyx_n_s_potential, __pyx_d, ((PyObject *)__pyx_codeobj__26)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!__Pyx_CyFunction_InitDefaults(__pyx_t_3, sizeof(__pyx_defaults3), 1)) __PYX_ERR(0, 16, __pyx_L1_error)
__pyx_t_4 = PyFloat_FromDouble(1e-2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_CyFunction_Defaults(__pyx_defaults3, __pyx_t_3)->__pyx_arg_epsilon = __pyx_t_4;
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
__Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_t_2);
__Pyx_CyFunction_SetDefaultsGetter(__pyx_t_3, __pyx_pf_9potential_14__defaults__);
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_double, __pyx_t_3) < 0) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __pyx_FusedFunction_New(&__pyx_mdef_9potential_1sum_inv_pairdists, 0, __pyx_n_s_sum_inv_pairdists, NULL, __pyx_n_s_potential, __pyx_d, ((PyObject *)__pyx_codeobj__26)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_t_2);
((__pyx_FusedFunctionObject *) __pyx_t_3)->__signatures__ = __pyx_t_1;
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_d, __pyx_n_s_sum_inv_pairdists, __pyx_t_3) < 0) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
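/* Usage sketch (an assumption mirroring the registration above; the NumPy
   calls on the caller's side are hypothetical): the fused wrapper
   dispatches on the buffer's item type, and a specialization can also be
   selected through its __signatures__ key:

       >>> import numpy as np, potential
       >>> pos = np.random.rand(100, 3)        # float64 buffer
       >>> potential.sum_inv_pairdists(pos)    # picks the "double" branch
       >>> f32 = potential.sum_inv_pairdists["float"]
       >>> f32(pos.astype(np.float32), epsilon=1e-2)
*/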
/* "potential.pyx":1
* from cython.parallel import parallel, prange # <<<<<<<<<<<<<<
* cimport cython
* from libc.math cimport sqrt
*/
__pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":209
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
* def __dealloc__(array self):
*/
__pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__27, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__28, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__30, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__31, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":316
*
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<<
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [
* PyThread_allocate_lock(),
*/
__pyx_memoryview_thread_locks_used = 0;
/* "View.MemoryView":317
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<<
* PyThread_allocate_lock(),
* PyThread_allocate_lock(),
*/
__pyx_t_5[0] = PyThread_allocate_lock();
__pyx_t_5[1] = PyThread_allocate_lock();
__pyx_t_5[2] = PyThread_allocate_lock();
__pyx_t_5[3] = PyThread_allocate_lock();
__pyx_t_5[4] = PyThread_allocate_lock();
__pyx_t_5[5] = PyThread_allocate_lock();
__pyx_t_5[6] = PyThread_allocate_lock();
__pyx_t_5[7] = PyThread_allocate_lock();
memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_5, sizeof(__pyx_memoryview_thread_locks[0]) * (8));
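/* The eight locks copied above form a preallocated pool: each new
   memoryview claims one slot for guarding its slice acquisition count
   (used where atomic operations are unavailable) and only calls
   PyThread_allocate_lock() again once the pool is exhausted. */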
/* "View.MemoryView":549
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 549, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(1, 549, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":995
* return self.from_object
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 995, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(1, 995, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_2) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init potential", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init potential");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule(modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, "RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
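/* For this module's entry point, calling sum_inv_pairdists() with no
   arguments would produce roughly:
       TypeError: sum_inv_pairdists() takes at least 1 positional
       argument (0 given) */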
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* DictGetItem */
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
PyObject *value;
value = PyDict_GetItemWithError(d, key);
if (unlikely(!value)) {
if (!PyErr_Occurred()) {
if (unlikely(PyTuple_Check(key))) {
PyObject* args = PyTuple_Pack(1, key);
if (likely(args)) {
PyErr_SetObject(PyExc_KeyError, args);
Py_DECREF(args);
}
} else {
PyErr_SetObject(PyExc_KeyError, key);
}
}
return NULL;
}
Py_INCREF(value);
return value;
}
#endif
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
int flags = PyCFunction_GET_FLAGS(func);
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
/* _PyCFunction_FastCallDict() must not be called with an exception set,
because it may clear it (directly or indirectly) and so the
caller loses its exception */
assert(!PyErr_Occurred());
if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
} else {
return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
}
}
#endif
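/* METH_FASTCALL passes arguments as a plain C array plus a count instead
   of building an argument tuple. The version check above exists because
   before CPython 3.7 the fast-call convention always carried a kwnames
   parameter, so older interpreters must be called through the
   keywords-capable signature. */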
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
/* function called with no arguments, but all parameters have
a default value: use default values as arguments. */
args = &PyTuple_GET_ITEM(argdefs, 0);
result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif
#endif
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = Py_TYPE(func)->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
if (likely(PyCFunction_Check(func))) {
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (__Pyx_PyFastCFunction_Check(func)) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
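/* The branches above map onto Python raise forms: a bare exception class
   is instantiated with the value ("raise C(value)"), an instance supplies
   its own type ("raise inst"), a non-None cause implements
   "raise ... from cause", and an explicit traceback re-attaches it much
   as "raise e.with_traceback(tb)" would. */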
/* UnicodeAsUCS4 */
static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject* x) {
Py_ssize_t length;
#if CYTHON_PEP393_ENABLED
length = PyUnicode_GET_LENGTH(x);
if (likely(length == 1)) {
return PyUnicode_READ_CHAR(x, 0);
}
#else
length = PyUnicode_GET_SIZE(x);
if (likely(length == 1)) {
return PyUnicode_AS_UNICODE(x)[0];
}
#if Py_UNICODE_SIZE == 2
else if (PyUnicode_GET_SIZE(x) == 2) {
Py_UCS4 high_val = PyUnicode_AS_UNICODE(x)[0];
if (high_val >= 0xD800 && high_val <= 0xDBFF) {
Py_UCS4 low_val = PyUnicode_AS_UNICODE(x)[1];
if (low_val >= 0xDC00 && low_val <= 0xDFFF) {
return 0x10000 + (((high_val & ((1<<10)-1)) << 10) | (low_val & ((1<<10)-1)));
}
}
}
#endif
#endif
PyErr_Format(PyExc_ValueError,
"only single character unicode strings can be converted to Py_UCS4, "
"got length %" CYTHON_FORMAT_SSIZE_T "d", length);
return (Py_UCS4)-1;
}
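/* Worked example for the narrow-build branch: U+1F600 is stored as the
   surrogate pair D83D DE00, and
       0x10000 + (((0xD83D & 0x3FF) << 10) | (0xDE00 & 0x3FF)) == 0x1F600
   recovers the code point. */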
/* object_ord */
static long __Pyx__PyObject_Ord(PyObject* c) {
Py_ssize_t size;
if (PyBytes_Check(c)) {
size = PyBytes_GET_SIZE(c);
if (likely(size == 1)) {
return (unsigned char) PyBytes_AS_STRING(c)[0];
}
#if PY_MAJOR_VERSION < 3
} else if (PyUnicode_Check(c)) {
return (long)__Pyx_PyUnicode_AsPy_UCS4(c);
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
} else if (PyByteArray_Check(c)) {
size = PyByteArray_GET_SIZE(c);
if (likely(size == 1)) {
return (unsigned char) PyByteArray_AS_STRING(c)[0];
}
#endif
} else {
PyErr_Format(PyExc_TypeError,
"ord() expected string of length 1, but %.200s found", Py_TYPE(c)->tp_name);
return (long)(Py_UCS4)-1;
}
PyErr_Format(PyExc_TypeError,
"ord() expected a character, but string of length %zd found", size);
return (long)(Py_UCS4)-1;
}
/* SetItemInt */
static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) {
int r;
if (!j) return -1;
r = PyObject_SetItem(o, j, v);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list,
CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? i : i + PyList_GET_SIZE(o));
if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o)))) {
PyObject* old = PyList_GET_ITEM(o, n);
Py_INCREF(v);
PyList_SET_ITEM(o, n, v);
Py_DECREF(old);
return 1;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_ass_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return -1;
PyErr_Clear();
}
}
return m->sq_ass_item(o, i, v);
}
}
#else
#if CYTHON_COMPILING_IN_PYPY
if (is_list || (PySequence_Check(o) && !PyDict_Check(o)))
#else
if (is_list || PySequence_Check(o))
#endif
{
return PySequence_SetItem(o, i, v);
}
#endif
return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v);
}
/* IterFinish */
static CYTHON_INLINE int __Pyx_IterFinish(void) {
#if CYTHON_FAST_THREAD_STATE
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* exc_type = tstate->curexc_type;
if (unlikely(exc_type)) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) {
PyObject *exc_value, *exc_tb;
exc_value = tstate->curexc_value;
exc_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
Py_DECREF(exc_type);
Py_XDECREF(exc_value);
Py_XDECREF(exc_tb);
return 0;
} else {
return -1;
}
}
return 0;
#else
if (unlikely(PyErr_Occurred())) {
if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) {
PyErr_Clear();
return 0;
} else {
return -1;
}
}
return 0;
#endif
}
/* PyObjectCallNoArg */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, NULL, 0);
}
#endif
#ifdef __Pyx_CyFunction_USED
if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func)))
#else
if (likely(PyCFunction_Check(func)))
#endif
{
if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
return __Pyx_PyObject_CallMethO(func, NULL);
}
}
return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
}
#endif
/* PyObjectGetMethod */
static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) {
PyObject *attr;
#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP
PyTypeObject *tp = Py_TYPE(obj);
PyObject *descr;
descrgetfunc f = NULL;
PyObject **dictptr, *dict;
int meth_found = 0;
assert (*method == NULL);
if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) {
attr = __Pyx_PyObject_GetAttrStr(obj, name);
goto try_unpack;
}
if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) {
return 0;
}
descr = _PyType_Lookup(tp, name);
if (likely(descr != NULL)) {
Py_INCREF(descr);
#if PY_MAJOR_VERSION >= 3
#ifdef __Pyx_CyFunction_USED
if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr)))
#else
if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type)))
#endif
#else
#ifdef __Pyx_CyFunction_USED
if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr)))
#else
if (likely(PyFunction_Check(descr)))
#endif
#endif
{
meth_found = 1;
} else {
f = Py_TYPE(descr)->tp_descr_get;
if (f != NULL && PyDescr_IsData(descr)) {
attr = f(descr, obj, (PyObject *)Py_TYPE(obj));
Py_DECREF(descr);
goto try_unpack;
}
}
}
dictptr = _PyObject_GetDictPtr(obj);
if (dictptr != NULL && (dict = *dictptr) != NULL) {
Py_INCREF(dict);
attr = __Pyx_PyDict_GetItemStr(dict, name);
if (attr != NULL) {
Py_INCREF(attr);
Py_DECREF(dict);
Py_XDECREF(descr);
goto try_unpack;
}
Py_DECREF(dict);
}
if (meth_found) {
*method = descr;
return 1;
}
if (f != NULL) {
attr = f(descr, obj, (PyObject *)Py_TYPE(obj));
Py_DECREF(descr);
goto try_unpack;
}
if (descr != NULL) {
*method = descr;
return 0;
}
PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
"'%.50s' object has no attribute '%U'",
tp->tp_name, name);
#else
"'%.50s' object has no attribute '%.400s'",
tp->tp_name, PyString_AS_STRING(name));
#endif
return 0;
#else
attr = __Pyx_PyObject_GetAttrStr(obj, name);
goto try_unpack;
#endif
try_unpack:
#if CYTHON_UNPACK_METHODS
if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) {
PyObject *function = PyMethod_GET_FUNCTION(attr);
Py_INCREF(function);
Py_DECREF(attr);
*method = function;
return 1;
}
#endif
*method = attr;
return 0;
}
/* PyObjectCallMethod0 */
static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) {
PyObject *method = NULL, *result = NULL;
int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method);
if (likely(is_method)) {
result = __Pyx_PyObject_CallOneArg(method, obj);
Py_DECREF(method);
return result;
}
if (unlikely(!method)) goto bad;
result = __Pyx_PyObject_CallNoArg(method);
Py_DECREF(method);
bad:
return result;
}
/* RaiseNeedMoreValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/* RaiseTooManyValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* UnpackItemEndCheck */
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) {
if (unlikely(retval)) {
Py_DECREF(retval);
__Pyx_RaiseTooManyValuesError(expected);
return -1;
} else {
return __Pyx_IterFinish();
}
}
/* RaiseNoneIterError */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* UnpackTupleError */
static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) {
if (t == Py_None) {
__Pyx_RaiseNoneNotIterableError();
} else if (PyTuple_GET_SIZE(t) < index) {
__Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t));
} else {
__Pyx_RaiseTooManyValuesError(index);
}
}
/* UnpackTuple2 */
static CYTHON_INLINE int __Pyx_unpack_tuple2_exact(
PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2, int decref_tuple) {
PyObject *value1 = NULL, *value2 = NULL;
#if CYTHON_COMPILING_IN_PYPY
value1 = PySequence_ITEM(tuple, 0); if (unlikely(!value1)) goto bad;
value2 = PySequence_ITEM(tuple, 1); if (unlikely(!value2)) goto bad;
#else
value1 = PyTuple_GET_ITEM(tuple, 0); Py_INCREF(value1);
value2 = PyTuple_GET_ITEM(tuple, 1); Py_INCREF(value2);
#endif
if (decref_tuple) {
Py_DECREF(tuple);
}
*pvalue1 = value1;
*pvalue2 = value2;
return 0;
#if CYTHON_COMPILING_IN_PYPY
bad:
Py_XDECREF(value1);
Py_XDECREF(value2);
if (decref_tuple) { Py_XDECREF(tuple); }
return -1;
#endif
}
static int __Pyx_unpack_tuple2_generic(PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2,
int has_known_size, int decref_tuple) {
Py_ssize_t index;
PyObject *value1 = NULL, *value2 = NULL, *iter = NULL;
iternextfunc iternext;
iter = PyObject_GetIter(tuple);
if (unlikely(!iter)) goto bad;
if (decref_tuple) { Py_DECREF(tuple); tuple = NULL; }
iternext = Py_TYPE(iter)->tp_iternext;
value1 = iternext(iter); if (unlikely(!value1)) { index = 0; goto unpacking_failed; }
value2 = iternext(iter); if (unlikely(!value2)) { index = 1; goto unpacking_failed; }
if (!has_known_size && unlikely(__Pyx_IternextUnpackEndCheck(iternext(iter), 2))) goto bad;
Py_DECREF(iter);
*pvalue1 = value1;
*pvalue2 = value2;
return 0;
unpacking_failed:
if (!has_known_size && __Pyx_IterFinish() == 0)
__Pyx_RaiseNeedMoreValuesError(index);
bad:
Py_XDECREF(iter);
Py_XDECREF(value1);
Py_XDECREF(value2);
if (decref_tuple) { Py_XDECREF(tuple); }
return -1;
}
/* dict_iter */
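/* Iteration support for dict.items()/keys()/values() style loops.  For
   exact dicts on CPython the dict itself is returned and walked in place
   with PyDict_Next; *p_orig_length records the size so that mutation
   during iteration can be detected in __Pyx_dict_iter_next below. */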
static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* iterable, int is_dict, PyObject* method_name,
Py_ssize_t* p_orig_length, int* p_source_is_dict) {
is_dict = is_dict || likely(PyDict_CheckExact(iterable));
*p_source_is_dict = is_dict;
if (is_dict) {
#if !CYTHON_COMPILING_IN_PYPY
*p_orig_length = PyDict_Size(iterable);
Py_INCREF(iterable);
return iterable;
#elif PY_MAJOR_VERSION >= 3
static PyObject *py_items = NULL, *py_keys = NULL, *py_values = NULL;
PyObject **pp = NULL;
if (method_name) {
const char *name = PyUnicode_AsUTF8(method_name);
if (strcmp(name, "iteritems") == 0) pp = &py_items;
else if (strcmp(name, "iterkeys") == 0) pp = &py_keys;
else if (strcmp(name, "itervalues") == 0) pp = &py_values;
if (pp) {
if (!*pp) {
*pp = PyUnicode_FromString(name + 4);
if (!*pp)
return NULL;
}
method_name = *pp;
}
}
#endif
}
*p_orig_length = 0;
if (method_name) {
PyObject* iter;
iterable = __Pyx_PyObject_CallMethod0(iterable, method_name);
if (!iterable)
return NULL;
#if !CYTHON_COMPILING_IN_PYPY
if (PyTuple_CheckExact(iterable) || PyList_CheckExact(iterable))
return iterable;
#endif
iter = PyObject_GetIter(iterable);
Py_DECREF(iterable);
return iter;
}
return PyObject_GetIter(iterable);
}
static CYTHON_INLINE int __Pyx_dict_iter_next(
PyObject* iter_obj, CYTHON_NCP_UNUSED Py_ssize_t orig_length, CYTHON_NCP_UNUSED Py_ssize_t* ppos,
PyObject** pkey, PyObject** pvalue, PyObject** pitem, int source_is_dict) {
PyObject* next_item;
#if !CYTHON_COMPILING_IN_PYPY
if (source_is_dict) {
PyObject *key, *value;
if (unlikely(orig_length != PyDict_Size(iter_obj))) {
PyErr_SetString(PyExc_RuntimeError, "dictionary changed size during iteration");
return -1;
}
if (unlikely(!PyDict_Next(iter_obj, ppos, &key, &value))) {
return 0;
}
if (pitem) {
PyObject* tuple = PyTuple_New(2);
if (unlikely(!tuple)) {
return -1;
}
Py_INCREF(key);
Py_INCREF(value);
PyTuple_SET_ITEM(tuple, 0, key);
PyTuple_SET_ITEM(tuple, 1, value);
*pitem = tuple;
} else {
if (pkey) {
Py_INCREF(key);
*pkey = key;
}
if (pvalue) {
Py_INCREF(value);
*pvalue = value;
}
}
return 1;
} else if (PyTuple_CheckExact(iter_obj)) {
Py_ssize_t pos = *ppos;
if (unlikely(pos >= PyTuple_GET_SIZE(iter_obj))) return 0;
*ppos = pos + 1;
next_item = PyTuple_GET_ITEM(iter_obj, pos);
Py_INCREF(next_item);
} else if (PyList_CheckExact(iter_obj)) {
Py_ssize_t pos = *ppos;
if (unlikely(pos >= PyList_GET_SIZE(iter_obj))) return 0;
*ppos = pos + 1;
next_item = PyList_GET_ITEM(iter_obj, pos);
Py_INCREF(next_item);
} else
#endif
{
next_item = PyIter_Next(iter_obj);
if (unlikely(!next_item)) {
return __Pyx_IterFinish();
}
}
if (pitem) {
*pitem = next_item;
} else if (pkey && pvalue) {
if (__Pyx_unpack_tuple2(next_item, pkey, pvalue, source_is_dict, source_is_dict, 1))
return -1;
} else if (pkey) {
*pkey = next_item;
} else {
*pvalue = next_item;
}
return 1;
}
/* PyObjectCall2Args */
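/* Calls a callable with exactly two positional arguments, preferring the
   fastcall conventions for plain Python and C functions before falling
   back to building an argument tuple. */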
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
PyObject *args, *result = NULL;
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyFunction_FastCall(function, args, 2);
}
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyCFunction_FastCall(function, args, 2);
}
#endif
args = PyTuple_New(2);
if (unlikely(!args)) goto done;
Py_INCREF(arg1);
PyTuple_SET_ITEM(args, 0, arg1);
Py_INCREF(arg2);
PyTuple_SET_ITEM(args, 1, arg2);
Py_INCREF(function);
result = __Pyx_PyObject_Call(function, args, NULL);
Py_DECREF(args);
Py_DECREF(function);
done:
return result;
}
/* GetItemInt */
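/* Specialized integer indexing.  The list/tuple fast paths use the
   size/item macros directly; "wraparound" enables Python-style negative
   indices and "boundscheck" selects between a checked macro access and
   deferring to the generic (boxed-index) PyObject_GetItem path. */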
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyList_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyTuple_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* MemviewSliceInit */
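/* Initializes a __Pyx_memviewslice from an acquired Py_buffer and
   maintains the owning memoryview's acquisition count.  Missing strides
   are synthesized for a C-contiguous layout and missing suboffsets are
   marked with -1 (no indirection).  __Pyx_INC_MEMVIEW/__Pyx_XDEC_MEMVIEW
   below keep the memoryview alive while any slice references its buffer,
   taking the GIL only when an actual Python refcount changes hands. */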
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference)
{
__Pyx_RefNannyDeclarations
int i, retval=-1;
Py_buffer *buf = &memview->view;
__Pyx_RefNannySetupContext("init_memviewslice", 0);
if (unlikely(memviewslice->memview || memviewslice->data)) {
PyErr_SetString(PyExc_ValueError,
"memviewslice is already initialized!");
goto fail;
}
if (buf->strides) {
for (i = 0; i < ndim; i++) {
memviewslice->strides[i] = buf->strides[i];
}
} else {
Py_ssize_t stride = buf->itemsize;
for (i = ndim - 1; i >= 0; i--) {
memviewslice->strides[i] = stride;
stride *= buf->shape[i];
}
}
for (i = 0; i < ndim; i++) {
memviewslice->shape[i] = buf->shape[i];
if (buf->suboffsets) {
memviewslice->suboffsets[i] = buf->suboffsets[i];
} else {
memviewslice->suboffsets[i] = -1;
}
}
memviewslice->memview = memview;
memviewslice->data = (char *)buf->buf;
if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
Py_INCREF(memview);
}
retval = 0;
goto no_fail;
fail:
memviewslice->memview = 0;
memviewslice->data = 0;
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
#ifndef Py_NO_RETURN
#define Py_NO_RETURN
#endif
static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN {
va_list vargs;
char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, fmt);
#else
va_start(vargs);
#endif
    vsnprintf(msg, sizeof(msg), fmt, vargs);
va_end(vargs);
Py_FatalError(msg);
}
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)++;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)--;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
int first_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (unlikely(!memview || (PyObject *) memview == Py_None))
return;
if (unlikely(__pyx_get_slice_count(memview) < 0))
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
first_time = __pyx_add_acquisition_count(memview) == 0;
if (unlikely(first_time)) {
if (have_gil) {
Py_INCREF((PyObject *) memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_INCREF((PyObject *) memview);
PyGILState_Release(_gilstate);
}
}
}
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
int have_gil, int lineno) {
int last_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (unlikely(!memview || (PyObject *) memview == Py_None)) {
memslice->memview = NULL;
return;
}
if (unlikely(__pyx_get_slice_count(memview) <= 0))
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
last_time = __pyx_sub_acquisition_count(memview) == 1;
memslice->data = NULL;
if (unlikely(last_time)) {
if (have_gil) {
Py_CLEAR(memslice->memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_CLEAR(memslice->memview);
PyGILState_Release(_gilstate);
}
} else {
memslice->memview = NULL;
}
}
/* ArgTypeTest */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
else if (exact) {
#if PY_MAJOR_VERSION == 2
if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(__Pyx_TypeCheck(obj, type))) return 1;
}
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
return 0;
}
/* BytesEquals */
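/* Fast bytes equality: pointer identity, length, the first byte and the
   cached ob_shash (where available) are compared before falling back to
   memcmp, so most unequal strings are rejected without scanning the
   full buffers. */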
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
if (s1 == s2) {
return (equals == Py_EQ);
} else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
const char *ps1, *ps2;
Py_ssize_t length = PyBytes_GET_SIZE(s1);
if (length != PyBytes_GET_SIZE(s2))
return (equals == Py_NE);
ps1 = PyBytes_AS_STRING(s1);
ps2 = PyBytes_AS_STRING(s2);
if (ps1[0] != ps2[0]) {
return (equals == Py_NE);
} else if (length == 1) {
return (equals == Py_EQ);
} else {
int result;
#if CYTHON_USE_UNICODE_INTERNALS
Py_hash_t hash1, hash2;
hash1 = ((PyBytesObject*)s1)->ob_shash;
hash2 = ((PyBytesObject*)s2)->ob_shash;
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
return (equals == Py_NE);
}
#endif
result = memcmp(ps1, ps2, (size_t)length);
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
return (equals == Py_NE);
} else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
return (equals == Py_NE);
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
#endif
}
/* UnicodeEquals */
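/* Fast unicode equality along the same lines as __Pyx_PyBytes_Equals:
   identity, length, cached hash and PEP 393 kind are checked before a
   memcmp of the canonical representations; mixed str/unicode operands
   on Python 2 are coerced to unicode first. */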
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
PyObject* owned_ref = NULL;
#endif
int s1_is_unicode, s2_is_unicode;
if (s1 == s2) {
goto return_eq;
}
s1_is_unicode = PyUnicode_CheckExact(s1);
s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
owned_ref = PyUnicode_FromObject(s2);
if (unlikely(!owned_ref))
return -1;
s2 = owned_ref;
s2_is_unicode = 1;
} else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
owned_ref = PyUnicode_FromObject(s1);
if (unlikely(!owned_ref))
return -1;
s1 = owned_ref;
s1_is_unicode = 1;
} else if (((!s2_is_unicode) & (!s1_is_unicode))) {
return __Pyx_PyBytes_Equals(s1, s2, equals);
}
#endif
if (s1_is_unicode & s2_is_unicode) {
Py_ssize_t length;
int kind;
void *data1, *data2;
if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
return -1;
length = __Pyx_PyUnicode_GET_LENGTH(s1);
if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
goto return_ne;
}
#if CYTHON_USE_UNICODE_INTERNALS
{
Py_hash_t hash1, hash2;
#if CYTHON_PEP393_ENABLED
hash1 = ((PyASCIIObject*)s1)->hash;
hash2 = ((PyASCIIObject*)s2)->hash;
#else
hash1 = ((PyUnicodeObject*)s1)->hash;
hash2 = ((PyUnicodeObject*)s2)->hash;
#endif
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
goto return_ne;
}
}
#endif
kind = __Pyx_PyUnicode_KIND(s1);
if (kind != __Pyx_PyUnicode_KIND(s2)) {
goto return_ne;
}
data1 = __Pyx_PyUnicode_DATA(s1);
data2 = __Pyx_PyUnicode_DATA(s2);
if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
goto return_ne;
} else if (length == 1) {
goto return_eq;
} else {
int result = memcmp(data1, data2, (size_t)(length * kind));
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & s2_is_unicode) {
goto return_ne;
} else if ((s2 == Py_None) & s1_is_unicode) {
goto return_ne;
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
return_eq:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ);
return_ne:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_NE);
#endif
}
/* DivInt[Py_ssize_t] */
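/* Floor division for Py_ssize_t.  C's "/" truncates toward zero, so when
   the remainder is non-zero and has the opposite sign of the divisor the
   quotient is decremented by one to match Python's floor semantics,
   e.g. -7 / 2 yields -3 in C but -4 here.  __Pyx_div_long below uses the
   same correction. */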
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
Py_ssize_t q = a / b;
Py_ssize_t r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
/* GetAttr */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_USE_TYPE_SLOTS
#if PY_MAJOR_VERSION >= 3
if (likely(PyUnicode_Check(n)))
#else
if (likely(PyString_Check(n)))
#endif
return __Pyx_PyObject_GetAttrStr(o, n);
#endif
return PyObject_GetAttr(o, n);
}
/* ObjectGetItem */
#if CYTHON_USE_TYPE_SLOTS
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
PyObject *runerr;
Py_ssize_t key_value;
PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
if (unlikely(!(m && m->sq_item))) {
PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
return NULL;
}
key_value = __Pyx_PyIndex_AsSsize_t(index);
if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
}
if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
PyErr_Clear();
PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
}
return NULL;
}
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
if (likely(m && m->mp_subscript)) {
return m->mp_subscript(obj, key);
}
return __Pyx_PyObject_GetIndex(obj, key);
}
#endif
/* decode_c_string */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
Py_ssize_t length;
if (unlikely((start < 0) | (stop < 0))) {
size_t slen = strlen(cstring);
if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
PyErr_SetString(PyExc_OverflowError,
"c-string too long to convert to Python");
return NULL;
}
length = (Py_ssize_t) slen;
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
}
if (unlikely(stop <= start))
return __Pyx_NewRef(__pyx_empty_unicode);
length = stop - start;
cstring += start;
if (decode_func) {
return decode_func(cstring, length, errors);
} else {
return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
if (unlikely(PyTuple_Check(err)))
return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetAttr3 */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
return NULL;
__Pyx_PyErr_Clear();
Py_INCREF(d);
return d;
}
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
PyObject *r = __Pyx_GetAttr(o, n);
return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
}
/* PyDictVersioning */
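/* Dict version tags (CPython's ma_version_tag, read via the
   __PYX_GET_DICT_VERSION macro) let cached lookups be revalidated with a
   couple of integer comparisons instead of repeating the full dict
   lookup; used by the module-global lookup cache below. */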
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
/* GetModuleGlobalName */
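/* Looks a name up in the module namespace (__pyx_d), updating the
   dict-version cache when CYTHON_USE_DICT_VERSIONS is enabled, and falls
   back to the builtins via __Pyx_GetBuiltinName when it is absent. */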
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
} else if (unlikely(PyErr_Occurred())) {
return NULL;
}
#else
result = PyDict_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
#endif
#else
result = PyObject_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
PyErr_Clear();
#endif
return __Pyx_GetBuiltinName(name);
}
/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(__Pyx_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* GetTopmostException */
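/* Walks the thread state's exc_info stack past entries whose exception
   has already been cleared, returning the innermost frame that still
   holds a live "currently handled" exception. */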
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
_PyErr_StackItem *exc_info = tstate->exc_info;
while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
exc_info->previous_item != NULL)
{
exc_info = exc_info->previous_item;
}
return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
*type = exc_info->exc_type;
*value = exc_info->exc_value;
*tb = exc_info->exc_traceback;
#else
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
#endif
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = type;
exc_info->exc_value = value;
exc_info->exc_traceback = tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
/* GetException */
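/* Fetches and normalizes the pending exception, returning new references
   in *type/*value/*tb, and installs it as the "currently handled"
   exception so that bare `raise` and sys.exc_info() observe it. */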
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if CYTHON_USE_EXC_INFO_STACK
{
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = local_type;
exc_info->exc_value = local_value;
exc_info->exc_traceback = local_tb;
}
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
/* SwapException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = *type;
exc_info->exc_value = *value;
exc_info->exc_traceback = *tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = *type;
tstate->exc_value = *value;
tstate->exc_traceback = *tb;
#endif
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
PyErr_SetExcInfo(*type, *value, *tb);
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#endif
/* Import */
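/* Emulates the import statement for the generated module.  On Python 3 a
   level of -1 first attempts a package-relative import (level 1) and then
   retries an absolute one (level 0) if that raises ImportError; Python 2
   goes through the builtin __import__ instead. */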
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
        if (strchr(__Pyx_MODULE_NAME, '.')) {
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* FastTypeChecks */
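/* Exception matching without the cost of PyObject_IsSubclass: on CPython
   the MRO tuple (or the tp_base chain) is scanned directly, under the
   assumption that exception classes are regular types without custom
   __subclasscheck__ hooks. */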
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
        }
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
        }
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* PyIntBinop */
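/* Optimized addition of a Python integer and a compile-time C constant
   (intval).  Exact ints, longs and floats are handled inline with
   overflow checks; anything else defers to PyNumber_Add or
   PyNumber_InPlaceAdd. */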
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
(void)inplace;
(void)zerodivision_check;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(op1))) {
const long b = intval;
long x;
long a = PyInt_AS_LONG(op1);
x = (long)((unsigned long)a + b);
if (likely((x^a) >= 0 || (x^b) >= 0))
return PyInt_FromLong(x);
return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(op1))) {
const long b = intval;
long a, x;
#ifdef HAVE_LONG_LONG
const PY_LONG_LONG llb = intval;
PY_LONG_LONG lla, llx;
#endif
const digit* digits = ((PyLongObject*)op1)->ob_digit;
const Py_ssize_t size = Py_SIZE(op1);
if (likely(__Pyx_sst_abs(size) <= 1)) {
a = likely(size) ? digits[0] : 0;
if (size == -1) a = -a;
} else {
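            /* Unpack PyLongs of up to four 15/30-bit digits directly into
               a C long (or long long) so the addition stays in C; anything
               larger falls back to CPython's nb_add. */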
switch (size) {
case -2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
}
x = a + b;
return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
long_long:
llx = lla + llb;
return PyLong_FromLongLong(llx);
#endif
}
#endif
if (PyFloat_CheckExact(op1)) {
const long b = intval;
double a = PyFloat_AS_DOUBLE(op1);
double result;
PyFPE_START_PROTECT("add", return NULL)
result = ((double)a) + (double)b;
PyFPE_END_PROTECT(result)
return PyFloat_FromDouble(result);
}
return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
/* RaiseUnboundLocalError */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
/* DivInt[long] */
static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
long q = a / b;
long r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
/* ImportFrom */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
"cannot import name %.230s", PyString_AS_STRING(name));
#else
"cannot import name %S", name);
#endif
}
return value;
}
/* HasAttr */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
PyObject *r;
if (unlikely(!__Pyx_PyBaseString_Check(n))) {
PyErr_SetString(PyExc_TypeError,
"hasattr(): attribute name must be string");
return -1;
}
r = __Pyx_GetAttr(o, n);
if (unlikely(!r)) {
PyErr_Clear();
return 0;
} else {
Py_DECREF(r);
return 1;
}
}
/* PyObject_GenericGetAttrNoDict */
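/* Attribute lookup for extension types without an instance dict on
   Python < 3.7: the descriptor found via _PyType_Lookup is invoked (or
   returned) directly, skipping PyObject_GenericGetAttr's instance-dict
   probing. */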
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
"'%.50s' object has no attribute '%U'",
tp->tp_name, attr_name);
#else
"'%.50s' object has no attribute '%.400s'",
tp->tp_name, PyString_AS_STRING(attr_name));
#endif
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
PyObject *descr;
PyTypeObject *tp = Py_TYPE(obj);
if (unlikely(!PyString_Check(attr_name))) {
return PyObject_GenericGetAttr(obj, attr_name);
}
assert(!tp->tp_dictoffset);
descr = _PyType_Lookup(tp, attr_name);
if (unlikely(!descr)) {
return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
}
Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
{
descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
if (unlikely(f)) {
PyObject *res = f(descr, obj, (PyObject *)tp);
Py_DECREF(descr);
return res;
}
}
return descr;
}
#endif
/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
return PyObject_GenericGetAttr(obj, attr_name);
}
return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif
/* SetVTable */
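/* Stores an extension type's C vtable pointer in its tp_dict, wrapped in
   a PyCapsule (PyCObject on pre-2.7 Pythons), under the interned name
   __pyx_n_s_pyx_vtable (conventionally "__pyx_vtable__") so that other
   Cython modules can retrieve it for cross-module C-level calls. */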
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
if (!ob)
goto bad;
if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
goto bad;
Py_DECREF(ob);
return 0;
bad:
Py_XDECREF(ob);
return -1;
}
/* PyObjectGetAttrStrNoError */
static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
__Pyx_PyErr_Clear();
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
}
#endif
result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
if (unlikely(!result)) {
__Pyx_PyObject_GetAttrStr_ClearAttributeError();
}
return result;
}
/* SetupReduce */
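/* Pickle support: unless the class defines __getstate__, the generated
   __reduce_cython__/__setstate_cython__ helpers are installed as
   __reduce__/__setstate__ in tp_dict and the placeholder names removed,
   after which PyType_Modified invalidates the type's method cache. */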
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
int ret;
PyObject *name_attr;
name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2);
if (likely(name_attr)) {
ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
} else {
ret = -1;
}
if (unlikely(ret < 0)) {
PyErr_Clear();
ret = 0;
}
Py_XDECREF(name_attr);
return ret;
}
static int __Pyx_setup_reduce(PyObject* type_obj) {
int ret = 0;
PyObject *object_reduce = NULL;
PyObject *object_reduce_ex = NULL;
PyObject *reduce = NULL;
PyObject *reduce_ex = NULL;
PyObject *reduce_cython = NULL;
PyObject *setstate = NULL;
PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#else
if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#else
object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#endif
reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#else
object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#endif
reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython);
if (likely(reduce_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (reduce == object_reduce || PyErr_Occurred()) {
goto __PYX_BAD;
}
setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
if (!setstate) PyErr_Clear();
if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython);
if (likely(setstate_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (!setstate || PyErr_Occurred()) {
goto __PYX_BAD;
}
}
PyType_Modified((PyTypeObject*)type_obj);
}
}
goto __PYX_GOOD;
__PYX_BAD:
if (!PyErr_Occurred())
PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
ret = -1;
__PYX_GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
Py_XDECREF(object_reduce);
Py_XDECREF(object_reduce_ex);
#endif
Py_XDECREF(reduce);
Py_XDECREF(reduce_ex);
Py_XDECREF(reduce_cython);
Py_XDECREF(setstate);
Py_XDECREF(setstate_cython);
return ret;
}
/* FetchCommonType */
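/* Caches shared helper types (such as the CyFunction type below) in a
   fake "_cython_" CYTHON_ABI module, so all Cython modules built with
   the same ABI reuse one type object instead of each registering its
   own incompatible copy. */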
static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) {
PyObject* fake_module;
PyTypeObject* cached_type = NULL;
fake_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI);
if (!fake_module) return NULL;
Py_INCREF(fake_module);
cached_type = (PyTypeObject*) PyObject_GetAttrString(fake_module, type->tp_name);
if (cached_type) {
if (!PyType_Check((PyObject*)cached_type)) {
PyErr_Format(PyExc_TypeError,
"Shared Cython type %.200s is not a type object",
type->tp_name);
goto bad;
}
if (cached_type->tp_basicsize != type->tp_basicsize) {
PyErr_Format(PyExc_TypeError,
"Shared Cython type %.200s has the wrong size, try recompiling",
type->tp_name);
goto bad;
}
} else {
if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad;
PyErr_Clear();
if (PyType_Ready(type) < 0) goto bad;
if (PyObject_SetAttrString(fake_module, type->tp_name, (PyObject*) type) < 0)
goto bad;
Py_INCREF(type);
cached_type = type;
}
done:
Py_DECREF(fake_module);
return cached_type;
bad:
Py_XDECREF(cached_type);
cached_type = NULL;
goto done;
}
/* CythonFunctionShared */
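/* __pyx_CyFunctionObject: a PyCFunction-based type that mimics plain
   Python functions closely enough for introspection -- __name__,
   __qualname__, __dict__, __defaults__, __kwdefaults__, __annotations__
   and friends are exposed through the getsets defined below. */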
#include <structmember.h>
static PyObject *
__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure)
{
if (unlikely(op->func_doc == NULL)) {
if (op->func.m_ml->ml_doc) {
#if PY_MAJOR_VERSION >= 3
op->func_doc = PyUnicode_FromString(op->func.m_ml->ml_doc);
#else
op->func_doc = PyString_FromString(op->func.m_ml->ml_doc);
#endif
if (unlikely(op->func_doc == NULL))
return NULL;
} else {
Py_INCREF(Py_None);
return Py_None;
}
}
Py_INCREF(op->func_doc);
return op->func_doc;
}
static int
__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
{
PyObject *tmp = op->func_doc;
if (value == NULL) {
value = Py_None;
}
Py_INCREF(value);
op->func_doc = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
if (unlikely(op->func_name == NULL)) {
#if PY_MAJOR_VERSION >= 3
op->func_name = PyUnicode_InternFromString(op->func.m_ml->ml_name);
#else
op->func_name = PyString_InternFromString(op->func.m_ml->ml_name);
#endif
if (unlikely(op->func_name == NULL))
return NULL;
}
Py_INCREF(op->func_name);
return op->func_name;
}
static int
__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
{
PyObject *tmp;
#if PY_MAJOR_VERSION >= 3
if (unlikely(value == NULL || !PyUnicode_Check(value)))
#else
if (unlikely(value == NULL || !PyString_Check(value)))
#endif
{
PyErr_SetString(PyExc_TypeError,
"__name__ must be set to a string object");
return -1;
}
tmp = op->func_name;
Py_INCREF(value);
op->func_name = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
Py_INCREF(op->func_qualname);
return op->func_qualname;
}
static int
__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
{
PyObject *tmp;
#if PY_MAJOR_VERSION >= 3
if (unlikely(value == NULL || !PyUnicode_Check(value)))
#else
if (unlikely(value == NULL || !PyString_Check(value)))
#endif
{
PyErr_SetString(PyExc_TypeError,
"__qualname__ must be set to a string object");
return -1;
}
tmp = op->func_qualname;
Py_INCREF(value);
op->func_qualname = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_self(__pyx_CyFunctionObject *m, CYTHON_UNUSED void *closure)
{
PyObject *self;
self = m->func_closure;
if (self == NULL)
self = Py_None;
Py_INCREF(self);
return self;
}
static PyObject *
__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
if (unlikely(op->func_dict == NULL)) {
op->func_dict = PyDict_New();
if (unlikely(op->func_dict == NULL))
return NULL;
}
Py_INCREF(op->func_dict);
return op->func_dict;
}
static int
__Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
{
PyObject *tmp;
if (unlikely(value == NULL)) {
PyErr_SetString(PyExc_TypeError,
"function's dictionary may not be deleted");
return -1;
}
if (unlikely(!PyDict_Check(value))) {
PyErr_SetString(PyExc_TypeError,
"setting function's dictionary to a non-dict");
return -1;
}
tmp = op->func_dict;
Py_INCREF(value);
op->func_dict = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
Py_INCREF(op->func_globals);
return op->func_globals;
}
static PyObject *
__Pyx_CyFunction_get_closure(CYTHON_UNUSED __pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
Py_INCREF(Py_None);
return Py_None;
}
static PyObject *
__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
PyObject* result = (op->func_code) ? op->func_code : Py_None;
Py_INCREF(result);
return result;
}
static int
__Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) {
int result = 0;
PyObject *res = op->defaults_getter((PyObject *) op);
if (unlikely(!res))
return -1;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
op->defaults_tuple = PyTuple_GET_ITEM(res, 0);
Py_INCREF(op->defaults_tuple);
op->defaults_kwdict = PyTuple_GET_ITEM(res, 1);
Py_INCREF(op->defaults_kwdict);
#else
op->defaults_tuple = PySequence_ITEM(res, 0);
if (unlikely(!op->defaults_tuple)) result = -1;
else {
op->defaults_kwdict = PySequence_ITEM(res, 1);
if (unlikely(!op->defaults_kwdict)) result = -1;
}
#endif
Py_DECREF(res);
return result;
}
static int
__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) {
PyObject* tmp;
if (!value) {
value = Py_None;
} else if (value != Py_None && !PyTuple_Check(value)) {
PyErr_SetString(PyExc_TypeError,
"__defaults__ must be set to a tuple object");
return -1;
}
Py_INCREF(value);
tmp = op->defaults_tuple;
op->defaults_tuple = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) {
PyObject* result = op->defaults_tuple;
if (unlikely(!result)) {
if (op->defaults_getter) {
if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL;
result = op->defaults_tuple;
} else {
result = Py_None;
}
}
Py_INCREF(result);
return result;
}
static int
__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) {
PyObject* tmp;
if (!value) {
value = Py_None;
} else if (value != Py_None && !PyDict_Check(value)) {
PyErr_SetString(PyExc_TypeError,
"__kwdefaults__ must be set to a dict object");
return -1;
}
Py_INCREF(value);
tmp = op->defaults_kwdict;
op->defaults_kwdict = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) {
PyObject* result = op->defaults_kwdict;
if (unlikely(!result)) {
if (op->defaults_getter) {
if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL;
result = op->defaults_kwdict;
} else {
result = Py_None;
}
}
Py_INCREF(result);
return result;
}
static int
__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) {
PyObject* tmp;
if (!value || value == Py_None) {
value = NULL;
} else if (!PyDict_Check(value)) {
PyErr_SetString(PyExc_TypeError,
"__annotations__ must be set to a dict object");
return -1;
}
Py_XINCREF(value);
tmp = op->func_annotations;
op->func_annotations = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) {
PyObject* result = op->func_annotations;
if (unlikely(!result)) {
result = PyDict_New();
if (unlikely(!result)) return NULL;
op->func_annotations = result;
}
Py_INCREF(result);
return result;
}
static PyGetSetDef __pyx_CyFunction_getsets[] = {
{(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0},
{(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0},
{(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0},
{(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0},
{(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0},
{(char *) "__self__", (getter)__Pyx_CyFunction_get_self, 0, 0, 0},
{(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0},
{(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0},
{(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0},
{(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0},
{(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0},
{(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0},
{(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0},
{(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0},
{(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0},
{(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0},
{(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0},
{(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0},
{0, 0, 0, 0, 0}
};
static PyMemberDef __pyx_CyFunction_members[] = {
{(char *) "__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), PY_WRITE_RESTRICTED, 0},
{0, 0, 0, 0, 0}
};
static PyObject *
__Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, CYTHON_UNUSED PyObject *args)
{
#if PY_MAJOR_VERSION >= 3
Py_INCREF(m->func_qualname);
return m->func_qualname;
#else
return PyString_FromString(m->func.m_ml->ml_name);
#endif
}
static PyMethodDef __pyx_CyFunction_methods[] = {
{"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0},
{0, 0, 0, 0}
};
#if PY_VERSION_HEX < 0x030500A0
#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist)
#else
#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func.m_weakreflist)
#endif
static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname,
PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) {
if (unlikely(op == NULL))
return NULL;
op->flags = flags;
__Pyx_CyFunction_weakreflist(op) = NULL;
op->func.m_ml = ml;
op->func.m_self = (PyObject *) op;
Py_XINCREF(closure);
op->func_closure = closure;
Py_XINCREF(module);
op->func.m_module = module;
op->func_dict = NULL;
op->func_name = NULL;
Py_INCREF(qualname);
op->func_qualname = qualname;
op->func_doc = NULL;
op->func_classobj = NULL;
op->func_globals = globals;
Py_INCREF(op->func_globals);
Py_XINCREF(code);
op->func_code = code;
op->defaults_pyobjects = 0;
op->defaults_size = 0;
op->defaults = NULL;
op->defaults_tuple = NULL;
op->defaults_kwdict = NULL;
op->defaults_getter = NULL;
op->func_annotations = NULL;
return (PyObject *) op;
}
static int
__Pyx_CyFunction_clear(__pyx_CyFunctionObject *m)
{
Py_CLEAR(m->func_closure);
Py_CLEAR(m->func.m_module);
Py_CLEAR(m->func_dict);
Py_CLEAR(m->func_name);
Py_CLEAR(m->func_qualname);
Py_CLEAR(m->func_doc);
Py_CLEAR(m->func_globals);
Py_CLEAR(m->func_code);
Py_CLEAR(m->func_classobj);
Py_CLEAR(m->defaults_tuple);
Py_CLEAR(m->defaults_kwdict);
Py_CLEAR(m->func_annotations);
if (m->defaults) {
PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
int i;
for (i = 0; i < m->defaults_pyobjects; i++)
Py_XDECREF(pydefaults[i]);
PyObject_Free(m->defaults);
m->defaults = NULL;
}
return 0;
}
static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m)
{
if (__Pyx_CyFunction_weakreflist(m) != NULL)
PyObject_ClearWeakRefs((PyObject *) m);
__Pyx_CyFunction_clear(m);
PyObject_GC_Del(m);
}
static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m)
{
PyObject_GC_UnTrack(m);
__Pyx__CyFunction_dealloc(m);
}
static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg)
{
Py_VISIT(m->func_closure);
Py_VISIT(m->func.m_module);
Py_VISIT(m->func_dict);
Py_VISIT(m->func_name);
Py_VISIT(m->func_qualname);
Py_VISIT(m->func_doc);
Py_VISIT(m->func_globals);
Py_VISIT(m->func_code);
Py_VISIT(m->func_classobj);
Py_VISIT(m->defaults_tuple);
Py_VISIT(m->defaults_kwdict);
if (m->defaults) {
PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
int i;
for (i = 0; i < m->defaults_pyobjects; i++)
Py_VISIT(pydefaults[i]);
}
return 0;
}
static PyObject *__Pyx_CyFunction_descr_get(PyObject *func, PyObject *obj, PyObject *type)
{
#if PY_MAJOR_VERSION < 3
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
if (m->flags & __Pyx_CYFUNCTION_STATICMETHOD) {
Py_INCREF(func);
return func;
}
if (m->flags & __Pyx_CYFUNCTION_CLASSMETHOD) {
if (type == NULL)
type = (PyObject *)(Py_TYPE(obj));
return __Pyx_PyMethod_New(func, type, (PyObject *)(Py_TYPE(type)));
}
if (obj == Py_None)
obj = NULL;
#endif
return __Pyx_PyMethod_New(func, obj, type);
}
static PyObject*
__Pyx_CyFunction_repr(__pyx_CyFunctionObject *op)
{
#if PY_MAJOR_VERSION >= 3
return PyUnicode_FromFormat("<cyfunction %U at %p>",
op->func_qualname, (void *)op);
#else
return PyString_FromFormat("<cyfunction %s at %p>",
PyString_AsString(op->func_qualname), (void *)op);
#endif
}
static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) {
PyCFunctionObject* f = (PyCFunctionObject*)func;
PyCFunction meth = f->m_ml->ml_meth;
Py_ssize_t size;
switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) {
case METH_VARARGS:
if (likely(kw == NULL || PyDict_Size(kw) == 0))
return (*meth)(self, arg);
break;
case METH_VARARGS | METH_KEYWORDS:
return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw);
case METH_NOARGS:
if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
size = PyTuple_GET_SIZE(arg);
if (likely(size == 0))
return (*meth)(self, NULL);
PyErr_Format(PyExc_TypeError,
"%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)",
f->m_ml->ml_name, size);
return NULL;
}
break;
case METH_O:
if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
size = PyTuple_GET_SIZE(arg);
if (likely(size == 1)) {
PyObject *result, *arg0;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
arg0 = PyTuple_GET_ITEM(arg, 0);
#else
arg0 = PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL;
#endif
result = (*meth)(self, arg0);
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
Py_DECREF(arg0);
#endif
return result;
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)",
f->m_ml->ml_name, size);
return NULL;
}
break;
default:
PyErr_SetString(PyExc_SystemError, "Bad call flags in "
"__Pyx_CyFunction_Call. METH_OLDARGS is no "
"longer supported!");
return NULL;
}
PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments",
f->m_ml->ml_name);
return NULL;
}
static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) {
return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw);
}
static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) {
PyObject *result;
__pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func;
if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) {
Py_ssize_t argc;
PyObject *new_args;
PyObject *self;
argc = PyTuple_GET_SIZE(args);
new_args = PyTuple_GetSlice(args, 1, argc);
if (unlikely(!new_args))
return NULL;
self = PyTuple_GetItem(args, 0);
if (unlikely(!self)) {
Py_DECREF(new_args);
return NULL;
}
result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw);
Py_DECREF(new_args);
} else {
result = __Pyx_CyFunction_Call(func, args, kw);
}
return result;
}
static PyTypeObject __pyx_CyFunctionType_type = {
PyVarObject_HEAD_INIT(0, 0)
"cython_function_or_method",
sizeof(__pyx_CyFunctionObject),
0,
(destructor) __Pyx_CyFunction_dealloc,
0,
0,
0,
#if PY_MAJOR_VERSION < 3
0,
#else
0,
#endif
(reprfunc) __Pyx_CyFunction_repr,
0,
0,
0,
0,
__Pyx_CyFunction_CallAsMethod,
0,
0,
0,
0,
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
0,
(traverseproc) __Pyx_CyFunction_traverse,
(inquiry) __Pyx_CyFunction_clear,
0,
#if PY_VERSION_HEX < 0x030500A0
offsetof(__pyx_CyFunctionObject, func_weakreflist),
#else
offsetof(PyCFunctionObject, m_weakreflist),
#endif
0,
0,
__pyx_CyFunction_methods,
__pyx_CyFunction_members,
__pyx_CyFunction_getsets,
0,
0,
__Pyx_CyFunction_descr_get,
0,
offsetof(__pyx_CyFunctionObject, func_dict),
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
#if PY_VERSION_HEX >= 0x030400a1
0,
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0,
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0,
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0,
#endif
};
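/* Register the cyfunction type. __Pyx_FetchCommonType interns the type in a
shared helper module so that, as far as possible, all Cython modules in the
process reuse a single "cython_function_or_method" type object. */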
static int __pyx_CyFunction_init(void) {
__pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type);
if (unlikely(__pyx_CyFunctionType == NULL)) {
return -1;
}
return 0;
}
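/* Allocate zeroed storage for C-level default argument values. The first
`pyobjects` slots hold PyObject pointers, which lets the GC traverse/clear
hooks visit them. */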
static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) {
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
m->defaults = PyObject_Malloc(size);
if (unlikely(!m->defaults))
return PyErr_NoMemory();
memset(m->defaults, 0, size);
m->defaults_pyobjects = pyobjects;
m->defaults_size = size;
return m->defaults;
}
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) {
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
m->defaults_tuple = tuple;
Py_INCREF(tuple);
}
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) {
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
m->defaults_kwdict = dict;
Py_INCREF(dict);
}
static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) {
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
m->func_annotations = dict;
Py_INCREF(dict);
}
/* FusedFunction */
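/* A FusedFunction is a CyFunction subclass backing fused (multi-type)
functions: __signatures__ maps signature strings to the specialized
implementations, and either indexing (func[type]) or calling the function
selects one of them. */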
static PyObject *
__pyx_FusedFunction_New(PyMethodDef *ml, int flags,
PyObject *qualname, PyObject *closure,
PyObject *module, PyObject *globals,
PyObject *code)
{
PyObject *op = __Pyx_CyFunction_Init(
PyObject_GC_New(__pyx_CyFunctionObject, __pyx_FusedFunctionType),
ml, flags, qualname, closure, module, globals, code
);
if (likely(op)) {
__pyx_FusedFunctionObject *fusedfunc = (__pyx_FusedFunctionObject *) op;
fusedfunc->__signatures__ = NULL;
fusedfunc->type = NULL;
fusedfunc->self = NULL;
PyObject_GC_Track(op);
}
return op;
}
static void
__pyx_FusedFunction_dealloc(__pyx_FusedFunctionObject *self)
{
PyObject_GC_UnTrack(self);
Py_CLEAR(self->self);
Py_CLEAR(self->type);
Py_CLEAR(self->__signatures__);
__Pyx__CyFunction_dealloc((__pyx_CyFunctionObject *) self);
}
static int
__pyx_FusedFunction_traverse(__pyx_FusedFunctionObject *self,
visitproc visit,
void *arg)
{
Py_VISIT(self->self);
Py_VISIT(self->type);
Py_VISIT(self->__signatures__);
return __Pyx_CyFunction_traverse((__pyx_CyFunctionObject *) self, visit, arg);
}
static int
__pyx_FusedFunction_clear(__pyx_FusedFunctionObject *self)
{
Py_CLEAR(self->self);
Py_CLEAR(self->type);
Py_CLEAR(self->__signatures__);
return __Pyx_CyFunction_clear((__pyx_CyFunctionObject *) self);
}
static PyObject *
__pyx_FusedFunction_descr_get(PyObject *self, PyObject *obj, PyObject *type)
{
__pyx_FusedFunctionObject *func, *meth;
func = (__pyx_FusedFunctionObject *) self;
if (func->self || func->func.flags & __Pyx_CYFUNCTION_STATICMETHOD) {
Py_INCREF(self);
return self;
}
if (obj == Py_None)
obj = NULL;
meth = (__pyx_FusedFunctionObject *) __pyx_FusedFunction_New(
((PyCFunctionObject *) func)->m_ml,
((__pyx_CyFunctionObject *) func)->flags,
((__pyx_CyFunctionObject *) func)->func_qualname,
((__pyx_CyFunctionObject *) func)->func_closure,
((PyCFunctionObject *) func)->m_module,
((__pyx_CyFunctionObject *) func)->func_globals,
((__pyx_CyFunctionObject *) func)->func_code);
if (!meth)
return NULL;
if (func->func.defaults) {
PyObject **pydefaults;
int i;
if (!__Pyx_CyFunction_InitDefaults((PyObject*)meth,
func->func.defaults_size,
func->func.defaults_pyobjects)) {
Py_XDECREF((PyObject*)meth);
return NULL;
}
memcpy(meth->func.defaults, func->func.defaults, func->func.defaults_size);
pydefaults = __Pyx_CyFunction_Defaults(PyObject *, meth);
for (i = 0; i < meth->func.defaults_pyobjects; i++)
Py_XINCREF(pydefaults[i]);
}
Py_XINCREF(func->func.func_classobj);
meth->func.func_classobj = func->func.func_classobj;
Py_XINCREF(func->__signatures__);
meth->__signatures__ = func->__signatures__;
Py_XINCREF(type);
meth->type = type;
Py_XINCREF(func->func.defaults_tuple);
meth->func.defaults_tuple = func->func.defaults_tuple;
if (func->func.flags & __Pyx_CYFUNCTION_CLASSMETHOD)
obj = type;
Py_XINCREF(obj);
meth->self = obj;
return (PyObject *) meth;
}
static PyObject *
_obj_to_str(PyObject *obj)
{
if (PyType_Check(obj))
return PyObject_GetAttr(obj, __pyx_n_s_name_2);
else
return PyObject_Str(obj);
}
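/* func[idx]: build a signature string from a single type (via its __name__)
or from a tuple of types joined with "|", look up the matching
specialization in __signatures__, and rebind it to self/type if the fused
function is already bound. */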
static PyObject *
__pyx_FusedFunction_getitem(__pyx_FusedFunctionObject *self, PyObject *idx)
{
PyObject *signature = NULL;
PyObject *unbound_result_func;
PyObject *result_func = NULL;
if (self->__signatures__ == NULL) {
PyErr_SetString(PyExc_TypeError, "Function is not fused");
return NULL;
}
if (PyTuple_Check(idx)) {
PyObject *list = PyList_New(0);
Py_ssize_t n = PyTuple_GET_SIZE(idx);
PyObject *sep = NULL;
int i;
if (unlikely(!list))
return NULL;
for (i = 0; i < n; i++) {
int ret;
PyObject *string;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
PyObject *item = PyTuple_GET_ITEM(idx, i);
#else
PyObject *item = PySequence_ITEM(idx, i); if (unlikely(!item)) goto __pyx_err;
#endif
string = _obj_to_str(item);
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
Py_DECREF(item);
#endif
if (unlikely(!string)) goto __pyx_err;
ret = PyList_Append(list, string);
Py_DECREF(string);
if (unlikely(ret < 0)) goto __pyx_err;
}
sep = PyUnicode_FromString("|");
if (likely(sep))
signature = PyUnicode_Join(sep, list);
__pyx_err:
;
Py_DECREF(list);
Py_XDECREF(sep);
} else {
signature = _obj_to_str(idx);
}
if (!signature)
return NULL;
unbound_result_func = PyObject_GetItem(self->__signatures__, signature);
if (unbound_result_func) {
if (self->self || self->type) {
__pyx_FusedFunctionObject *unbound = (__pyx_FusedFunctionObject *) unbound_result_func;
Py_CLEAR(unbound->func.func_classobj);
Py_XINCREF(self->func.func_classobj);
unbound->func.func_classobj = self->func.func_classobj;
result_func = __pyx_FusedFunction_descr_get(unbound_result_func,
self->self, self->type);
} else {
result_func = unbound_result_func;
Py_INCREF(result_func);
}
}
Py_DECREF(signature);
Py_XDECREF(unbound_result_func);
return result_func;
}
static PyObject *
__pyx_FusedFunction_callfunction(PyObject *func, PyObject *args, PyObject *kw)
{
__pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func;
int static_specialized = (cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD &&
!((__pyx_FusedFunctionObject *) func)->__signatures__);
if (cyfunc->flags & __Pyx_CYFUNCTION_CCLASS && !static_specialized) {
return __Pyx_CyFunction_CallAsMethod(func, args, kw);
} else {
return __Pyx_CyFunction_Call(func, args, kw);
}
}
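/* Call path for fused functions: prepend the bound self (or validate that
args[0] is an instance of the owning type), let the __signatures__ lookup
pick the concrete specialization, then invoke it with the original
arguments. */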
static PyObject *
__pyx_FusedFunction_call(PyObject *func, PyObject *args, PyObject *kw)
{
__pyx_FusedFunctionObject *binding_func = (__pyx_FusedFunctionObject *) func;
Py_ssize_t argc = PyTuple_GET_SIZE(args);
PyObject *new_args = NULL;
__pyx_FusedFunctionObject *new_func = NULL;
PyObject *result = NULL;
PyObject *self = NULL;
int is_staticmethod = binding_func->func.flags & __Pyx_CYFUNCTION_STATICMETHOD;
int is_classmethod = binding_func->func.flags & __Pyx_CYFUNCTION_CLASSMETHOD;
if (binding_func->self) {
Py_ssize_t i;
new_args = PyTuple_New(argc + 1);
if (!new_args)
return NULL;
self = binding_func->self;
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
Py_INCREF(self);
#endif
Py_INCREF(self);
PyTuple_SET_ITEM(new_args, 0, self);
for (i = 0; i < argc; i++) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
PyObject *item = PyTuple_GET_ITEM(args, i);
Py_INCREF(item);
#else
PyObject *item = PySequence_ITEM(args, i); if (unlikely(!item)) goto bad;
#endif
PyTuple_SET_ITEM(new_args, i + 1, item);
}
args = new_args;
} else if (binding_func->type) {
if (argc < 1) {
PyErr_SetString(PyExc_TypeError, "Need at least one argument, 0 given.");
return NULL;
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
self = PyTuple_GET_ITEM(args, 0);
#else
self = PySequence_ITEM(args, 0); if (unlikely(!self)) return NULL;
#endif
}
if (self && !is_classmethod && !is_staticmethod) {
int is_instance = PyObject_IsInstance(self, binding_func->type);
if (unlikely(!is_instance)) {
PyErr_Format(PyExc_TypeError,
"First argument should be of type %.200s, got %.200s.",
((PyTypeObject *) binding_func->type)->tp_name,
Py_TYPE(self)->tp_name);
goto bad;
} else if (unlikely(is_instance == -1)) {
goto bad;
}
}
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
Py_XDECREF(self);
self = NULL;
#endif
if (binding_func->__signatures__) {
PyObject *tup;
if (is_staticmethod && binding_func->func.flags & __Pyx_CYFUNCTION_CCLASS) {
tup = PyTuple_Pack(3, args,
kw == NULL ? Py_None : kw,
binding_func->func.defaults_tuple);
if (unlikely(!tup)) goto bad;
new_func = (__pyx_FusedFunctionObject *) __Pyx_CyFunction_CallMethod(
func, binding_func->__signatures__, tup, NULL);
} else {
tup = PyTuple_Pack(4, binding_func->__signatures__, args,
kw == NULL ? Py_None : kw,
binding_func->func.defaults_tuple);
if (unlikely(!tup)) goto bad;
new_func = (__pyx_FusedFunctionObject *) __pyx_FusedFunction_callfunction(func, tup, NULL);
}
Py_DECREF(tup);
if (unlikely(!new_func))
goto bad;
Py_XINCREF(binding_func->func.func_classobj);
Py_CLEAR(new_func->func.func_classobj);
new_func->func.func_classobj = binding_func->func.func_classobj;
func = (PyObject *) new_func;
}
result = __pyx_FusedFunction_callfunction(func, args, kw);
bad:
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
Py_XDECREF(self);
#endif
Py_XDECREF(new_args);
Py_XDECREF((PyObject *) new_func);
return result;
}
static PyMemberDef __pyx_FusedFunction_members[] = {
{(char *) "__signatures__",
T_OBJECT,
offsetof(__pyx_FusedFunctionObject, __signatures__),
READONLY,
0},
{0, 0, 0, 0, 0},
};
static PyMappingMethods __pyx_FusedFunction_mapping_methods = {
0,
(binaryfunc) __pyx_FusedFunction_getitem,
0,
};
static PyTypeObject __pyx_FusedFunctionType_type = {
PyVarObject_HEAD_INIT(0, 0)
"fused_cython_function",
sizeof(__pyx_FusedFunctionObject),
0,
(destructor) __pyx_FusedFunction_dealloc,
0,
0,
0,
#if PY_MAJOR_VERSION < 3
0,
#else
0,
#endif
0,
0,
0,
&__pyx_FusedFunction_mapping_methods,
0,
(ternaryfunc) __pyx_FusedFunction_call,
0,
0,
0,
0,
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE,
0,
(traverseproc) __pyx_FusedFunction_traverse,
(inquiry) __pyx_FusedFunction_clear,
0,
0,
0,
0,
0,
__pyx_FusedFunction_members,
__pyx_CyFunction_getsets,
&__pyx_CyFunctionType_type,
0,
__pyx_FusedFunction_descr_get,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
#if PY_VERSION_HEX >= 0x030400a1
0,
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0,
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0,
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0,
#endif
};
static int __pyx_FusedFunction_init(void) {
__pyx_FusedFunctionType_type.tp_base = __pyx_CyFunctionType;
__pyx_FusedFunctionType = __Pyx_FetchCommonType(&__pyx_FusedFunctionType_type);
if (__pyx_FusedFunctionType == NULL) {
return -1;
}
return 0;
}
/* CLineInTraceback */
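/* Decide whether C source line numbers appear in tracebacks by consulting
the runtime flag cython_runtime.cline_in_traceback; if the flag is unset,
it is initialized to False. The current exception state is saved and
restored around the lookup. */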
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!__pyx_cython_runtime)) {
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
use_cline, *cython_runtime_dict,
__Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
(void) PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
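/* A sorted array mapping source lines (negated C lines, positive Python
lines) to cached PyCodeObject instances, searched by bisection;
AddTraceback uses it to avoid re-creating a code object for every raised
exception. */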
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = NULL;
PyObject *py_funcname = NULL;
#if PY_MAJOR_VERSION < 3
PyObject *py_srcfile = NULL;
py_srcfile = PyString_FromString(filename);
if (!py_srcfile) goto bad;
#endif
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
if (!py_funcname) goto bad;
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
if (!py_funcname) goto bad;
funcname = PyUnicode_AsUTF8(py_funcname);
if (!funcname) goto bad;
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
if (!py_funcname) goto bad;
#endif
}
#if PY_MAJOR_VERSION < 3
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
#else
py_code = PyCode_NewEmpty(filename, funcname, py_line);
#endif
Py_XDECREF(py_funcname); /* XDECREF: on Python 3, py_funcname is only set when c_line is nonzero */
return py_code;
bad:
Py_XDECREF(py_funcname);
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_srcfile);
#endif
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
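/* The ((0)) branch below appears to be a generated placeholder: this module
has no buffer-exporting extension type that needs a custom
__releasebuffer__ hook here. */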
if ((0)) {}
view->obj = NULL;
Py_DECREF(obj);
}
#endif
/* MemviewSliceIsContig */
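/* Check whether the slice is contiguous in the given order ('C' or 'F') by
walking the dimensions from the fastest-varying axis outward and verifying
that each stride equals the running itemsize, with no indirect (suboffset)
dimensions. */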
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim)
{
int i, index, step, start;
Py_ssize_t itemsize = mvs.memview->view.itemsize;
if (order == 'F') {
step = 1;
start = 0;
} else {
step = -1;
start = ndim - 1;
}
for (i = 0; i < ndim; i++) {
index = start + step * i;
if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
return 0;
itemsize *= mvs.shape[index];
}
return 1;
}
/* OverlappingSlices */
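/* Compute the [start, end) byte extent covered by each slice and test the
two intervals for intersection; used to detect aliasing before memoryview
copies. */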
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
void **out_start, void **out_end,
int ndim, size_t itemsize)
{
char *start, *end;
int i;
start = end = slice->data;
for (i = 0; i < ndim; i++) {
Py_ssize_t stride = slice->strides[i];
Py_ssize_t extent = slice->shape[i];
if (extent == 0) {
*out_start = *out_end = start;
return;
} else {
if (stride > 0)
end += stride * (extent - 1);
else
start += stride * (extent - 1);
}
}
*out_start = start;
*out_end = end + itemsize;
}
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize)
{
void *start1, *end1, *start2, *end2;
__pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
__pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
return (start1 < end2) && (start2 < end1);
}
/* Capsule */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
cobj = PyCapsule_New(p, sig, NULL);
#else
cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
return cobj;
}
/* IsLittleEndian */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
union {
uint32_t u32;
uint8_t u8[4];
} S;
S.u32 = 0x01020304;
return S.u8[0] == 4;
}
/* BufferFormatCheck */
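/* A hand-written parser for PEP 3118 (struct-module style) buffer format
strings. It walks the format string and the expected __Pyx_TypeInfo tree in
lock-step, tracking pack mode, alignment and offsets, and raises ValueError
on any dtype mismatch. */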
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
PyErr_Format(PyExc_ValueError,
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case '?': return "'bool'";
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
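/* Alignment probes: in `struct { char c; T x; }`, the padding inserted
after `c` equals the alignment requirement of T, so
sizeof(struct) - sizeof(T) yields T's alignment. */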
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct to align
on the first member of the struct. This will probably be the same as the
alignment probes above, but we don't have any guarantees.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case '?': case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
/* a plain char group ('H') matches any type of the same size */
if (!((type->typegroup == 'H' || group == 'H') && type->size == size)) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number, ndim;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ndim = ctx->head->field->type->ndim;
while (*ts && *ts != ')') {
switch (*ts) {
case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue; /* skip whitespace; advancing ts here avoids an infinite loop */
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
CYTHON_FALLTHROUGH;
case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) &&
(ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
CYTHON_FALLTHROUGH;
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
/* TypeInfoCompare */
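/* Structural equality test for two __Pyx_TypeInfo descriptions: size, type
group, signedness, dimensions and (recursively) struct fields must match;
the 'H' (plain char/bytes) group is compared by size alone. */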
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
int i;
if (!a || !b)
return 0;
if (a == b)
return 1;
if (a->size != b->size || a->typegroup != b->typegroup ||
a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
if (a->typegroup == 'H' || b->typegroup == 'H') {
return a->size == b->size;
} else {
return 0;
}
}
if (a->ndim) {
for (i = 0; i < a->ndim; i++)
if (a->arraysize[i] != b->arraysize[i])
return 0;
}
if (a->typegroup == 'S') {
if (a->flags != b->flags)
return 0;
if (a->fields || b->fields) {
if (!(a->fields && b->fields))
return 0;
for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
__Pyx_StructField *field_a = a->fields + i;
__Pyx_StructField *field_b = b->fields + i;
if (field_a->offset != field_b->offset ||
!__pyx_typeinfo_cmp(field_a->type, field_b->type))
return 0;
}
return !a->fields[i].type && !b->fields[i].type;
}
}
return 1;
}
/* MemviewSliceValidateAndInit */
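/* Validate a Py_buffer against per-dimension axis specs (the
DIRECT/PTR/CONTIG/FOLLOW flags) and overall C/Fortran contiguity,
re-exporting the object through a new memoryview when required, then
initialize the memviewslice. */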
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
if (buf->shape[dim] <= 1)
return 1;
if (buf->strides) {
if (spec & __Pyx_MEMVIEW_CONTIG) {
if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
if (unlikely(buf->strides[dim] != sizeof(void *))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly contiguous "
"in dimension %d.", dim);
goto fail;
}
} else if (unlikely(buf->strides[dim] != buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_FOLLOW) {
Py_ssize_t stride = buf->strides[dim];
if (stride < 0)
stride = -stride;
if (unlikely(stride < buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
} else {
if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not contiguous in "
"dimension %d", dim);
goto fail;
} else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not indirect in "
"dimension %d", dim);
goto fail;
} else if (unlikely(buf->suboffsets)) {
PyErr_SetString(PyExc_ValueError,
"Buffer exposes suboffsets but no strides");
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
if (spec & __Pyx_MEMVIEW_DIRECT) {
if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) {
PyErr_Format(PyExc_ValueError,
"Buffer not compatible with direct access "
"in dimension %d.", dim);
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_PTR) {
if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly accessible "
"in dimension %d.", dim);
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
int i;
if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
Py_ssize_t stride = 1;
for (i = 0; i < ndim; i++) {
if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not fortran contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
} else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
Py_ssize_t stride = 1;
for (i = ndim - 1; i > -1; i--) {
if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not C contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
}
return 1;
fail:
return 0;
}
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj)
{
struct __pyx_memoryview_obj *memview, *new_memview;
__Pyx_RefNannyDeclarations
Py_buffer *buf;
int i, spec = 0, retval = -1;
__Pyx_BufFmt_Context ctx;
int from_memoryview = __pyx_memoryview_check(original_obj);
__Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
original_obj)->typeinfo)) {
memview = (struct __pyx_memoryview_obj *) original_obj;
new_memview = NULL;
} else {
memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
original_obj, buf_flags, 0, dtype);
new_memview = memview;
if (unlikely(!memview))
goto fail;
}
buf = &memview->view;
if (unlikely(buf->ndim != ndim)) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
ndim, buf->ndim);
goto fail;
}
if (new_memview) {
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail;
}
if (unlikely((unsigned) buf->itemsize != dtype->size)) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
"does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
buf->itemsize,
(buf->itemsize > 1) ? "s" : "",
dtype->name,
dtype->size,
(dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->len > 0) {
for (i = 0; i < ndim; i++) {
spec = axes_specs[i];
if (unlikely(!__pyx_check_strides(buf, i, ndim, spec)))
goto fail;
if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec)))
goto fail;
}
if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)))
goto fail;
}
if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
new_memview != NULL) == -1)) {
goto fail;
}
retval = 0;
goto no_fail;
fail:
Py_XDECREF(new_memview);
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
/* ObjectToMemviewSlice */
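/* Convert a Python object into a validated 2-D, C-contiguous float
memoryview slice (a `float[:, ::1]`); Py_None passes through as the "None
slice" sentinel. A `double` twin follows below. */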
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_float(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2,
&__Pyx_TypeInfo_float, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* CIntFromPyVerify */
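/* Verify that a value obtained from a CPython conversion function
round-trips through the target C type; on truncation, jump to the overflow
handlers (and, for the _EXC variant, propagate an already-set Python error
first). */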
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
/* MemviewSliceCopyTemplate */
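/* Allocate a new contiguous array with the same shape as `from_mvs` and
copy the slice's contents into it; indirect (suboffset) dimensions cannot
be copied this way. */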
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object)
{
__Pyx_RefNannyDeclarations
int i;
__Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
Py_buffer *buf = &from_memview->view;
PyObject *shape_tuple = NULL;
PyObject *temp_int = NULL;
struct __pyx_array_obj *array_obj = NULL;
struct __pyx_memoryview_obj *memview_obj = NULL;
__Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
for (i = 0; i < ndim; i++) {
if (unlikely(from_mvs->suboffsets[i] >= 0)) {
PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
"indirect dimensions (axis %d)", i);
goto fail;
}
}
shape_tuple = PyTuple_New(ndim);
if (unlikely(!shape_tuple)) {
goto fail;
}
__Pyx_GOTREF(shape_tuple);
for(i = 0; i < ndim; i++) {
temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
if(unlikely(!temp_int)) {
goto fail;
} else {
PyTuple_SET_ITEM(shape_tuple, i, temp_int);
temp_int = NULL;
}
}
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
if (unlikely(!array_obj)) {
goto fail;
}
__Pyx_GOTREF(array_obj);
memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
(PyObject *) array_obj, contig_flag,
dtype_is_object,
from_mvs->memview->typeinfo);
if (unlikely(!memview_obj))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
goto fail;
if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
dtype_is_object) < 0))
goto fail;
goto no_fail;
fail:
__Pyx_XDECREF(new_mvs.memview);
new_mvs.memview = NULL;
new_mvs.data = NULL;
no_fail:
__Pyx_XDECREF(shape_tuple);
__Pyx_XDECREF(temp_int);
__Pyx_XDECREF(array_obj);
__Pyx_RefNannyFinishContext();
return new_mvs;
}
/* CIntToPy */
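/* Convert a C long to a Python int using the smallest suitable API. The
sizeof(...) comparisons are compile-time constants that the C compiler
folds away; the byte-array path is only a fallback for exotic integer
sizes. */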
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* BytesContains */
static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character) {
const Py_ssize_t length = PyBytes_GET_SIZE(bytes);
char* char_start = PyBytes_AS_STRING(bytes);
return memchr(char_start, (unsigned char)character, (size_t)length) != NULL;
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* CIntFromPy */
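/* Convert a Python int to a C int with overflow checking. With
CYTHON_USE_PYLONG_INTERNALS the small-int fast path reads the PyLong digits
(PyLong_SHIFT bits each) directly; otherwise it falls back to the
PyLong_As*() conversions, verified by the __PYX_VERIFY_RETURN_INT*
macros. */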
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
/* ImportNumPyArray */
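/* Lazily import numpy and fetch numpy.ndarray for isinstance checks; if
numpy is unavailable (or ndarray is not a type), cache Py_None instead so
the import is attempted only once. */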
static PyObject* __Pyx__ImportNumPyArray(void) {
PyObject *numpy_module, *ndarray_object = NULL;
numpy_module = __Pyx_Import(__pyx_n_s_numpy, NULL, 0);
if (likely(numpy_module)) {
ndarray_object = PyObject_GetAttrString(numpy_module, "ndarray");
Py_DECREF(numpy_module);
}
if (unlikely(!ndarray_object)) {
PyErr_Clear();
}
if (unlikely(!ndarray_object || !PyObject_TypeCheck(ndarray_object, &PyType_Type))) {
Py_XDECREF(ndarray_object);
Py_INCREF(Py_None);
ndarray_object = Py_None;
}
return ndarray_object;
}
static CYTHON_INLINE PyObject* __Pyx_ImportNumPyArrayTypeIfAvailable(void) {
if (unlikely(!__pyx_numpy_ndarray)) {
__pyx_numpy_ndarray = __Pyx__ImportNumPyArray();
}
Py_INCREF(__pyx_numpy_ndarray);
return __pyx_numpy_ndarray;
}
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
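/* Descriptive note: the next line probes endianness at runtime; the first
   byte of the int 1 is nonzero only on little-endian machines, and
   _PyLong_AsByteArray needs that flag to write the bytes of val in the
   native order. */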
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* CIntFromPy */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const char neg_one = (char) -1, const_zero = (char) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(char) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (char) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0])
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) {
return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) {
return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) {
return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (char) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(char) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0])
case -2:
if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
}
#endif
if (sizeof(char) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
char val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (char) -1;
}
} else {
char val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (char) -1;
val = __Pyx_PyInt_As_char(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to char");
return (char) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to char");
return (char) -1;
}
/* CheckBinaryVersion */
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
int retval;
if (unlikely(!x)) return -1;
retval = __Pyx_PyObject_IsTrue(x);
Py_DECREF(x);
return retval;
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) {
if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) {
return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o);
#if PY_MAJOR_VERSION < 3
} else if (likely(PyInt_CheckExact(o))) {
return PyInt_AS_LONG(o);
#endif
} else {
Py_ssize_t ival;
PyObject *x;
x = PyNumber_Index(o);
if (!x) return -1;
ival = PyInt_AsLong(x);
Py_DECREF(x);
return ival;
}
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
DRB093-doall2-collapse-orig-no.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two-dimensional array computation:
collapse(2) is used to associate two loops with omp for.
The corresponding loop iteration variables are private.
*/
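/*
  Illustrative sketch (not part of the benchmark itself): the collapse(2)
  form the comment above refers to would associate both loops with a single
  omp for, e.g.

      #pragma omp parallel for collapse(2) private(i, j)
      for (i = 0; i < 100; i++)
          for (j = 0; j < 100; j++)
              a[i][j] = i;

  The Cetus-transformed code below covers the same iteration space with
  nested parallel loops instead.
*/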
#include <stdio.h>
int a[100][100];
int main()
{
int i, j;
int _ret_val_0;
#pragma cetus private(i, j)
#pragma loop name main#0
#pragma cetus parallel
#pragma omp parallel for private(i, j)
for (i=0; i<100; i ++ )
{
#pragma cetus private(j)
#pragma loop name main#0#0
#pragma cetus parallel
#pragma omp parallel for private(j)
for (j=0; j<100; j ++ )
{
a[i][j]=i;
}
}
#pragma cetus private(i, j)
#pragma loop name main#1
#pragma cetus parallel
#pragma omp parallel for private(i, j)
for (i=0; i<100; i ++ )
{
#pragma cetus private(j)
#pragma loop name main#1#0
#pragma cetus parallel
#pragma omp parallel for private(j)
for (j=0; j<100; j ++ )
{
a[i][j]=(a[i][j]+1);
}
}
#pragma cetus private(i, j)
#pragma loop name main#2
for (i=0; i<100; i ++ )
{
#pragma cetus private(j)
#pragma loop name main#2#0
for (j=0; j<100; j ++ )
{
printf("%d\n", a[i][j]);
}
}
_ret_val_0=0;
return _ret_val_0;
}
|
GB_unop__log2_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__log2_fp32_fp32
// op(A') function: GB_unop_tran__log2_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = log2f (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = log2f (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = log2f (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG2 || GxB_NO_FP32)
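/* Note: when GB_DISABLE evaluates true at compile time, both kernels below
   reduce to stubs returning GrB_NO_VALUE, and the caller falls back to the
   generic (non-hard-coded) apply/transpose path, as the comment above says. */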
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__log2_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = log2f (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__log2_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
region_layer.c | #include "region_layer.h"
#include "activations.h"
#include "blas.h"
#include "box.h"
#include "dark_cuda.h"
#include "utils.h"
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#define DOABS 1
region_layer make_region_layer(int batch, int w, int h, int n, int classes, int coords, int max_boxes)
{
region_layer l = { (LAYER_TYPE)0 };
l.type = REGION;
l.n = n;
l.batch = batch;
l.h = h;
l.w = w;
l.classes = classes;
l.coords = coords;
l.cost = (float*)calloc(1, sizeof(float));
l.biases = (float*)calloc(n * 2, sizeof(float));
l.bias_updates = (float*)calloc(n * 2, sizeof(float));
l.outputs = h*w*n*(classes + coords + 1);
l.inputs = l.outputs;
l.max_boxes = max_boxes;
l.truths = max_boxes*(5);
l.delta = (float*)calloc(batch * l.outputs, sizeof(float));
l.output = (float*)calloc(batch * l.outputs, sizeof(float));
int i;
for(i = 0; i < n*2; ++i){
l.biases[i] = .5;
}
l.forward = forward_region_layer;
l.backward = backward_region_layer;
#ifdef GPU
l.forward_gpu = forward_region_layer_gpu;
l.backward_gpu = backward_region_layer_gpu;
l.output_gpu = cuda_make_array(l.output, batch*l.outputs);
l.delta_gpu = cuda_make_array(l.delta, batch*l.outputs);
#endif
fprintf(stderr, "detection\n");
srand(time(0));
return l;
}
void resize_region_layer(layer *l, int w, int h)
{
#ifdef GPU
int old_w = l->w;
int old_h = l->h;
#endif
l->w = w;
l->h = h;
l->outputs = h*w*l->n*(l->classes + l->coords + 1);
l->inputs = l->outputs;
l->output = (float*)realloc(l->output, l->batch * l->outputs * sizeof(float));
l->delta = (float*)realloc(l->delta, l->batch * l->outputs * sizeof(float));
#ifdef GPU
if (old_w < w || old_h < h) {
cuda_free(l->delta_gpu);
cuda_free(l->output_gpu);
l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs);
l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs);
}
#endif
}
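/* Descriptive note: get_region_box decodes a raw prediction into a box,
   YOLOv2-style: x,y go through a logistic and are offset by the cell (i,j),
   then normalized by the grid size; w,h are exponentiated and scaled by the
   anchor pair biases[2n], biases[2n+1] (additionally divided by the grid
   size when DOABS is set, i.e. when anchors are given in absolute grid
   units). */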
box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h)
{
box b;
b.x = (i + logistic_activate(x[index + 0])) / w;
b.y = (j + logistic_activate(x[index + 1])) / h;
b.w = exp(x[index + 2]) * biases[2*n];
b.h = exp(x[index + 3]) * biases[2*n+1];
if(DOABS){
b.w = exp(x[index + 2]) * biases[2*n] / w;
b.h = exp(x[index + 3]) * biases[2*n+1] / h;
}
return b;
}
float delta_region_box(box truth, float *x, float *biases, int n, int index, int i, int j, int w, int h, float *delta, float scale)
{
box pred = get_region_box(x, biases, n, index, i, j, w, h);
float iou = box_iou(pred, truth);
float tx = (truth.x*w - i);
float ty = (truth.y*h - j);
float tw = log(truth.w / biases[2*n]);
float th = log(truth.h / biases[2*n + 1]);
if(DOABS){
tw = log(truth.w*w / biases[2*n]);
th = log(truth.h*h / biases[2*n + 1]);
}
delta[index + 0] = scale * (tx - logistic_activate(x[index + 0])) * logistic_gradient(logistic_activate(x[index + 0]));
delta[index + 1] = scale * (ty - logistic_activate(x[index + 1])) * logistic_gradient(logistic_activate(x[index + 1]));
delta[index + 2] = scale * (tw - x[index + 2]);
delta[index + 3] = scale * (th - x[index + 3]);
return iou;
}
void delta_region_class(float *output, float *delta, int index, int class_id, int classes, tree *hier, float scale, float *avg_cat, int focal_loss)
{
int i, n;
if(hier){
float pred = 1;
while(class_id >= 0){
pred *= output[index + class_id];
int g = hier->group[class_id];
int offset = hier->group_offset[g];
for(i = 0; i < hier->group_size[g]; ++i){
delta[index + offset + i] = scale * (0 - output[index + offset + i]);
}
delta[index + class_id] = scale * (1 - output[index + class_id]);
class_id = hier->parent[class_id];
}
*avg_cat += pred;
} else {
// Focal loss
if (focal_loss) {
// Focal Loss
float alpha = 0.5; // 0.25 or 0.5
//float gamma = 2; // hardcoded in many places of the grad-formula
int ti = index + class_id;
float pt = output[ti] + 0.000000000000001F;
// http://fooplot.com/#W3sidHlwZSI6MCwiZXEiOiItKDEteCkqKDIqeCpsb2coeCkreC0xKSIsImNvbG9yIjoiIzAwMDAwMCJ9LHsidHlwZSI6MTAwMH1d
float grad = -(1 - pt) * (2 * pt*logf(pt) + pt - 1); // http://blog.csdn.net/linmingan/article/details/77885832
//float grad = (1 - pt) * (2 * pt*logf(pt) + pt - 1); // https://github.com/unsky/focal-loss
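            /* Hedged check of the formula above (gamma = 2 assumed):
                 FL(pt)      = -(1-pt)^2 * log(pt)
                 dFL/dpt     = 2*(1-pt)*log(pt) - (1-pt)^2 / pt
                 pt*dFL/dpt  = (1-pt) * (2*pt*log(pt) + pt - 1)
               so grad = -pt * dFL/dpt; the extra factor pt and the minus
               sign are consistent with the (target - output) delta
               convention used in the loop below. */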
for (n = 0; n < classes; ++n) {
delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]);
delta[index + n] *= alpha*grad;
if (n == class_id) *avg_cat += output[index + n];
}
}
else {
// default
for (n = 0; n < classes; ++n) {
delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]);
if (n == class_id) *avg_cat += output[index + n];
}
}
}
}
float logit(float x)
{
return log(x/(1.-x));
}
float tisnan(float x)
{
return (x != x);
}
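/* Descriptive note: entry_index assumes l.output is laid out as
   [batch][anchor n][channel entry][w*h cells], where each anchor owns
   (l.coords + l.classes + 1) consecutive channels; hence the offset
   batch*l.outputs + n*w*h*(coords+classes+1) + entry*w*h + loc. */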
static int entry_index(layer l, int batch, int location, int entry)
{
int n = location / (l.w*l.h);
int loc = location % (l.w*l.h);
return batch*l.outputs + n*l.w*l.h*(l.coords + l.classes + 1) + entry*l.w*l.h + loc;
}
void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output);
void forward_region_layer(const region_layer l, network_state state)
{
int i,j,b,t,n;
int size = l.coords + l.classes + 1;
memcpy(l.output, state.input, l.outputs*l.batch*sizeof(float));
#ifndef GPU
flatten(l.output, l.w*l.h, size*l.n, l.batch, 1);
#endif
for (b = 0; b < l.batch; ++b){
for(i = 0; i < l.h*l.w*l.n; ++i){
int index = size*i + b*l.outputs;
l.output[index + 4] = logistic_activate(l.output[index + 4]);
}
}
#ifndef GPU
if (l.softmax_tree){
for (b = 0; b < l.batch; ++b){
for(i = 0; i < l.h*l.w*l.n; ++i){
int index = size*i + b*l.outputs;
softmax_tree(l.output + index + 5, 1, 0, 1, l.softmax_tree, l.output + index + 5);
}
}
} else if (l.softmax){
for (b = 0; b < l.batch; ++b){
for(i = 0; i < l.h*l.w*l.n; ++i){
int index = size*i + b*l.outputs;
softmax(l.output + index + 5, l.classes, 1, l.output + index + 5, 1);
}
}
}
#endif
if(!state.train) return;
memset(l.delta, 0, l.outputs * l.batch * sizeof(float));
float avg_iou = 0;
float recall = 0;
float avg_cat = 0;
float avg_obj = 0;
float avg_anyobj = 0;
int count = 0;
int class_count = 0;
*(l.cost) = 0;
for (b = 0; b < l.batch; ++b) {
if(l.softmax_tree){
int onlyclass_id = 0;
for(t = 0; t < l.max_boxes; ++t){
box truth = float_to_box(state.truth + t*5 + b*l.truths);
if(!truth.x) break; // continue;
int class_id = state.truth[t*5 + b*l.truths + 4];
float maxp = 0;
int maxi = 0;
if(truth.x > 100000 && truth.y > 100000){
for(n = 0; n < l.n*l.w*l.h; ++n){
int index = size*n + b*l.outputs + 5;
float scale = l.output[index-1];
float p = scale*get_hierarchy_probability(l.output + index, l.softmax_tree, class_id);
if(p > maxp){
maxp = p;
maxi = n;
}
}
int index = size*maxi + b*l.outputs + 5;
delta_region_class(l.output, l.delta, index, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss);
++class_count;
onlyclass_id = 1;
break;
}
}
if(onlyclass_id) continue;
}
for (j = 0; j < l.h; ++j) {
for (i = 0; i < l.w; ++i) {
for (n = 0; n < l.n; ++n) {
int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs;
box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h);
float best_iou = 0;
int best_class_id = -1;
for(t = 0; t < l.max_boxes; ++t){
box truth = float_to_box(state.truth + t*5 + b*l.truths);
int class_id = state.truth[t * 5 + b*l.truths + 4];
if (class_id >= l.classes) continue; // skip labels whose class_id exceeds the number of classes in the cfg-file
if(!truth.x) break; // continue;
float iou = box_iou(pred, truth);
if (iou > best_iou) {
best_class_id = state.truth[t*5 + b*l.truths + 4];
best_iou = iou;
}
}
avg_anyobj += l.output[index + 4];
l.delta[index + 4] = l.noobject_scale * ((0 - l.output[index + 4]) * logistic_gradient(l.output[index + 4]));
if(l.classfix == -1) l.delta[index + 4] = l.noobject_scale * ((best_iou - l.output[index + 4]) * logistic_gradient(l.output[index + 4]));
else{
if (best_iou > l.thresh) {
l.delta[index + 4] = 0;
if(l.classfix > 0){
delta_region_class(l.output, l.delta, index + 5, best_class_id, l.classes, l.softmax_tree, l.class_scale*(l.classfix == 2 ? l.output[index + 4] : 1), &avg_cat, l.focal_loss);
++class_count;
}
}
}
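/* Descriptive note: for the first 12800 images seen, every predictor is
   nudged toward its anchor prior centered in its cell with a small scale
   (.01), a warm-up that stabilizes early box training. */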
if(*(state.net.seen) < 12800){
box truth = {0};
truth.x = (i + .5)/l.w;
truth.y = (j + .5)/l.h;
truth.w = l.biases[2*n];
truth.h = l.biases[2*n+1];
if(DOABS){
truth.w = l.biases[2*n]/l.w;
truth.h = l.biases[2*n+1]/l.h;
}
delta_region_box(truth, l.output, l.biases, n, index, i, j, l.w, l.h, l.delta, .01);
}
}
}
}
for(t = 0; t < l.max_boxes; ++t){
box truth = float_to_box(state.truth + t*5 + b*l.truths);
int class_id = state.truth[t * 5 + b*l.truths + 4];
if (class_id >= l.classes) {
printf(" Warning: in txt-labels class_id=%d >= classes=%d in cfg-file. In txt-labels class_id should be [from 0 to %d] \n", class_id, l.classes, l.classes-1);
getchar();
continue; // skip labels whose class_id exceeds the number of classes in the cfg-file
}
if(!truth.x) break; // continue;
float best_iou = 0;
int best_index = 0;
int best_n = 0;
i = (truth.x * l.w);
j = (truth.y * l.h);
//printf("%d %f %d %f\n", i, truth.x*l.w, j, truth.y*l.h);
box truth_shift = truth;
truth_shift.x = 0;
truth_shift.y = 0;
//printf("index %d %d\n",i, j);
for(n = 0; n < l.n; ++n){
int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs;
box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h);
if(l.bias_match){
pred.w = l.biases[2*n];
pred.h = l.biases[2*n+1];
if(DOABS){
pred.w = l.biases[2*n]/l.w;
pred.h = l.biases[2*n+1]/l.h;
}
}
//printf("pred: (%f, %f) %f x %f\n", pred.x, pred.y, pred.w, pred.h);
pred.x = 0;
pred.y = 0;
float iou = box_iou(pred, truth_shift);
if (iou > best_iou){
best_index = index;
best_iou = iou;
best_n = n;
}
}
//printf("%d %f (%f, %f) %f x %f\n", best_n, best_iou, truth.x, truth.y, truth.w, truth.h);
float iou = delta_region_box(truth, l.output, l.biases, best_n, best_index, i, j, l.w, l.h, l.delta, l.coord_scale);
if(iou > .5) recall += 1;
avg_iou += iou;
//l.delta[best_index + 4] = iou - l.output[best_index + 4];
avg_obj += l.output[best_index + 4];
l.delta[best_index + 4] = l.object_scale * (1 - l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]);
if (l.rescore) {
l.delta[best_index + 4] = l.object_scale * (iou - l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]);
}
if (l.map) class_id = l.map[class_id];
delta_region_class(l.output, l.delta, best_index + 5, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss);
++count;
++class_count;
}
}
//printf("\n");
#ifndef GPU
flatten(l.delta, l.w*l.h, size*l.n, l.batch, 0);
#endif
*(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
printf("Region Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, Avg Recall: %f, count: %d\n", avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, count);
}
void backward_region_layer(const region_layer l, network_state state)
{
axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, state.delta, 1);
}
void get_region_boxes(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map)
{
int i;
float *const predictions = l.output;
#pragma omp parallel for
for (i = 0; i < l.w*l.h; ++i){
int j, n;
int row = i / l.w;
int col = i % l.w;
for(n = 0; n < l.n; ++n){
int index = i*l.n + n;
int p_index = index * (l.classes + 5) + 4;
float scale = predictions[p_index];
if(l.classfix == -1 && scale < .5) scale = 0;
int box_index = index * (l.classes + 5);
boxes[index] = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h);
boxes[index].x *= w;
boxes[index].y *= h;
boxes[index].w *= w;
boxes[index].h *= h;
int class_index = index * (l.classes + 5) + 5;
if(l.softmax_tree){
hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);
int found = 0;
if(map){
for(j = 0; j < 200; ++j){
float prob = scale*predictions[class_index+map[j]];
probs[index][j] = (prob > thresh) ? prob : 0;
}
} else {
for(j = l.classes - 1; j >= 0; --j){
if(!found && predictions[class_index + j] > .5){
found = 1;
} else {
predictions[class_index + j] = 0;
}
float prob = predictions[class_index+j];
probs[index][j] = (scale > thresh) ? prob : 0;
}
}
} else {
for(j = 0; j < l.classes; ++j){
float prob = scale*predictions[class_index+j];
probs[index][j] = (prob > thresh) ? prob : 0;
}
}
if(only_objectness){
probs[index][0] = scale;
}
}
}
}
#ifdef GPU
void forward_region_layer_gpu(const region_layer l, network_state state)
{
/*
if(!state.train){
copy_ongpu(l.batch*l.inputs, state.input, 1, l.output_gpu, 1);
return;
}
*/
flatten_ongpu(state.input, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 1, l.output_gpu);
if(l.softmax_tree){
int i;
int count = 5;
for (i = 0; i < l.softmax_tree->groups; ++i) {
int group_size = l.softmax_tree->group_size[i];
softmax_gpu(l.output_gpu+count, group_size, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + count);
count += group_size;
}
}else if (l.softmax){
softmax_gpu(l.output_gpu+5, l.classes, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + 5);
}
float* in_cpu = (float*)calloc(l.batch * l.inputs, sizeof(float));
float *truth_cpu = 0;
if(state.truth){
int num_truth = l.batch*l.truths;
truth_cpu = (float*)calloc(num_truth, sizeof(float));
cuda_pull_array(state.truth, truth_cpu, num_truth);
}
cuda_pull_array(l.output_gpu, in_cpu, l.batch*l.inputs);
//cudaStreamSynchronize(get_cuda_stream());
network_state cpu_state = state;
cpu_state.train = state.train;
cpu_state.truth = truth_cpu;
cpu_state.input = in_cpu;
forward_region_layer(l, cpu_state);
//cuda_push_array(l.output_gpu, l.output, l.batch*l.outputs);
free(cpu_state.input);
if(!state.train) return;
cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs);
//cudaStreamSynchronize(get_cuda_stream());
if(cpu_state.truth) free(cpu_state.truth);
}
void backward_region_layer_gpu(region_layer l, network_state state)
{
flatten_ongpu(l.delta_gpu, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 0, state.delta);
}
#endif
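/* Descriptive note: the network input is letterboxed, i.e. the original
   w x h image is resized to new_w x new_h (aspect ratio preserved) and
   padded to netw x neth; correct_region_boxes inverts that mapping so the
   detections refer to the original image (in absolute pixels if !relative). */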
void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
int i;
int new_w = 0;
int new_h = 0;
if (((float)netw / w) < ((float)neth / h)) {
new_w = netw;
new_h = (h * netw) / w;
}
else {
new_h = neth;
new_w = (w * neth) / h;
}
for (i = 0; i < n; ++i) {
box b = dets[i].bbox;
b.x = (b.x - (netw - new_w) / 2. / netw) / ((float)new_w / netw);
b.y = (b.y - (neth - new_h) / 2. / neth) / ((float)new_h / neth);
b.w *= (float)netw / new_w;
b.h *= (float)neth / new_h;
if (!relative) {
b.x *= w;
b.w *= w;
b.y *= h;
b.h *= h;
}
dets[i].bbox = b;
}
}
void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
{
int i, j, n, z;
float *predictions = l.output;
if (l.batch == 2) {
float *flip = l.output + l.outputs;
for (j = 0; j < l.h; ++j) {
for (i = 0; i < l.w / 2; ++i) {
for (n = 0; n < l.n; ++n) {
for (z = 0; z < l.classes + l.coords + 1; ++z) {
int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
float swap = flip[i1];
flip[i1] = flip[i2];
flip[i2] = swap;
if (z == 0) {
flip[i1] = -flip[i1];
flip[i2] = -flip[i2];
}
}
}
}
}
for (i = 0; i < l.outputs; ++i) {
l.output[i] = (l.output[i] + flip[i]) / 2.;
}
}
for (i = 0; i < l.w*l.h; ++i) {
int row = i / l.w;
int col = i % l.w;
for (n = 0; n < l.n; ++n) {
int index = n*l.w*l.h + i;
for (j = 0; j < l.classes; ++j) {
dets[index].prob[j] = 0;
}
int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4);
float scale = l.background ? 1 : predictions[obj_index];
dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h);// , l.w*l.h);
dets[index].objectness = scale > thresh ? scale : 0;
if (dets[index].mask) {
for (j = 0; j < l.coords - 4; ++j) {
dets[index].mask[j] = l.output[mask_index + j*l.w*l.h];
}
}
int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background);
if (l.softmax_tree) {
hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);// , l.w*l.h);
if (map) {
for (j = 0; j < 200; ++j) {
int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + map[j]);
float prob = scale*predictions[class_index];
dets[index].prob[j] = (prob > thresh) ? prob : 0;
}
}
else {
int j = hierarchy_top_prediction(predictions + class_index, l.softmax_tree, tree_thresh, l.w*l.h);
dets[index].prob[j] = (scale > thresh) ? scale : 0;
}
}
else {
if (dets[index].objectness) {
for (j = 0; j < l.classes; ++j) {
int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j);
float prob = scale*predictions[class_index];
dets[index].prob[j] = (prob > thresh) ? prob : 0;
}
}
}
}
}
correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative);
}
void zero_objectness(layer l)
{
int i, n;
for (i = 0; i < l.w*l.h; ++i) {
for (n = 0; n < l.n; ++n) {
int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
l.output[obj_index] = 0;
}
}
}
|
edgebased_levelset.h | //    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Antonia Larese
//
#if !defined(KRATOS_EDGEBASED_LEVELSET_FLUID_SOLVER_H_INCLUDED)
#define KRATOS_EDGEBASED_LEVELSET_FLUID_SOLVER_H_INCLUDED
//#define SPLIT_OSS
// #define SYMM_PRESS
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// #include <omp.h>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/node.h"
#include "includes/cfd_variables.h"
//#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "free_surface_application.h"
namespace Kratos
{
template<unsigned int TDim, class MatrixContainer, class TSparseSpace, class TLinearSolver>
class EdgeBasedLevelSet
{
public:
//name for the self defined structure
typedef EdgesStructureType<TDim> CSR_Tuple;
typedef vector<CSR_Tuple> EdgesVectorType;
//name for row start and column index vectors
typedef vector<unsigned int> IndicesVectorType;
//defining matrix type for test calculations
typedef vector< array_1d<double, TDim> > CalcVectorType;
//defining type for local storage of nodal values
typedef vector<double> ValuesVectorType;
//defining types for matrix operations
typedef typename TSparseSpace::MatrixType TSystemMatrixType;
typedef typename TSparseSpace::VectorType TSystemVectorType;
typedef std::size_t SizeType;
//constructor and destructor
EdgeBasedLevelSet(MatrixContainer& mr_matrix_container,
ModelPart& mr_model_part,
const double viscosity,
const double density,
const Vector body_force,
bool use_mass_correction,
double edge_detection_angle,
double stabdt_pressure_factor,
double stabdt_convection_factor,
double tau2_factor,
bool assume_constant_dp
)
: mr_matrix_container(mr_matrix_container),
mr_model_part(mr_model_part),
mstabdt_pressure_factor(stabdt_pressure_factor),
mstabdt_convection_factor(stabdt_convection_factor),
medge_detection_angle(edge_detection_angle),
mtau2_factor(tau2_factor),
massume_constant_dp(assume_constant_dp)
{
for (ModelPart::NodesContainerType::iterator it=mr_model_part.NodesBegin(); it!=mr_model_part.NodesEnd(); it++)
it->FastGetSolutionStepValue (VISCOSITY) = viscosity;
mMolecularViscosity = viscosity;
for(unsigned int i = 0; i<TDim; i++)
mBodyForce[i] = body_force[i];
mRho = density;
mdelta_t_avg = 1000.0;
max_dt = 1.0;
muse_mass_correction = use_mass_correction;
mshock_coeff = 0.7;
mWallLawIsActive = false;
};
~EdgeBasedLevelSet()
{
};
//***********************************
//function to initialize fluid solver
void Initialize(
)
{
KRATOS_TRY
//get number of nodes
unsigned int n_nodes = mr_model_part.Nodes().size();
unsigned int n_edges = mr_matrix_container.GetNumberEdges();
//size data vectors
mViscosity.resize (n_nodes);
mr_matrix_container.SetToZero (mViscosity);
mWork.resize(n_nodes);
mr_matrix_container.SetToZero(mWork);
mvel_n.resize(n_nodes);
mr_matrix_container.SetToZero(mvel_n);
mvel_n1.resize(n_nodes);
mr_matrix_container.SetToZero(mvel_n1);
mPn.resize(n_nodes);
mr_matrix_container.SetToZero(mPn);
mPn1.resize(n_nodes);
mr_matrix_container.SetToZero(mPn1);
mHmin.resize(n_nodes);
mr_matrix_container.SetToZero(mHmin);
mHavg.resize(n_nodes);
mr_matrix_container.SetToZero(mHavg);
mNodalFlag.resize(n_nodes);
mr_matrix_container.SetToZero(mNodalFlag);
mdistances.resize(n_nodes);
mr_matrix_container.SetToZero(mdistances);
mTauPressure.resize(n_nodes);
mr_matrix_container.SetToZero(mTauPressure);
mTauConvection.resize(n_nodes);
mr_matrix_container.SetToZero(mTauConvection);
mTau2.resize(n_nodes);
mr_matrix_container.SetToZero(mTau2);
mPi.resize(n_nodes);
mr_matrix_container.SetToZero(mPi);
mXi.resize(n_nodes);
mr_matrix_container.SetToZero(mXi);
mx.resize(n_nodes);
mr_matrix_container.SetToZero(mx);
mEdgeDimensions.resize(n_edges);
mr_matrix_container.SetToZero(mEdgeDimensions);
//convection variables
mBeta.resize(n_nodes);
mr_matrix_container.SetToZero(mBeta);
mPiConvection.resize(n_nodes);
mr_matrix_container.SetToZero(mPiConvection);
mphi_n.resize(n_nodes);
mr_matrix_container.SetToZero(mphi_n);
mphi_n1.resize(n_nodes);
mr_matrix_container.SetToZero(mphi_n1);
mEps.resize(n_nodes);
mr_matrix_container.SetToZero(mEps);
//mD.resize(n_nodes); mr_matrix_container.SetToZero(mD);
mA.resize(n_nodes);
mr_matrix_container.SetToZero(mA);
mB.resize(n_nodes);
mr_matrix_container.SetToZero(mB);
mStrVel.resize(n_nodes);
mr_matrix_container.SetToZero(mStrVel);
mdiv_error.resize(n_nodes);
mr_matrix_container.SetToZero(mdiv_error);
mdiag_stiffness.resize (n_nodes);
mr_matrix_container.SetToZero (mdiag_stiffness);
mis_slip.resize (n_nodes);
// ValuesVectorType external_pressure;
// external_pressure.resize(n_nodes);
//read velocity and pressure data from Kratos
mr_matrix_container.FillScalarFromDatabase (VISCOSITY, mViscosity, mr_model_part.Nodes() );
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, mr_model_part.Nodes());
mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, mr_model_part.Nodes());
mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, mr_model_part.Nodes());
mr_matrix_container.FillCoordinatesFromDatabase(mx, mr_model_part.Nodes());
//set flag for first time step
mFirstStep = true;
//loop to categorize boundary nodes
std::vector< unsigned int> tempFixedVelocities;
std::vector< array_1d<double,TDim> > tempFixedVelocitiesValues;
std::vector< unsigned int> tempPressureOutletList;
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
int index = inode->FastGetSolutionStepValue(AUX_INDEX);
if (inode->IsFixed(VELOCITY_X)) //note that the variables can be either all fixed or no one fixed
{
if (inode->IsFixed(VELOCITY_Y) == false || inode->IsFixed(VELOCITY_Z) == false)
{
std::cout << "error found on the fixity of node " << inode->Id() << std::endl;
KRATOS_THROW_ERROR(std::logic_error, "velocities can be either all fixed or none fixed", "")
}
tempFixedVelocities.push_back(index);
tempFixedVelocitiesValues.push_back(mvel_n1[index]);
}
if (inode->IsFixed(PRESSURE))
{
tempPressureOutletList.push_back(index);
// mPressureOutlet.push_back(external_pressure[index]);
}
}
mFixedVelocities.resize(tempFixedVelocities.size(),false);
mFixedVelocitiesValues.resize(tempFixedVelocitiesValues.size(),false);
mPressureOutletList.resize(tempPressureOutletList.size(),false);
#pragma omp parallel for
for(int i=0; i< static_cast<int>(tempFixedVelocities.size()); i++)
{
mFixedVelocities[i] = tempFixedVelocities[i];
mFixedVelocitiesValues[i] = tempFixedVelocitiesValues[i];
}
#pragma omp parallel for
for(int i=0; i< static_cast<int>(tempPressureOutletList.size()); i++)
{
mPressureOutletList[i] = tempPressureOutletList[i];
}
//compute slip normals and fill SlipList
CalculateNormals(mr_model_part.Conditions());
mr_matrix_container.WriteVectorToDatabase(NORMAL, mSlipNormal, mr_model_part.Nodes());
if(TDim == 3)
DetectEdges3D(mr_model_part.Conditions());
//determine number of edges and entries
//// not implemented in ublas yet !!!
//unsigned int n_nonzero_entries = 2 * n_edges + n_nodes;
//allocate memory for variables
mL.resize(n_nodes, n_nodes, false);
int number_of_threads= OpenMPUtils::GetNumThreads();
std::vector<int> row_partition(number_of_threads);
OpenMPUtils::DivideInPartitions(n_nodes,number_of_threads,row_partition);
for (int k = 0; k < number_of_threads; k++)
{
#pragma omp parallel
if (OpenMPUtils::ThisThread() == k)
{
for (int i_node = static_cast<int> (row_partition[k]); i_node < static_cast<int> (row_partition[k + 1]); i_node++)
{
//loop over all nodes
// for (unsigned int i_node = 0; i_node < n_nodes; i_node++) {
//flag for considering diagonal matrix elements
bool flag = 0;
//loop over all neighbours
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
//define matrix structure row by row (the order does matter!)
if ((static_cast<int>(j_neighbour) > i_node) && (flag == 0))
{
//add diagonal/nodal contribution
mL.push_back(i_node, i_node, 0.0);
flag = 1;
}
//add non-diagonal/edge contribution
mL.push_back(i_node, j_neighbour, 0.0);
}
//if diagonal element is the last non-zero element of the row
if (flag == 0)
mL.push_back(i_node, i_node, 0.0);
}
}
}
//compute minimum length of the surrounding edges
CalculateEdgeLengths(mr_model_part.Nodes());
//set the pressure projection to the body force value
array_1d<double,3> temp = ZeroVector(3);
for(unsigned int i = 0 ; i < TDim; i++)
temp[i]= mRho * mBodyForce[i];
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
array_1d<double, 3> & press_proj = inode->FastGetSolutionStepValue(PRESS_PROJ);
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
press_proj[l_comp] = temp[l_comp];
}
KRATOS_CATCH("")
}
void SetShockCapturingCoefficient(double coeff)
{
mshock_coeff = coeff;
}
//***************************************
//function to set adequate time step size
double ComputeTimeStep(const double CFLNumber, const double MaxDt)
{
KRATOS_TRY
//save the maximum time step
max_dt = MaxDt;
//local variable for time step size
double delta_t = 1e10;//max_dt;
mdelta_t_avg = 1e10;//max_dt;
//getting value of current velocity and of viscosity
mr_matrix_container.FillScalarFromDatabase (VISCOSITY, mViscosity, mr_model_part.Nodes() );
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(POROSITY, mEps, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(LIN_DARCY_COEF, mA, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(NONLIN_DARCY_COEF, mB, mr_model_part.Nodes());
mr_matrix_container.FillVectorFromDatabase(STRUCTURE_VELOCITY, mStrVel, mr_model_part.Nodes());
//*******************
//loop over all nodes
unsigned int n_nodes = mvel_n1.size();
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
const array_1d<double, TDim>& v_i = mvel_n1[i_node];
const double havg_i = mHavg[i_node];
const double hmin_i = mHmin[i_node];
const double eps_i = mEps[i_node];
//const double d_i = mD[i_node];
const double nu = mViscosity[i_node];
// const double lindarcy_i = mA[i_node];
// const double nonlindarcy_i = mB[i_node];
// const array_1d<double, TDim>& str_v_i = mStrVel[i_node];
// array_1d<double, TDim> rel_vel_i;
// rel_vel_i[0] = v_i[0] - str_v_i[0];
// rel_vel_i[1] = v_i[1] - str_v_i[1];
// rel_vel_i[2] = v_i[2] - str_v_i[2];
// double rel_vel_norm = norm_2(rel_vel_i);
// double vel_norm = norm_2(v_i);
double vel_norm = 0.0;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
{
vel_norm += v_i[l_comp]*v_i[l_comp];
}
vel_norm = sqrt(vel_norm);
// double porosity_coefficient = ComputePorosityCoefficient( rel_vel_norm, eps_i, lindarcy_i, nonlindarcy_i);
vel_norm /= eps_i;
//use CFL condition to compute time step size
double delta_t_i = CFLNumber * 1.0 / (2.0 * vel_norm /hmin_i + 4.0 * nu / (hmin_i * hmin_i)/*+ porosity_coefficient*/);
double delta_t_i_avg = 1.0 / (2.0 * vel_norm /havg_i + 4.0 * nu / (havg_i * havg_i) /*+ porosity_coefficient*/);
// double delta_t_i = 1.0 / ( vel_norm /hmin_i + nu / (hmin_i * hmin_i)/*+ porosity_coefficient*/);
// double delta_t_i_avg = 1.0 / ( vel_norm /havg_i + nu / (havg_i * havg_i) /*+ porosity_coefficient*/);
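// In formula form (sketch): delta_t_i = CFL / ( 2*||u_i||/(eps_i*h) + 4*nu_i/h^2 ),
// the usual explicit bound combining the convective (h/u) and viscous (h^2/nu)
// limits, with h = h_min for the step actually taken (the averaged estimate
// uses h_avg and omits the CFL factor).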
//consider the most restrictive case: a neighbour's velocity of similar magnitude and direction but opposite sense.
//loop over all neighbours
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const array_1d<double, TDim>& v_j = mvel_n1[j_neighbour];
double v_diff_norm = 0.0;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
{
double temp = v_i[l_comp] - v_j[l_comp];
v_diff_norm += temp*temp;
}
v_diff_norm = sqrt(v_diff_norm);
v_diff_norm /= eps_i;
double delta_t_j = CFLNumber * 1.0 / (2.0 * v_diff_norm /hmin_i + 4.0 * nu / (hmin_i * hmin_i));
// double delta_t_j = 1.0 / ( v_diff_norm /hmin_i + nu / (hmin_i * hmin_i));
if (delta_t_j < delta_t_i)
delta_t_i = delta_t_j;
}
//choose the overall minimum of delta_t_i
if (delta_t_i < delta_t)
delta_t = delta_t_i;
if(delta_t_i_avg < mdelta_t_avg)
mdelta_t_avg = delta_t_i_avg;
}
//*******************
//perform MPI synchronization of the dt (the minimum should be kept)
return delta_t;
KRATOS_CATCH("")
}
void ApplySmagorinsky (double MolecularViscosity, double Cs)
{
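//dispatch to the dimension-specific helpers; these are expected to add a turbulent
//viscosity of the standard Smagorinsky form nu_t = (Cs h)^2 |S| (with |S| the norm of
//the strain-rate tensor) on top of MolecularViscosity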
if (Cs != 0)
{
if (TDim == 3)
ApplySmagorinsky3D (MolecularViscosity, Cs);
else
ApplySmagorinsky2D (MolecularViscosity, Cs);
}
}
void UpdateFixedVelocityValues()
{
KRATOS_TRY
//read velocity and pressure data from Kratos
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
int fixed_size = mFixedVelocities.size();
#pragma omp parallel for firstprivate(fixed_size)
for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++)
{
unsigned int i_node = mFixedVelocities[i_velocity];
array_1d<double, TDim>& u_i_fix = mFixedVelocitiesValues[i_velocity];
const array_1d<double, TDim>& u_i = mvel_n1[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
u_i_fix[comp] = u_i[comp];
}
KRATOS_CATCH("");
}
//**********************************************************************************
//function to solve fluid equations - fractional step 1: compute fractional momentum
void SolveStep1()
{
KRATOS_TRY
//PREREQUISITES
//variables for node based data handling
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//storage of nodal values in local variables
CalcVectorType rhs;
rhs.resize(n_nodes);
//read velocity and pressure data from Kratos
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, rNodes);
mr_matrix_container.FillScalarFromDatabase (VISCOSITY, mViscosity, rNodes);
mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, rNodes);
mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, rNodes);
mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
//mr_matrix_container.FillScalarFromDatabase(DIAMETER, mD, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(POROSITY, mEps, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(LIN_DARCY_COEF, mA, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(NONLIN_DARCY_COEF, mB, mr_model_part.Nodes());
mr_matrix_container.FillVectorFromDatabase(STRUCTURE_VELOCITY, mStrVel, rNodes);
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
//compute intrinsic time
double time_inv_avg = 1.0/mdelta_t_avg;
double stabdt_pressure_factor = mstabdt_pressure_factor;
double stabdt_convection_factor = mstabdt_convection_factor;
double tau2_factor = mtau2_factor;
#pragma omp parallel for firstprivate(time_inv_avg,stabdt_pressure_factor,stabdt_convection_factor,tau2_factor)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& h_avg_i = mHavg[i_node];
array_1d<double, TDim>& a_i = mvel_n1[i_node];
const double nu_i = mViscosity[i_node];
const double eps_i = mEps[i_node];
const double lindarcy_i = mA[i_node];
const double nonlindarcy_i = mB[i_node];
double vel_norm = 0.0;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
{
vel_norm += a_i[l_comp]*a_i[l_comp];
}
vel_norm = sqrt(vel_norm);
const array_1d<double, TDim>& str_v_i = mStrVel[i_node];
array_1d<double, TDim> rel_vel_i;
double rel_vel_norm = 0.0;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
{
rel_vel_i[l_comp] = a_i[l_comp] - str_v_i[l_comp];
rel_vel_norm += rel_vel_i[l_comp]*rel_vel_i[l_comp];
}
rel_vel_norm = sqrt(rel_vel_norm);
double porosity_coefficient = ComputePorosityCoefficient(rel_vel_norm, eps_i, lindarcy_i, nonlindarcy_i);
vel_norm /= eps_i;
// double tau = 1.0 / (2.0 * vel_norm / h_avg_i + time_inv_avg + (4.0*nu_i) / (h_avg_i * h_avg_i) + porosity_coefficient);
// double denom = (2.0 * vel_norm / h_avg_i + (4.0*nu_i) / (h_avg_i * h_avg_i) + porosity_coefficient);
// double tau = 0.0;
// if(denom > max_dt_inv_coeff)
// tau = max_dt_coeff;
// else
// tau = 1.0/denom;
// double tau = 1.0 / (2.0 * vel_norm / h_avg_i + max_dt_inv + (4.0*nu_i) / (h_avg_i * h_avg_i) + porosity_coefficient);
double tau = 1.0 / (2.0 * vel_norm / h_avg_i + stabdt_pressure_factor*time_inv_avg + (4.0*nu_i) / (h_avg_i * h_avg_i) + porosity_coefficient);
// double tau = 1.0 / (2.0 * vel_norm / h_avg_i + 0.01*time_inv_avg + (4.0*nu_i) / (h_avg_i * h_avg_i) + porosity_coefficient);
double tau_conv = 1.0 / (2.0 * vel_norm / h_avg_i + stabdt_convection_factor*time_inv_avg + (4.0*nu_i) / (h_avg_i * h_avg_i) + porosity_coefficient);
mTauPressure[i_node] = tau;
mTauConvection[i_node] = tau_conv;
mTau2[i_node] = (nu_i + h_avg_i*vel_norm*0.5)*tau2_factor;
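//in essence: tau ~ 1 / ( 2|u|/h + c/dt_avg + 4 nu/h^2 + sigma_Darcy ), the usual inverse
//sum of convective, transient, viscous and Darcy characteristic frequencies; mTau2 ~
//(nu + 0.5 h |u|) acts as the second (divergence, shock-capturing-like) stabilization parameter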
// mTauPressure[i_node] = 1.0 / (2.0 * vel_norm / mHavg[i_node] + (4.0*nu_i) / (mHavg[i_node] * mHavg[i_node]));
// mTauConvection[i_node] = 1.0 / (2.0 * vel_norm / h_i + time_inv + (4.0*nu_i) / (h_i * h_i));
//// mTauPressure[i_node] = 1.0 / (2.0 * vel_norm / h_i + 0.01 * time_inv + 4.0 * nu_i / (h_i * h_i));
//// // mTauPressure[i_node] = delta_t;
//// mTauConvection[i_node] = 1.0 / (2.0 * vel_norm / h_i + 0.01 * time_inv + 4.0 * nu_i / (h_i * h_i));
// if (mTauPressure[i_node] < delta_t)
// mTauPressure[i_node] = delta_t;
// else if(mTauPressure[i_node] > 100.0*delta_t)
// mTauPressure[i_node] = 100.0*delta_t;
}
//// //the tau is set to 1/dt on the corner nodes
//// //apply conditions on corners
//// int corner_size = mcorner_nodes.size();
//// for (int i = 0; i < corner_size; i++)
//// {
//// int i_node = mcorner_nodes[i];
//// mTauPressure[i_node] = mdelta_t_avg;
//// mTauConvection[i_node] = mdelta_t_avg;
//// }
// //laplacian smoothing on the taus
// //note here that we use mTau2 as a temporary vector
// LaplacianSmooth(mTauConvection, mTau2);
// LaplacianSmooth(mTauPressure, mTau2);
// #pragma omp parallel for
// for (int i_node = 0; i_node < n_nodes; i_node++)
// mTau2[i_node] = 0.0;
// mr_matrix_container.AssignVectorToVector(mTauPressure, mTauConvection);
//calculating the convective projection
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& pi_i = mPi[i_node];
//setting to zero
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
pi_i[l_comp] = 0.0;
array_1d<double, TDim> a_i = mvel_n1[i_node];
const array_1d<double, TDim>& U_i = mvel_n1[i_node];
// const double& p_i = mPn1[i_node];
const double& eps_i = mEps[i_node];
/*convective velocity == fluid velocity (not darcy velocity)*/
a_i /= eps_i;
/*convective front velocity == fluid velocity - structural velocity*/
// // ****************************************rel_vel_modifications_b
// const array_1d<double, TDim>& str_v_i = mStrVel[i_node];
// for(unsigned int comp = 0; comp < TDim; comp++)
// {a_i[comp] -= str_v_i[comp];}
// // ****************************************rel_vel_modifications_e
//const double& p_i = pressure[i_node];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, TDim> a_j = mvel_n1[j_neighbour];
const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
const double& eps_j = mEps[j_neighbour];
/*convective velocity == fluid velocity (not darcy velocity)*/
a_j /= eps_j;
/*convective front velocity == fluid velocity - structural velocity*/
// // ****************************************rel_vel_modifications_b
// const array_1d<double, TDim>& str_v_j = mStrVel[j_neighbour];
// for(unsigned int comp = 0; comp < TDim; comp++)
// {a_j[comp] -= str_v_j[comp];}
// // ****************************************rel_vel_modifications_e
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Add_ConvectiveContribution(pi_i, a_i, U_i, a_j, U_j);
// edge_ij.Add_grad_p(pi_i, p_i, p_j);
}
const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
pi_i[l_comp] *= m_inv;
}
//std::cout << "substep " << substep+1 << " of " << n_substeps << std::endl;
mr_matrix_container.AssignVectorToVector (mvel_n, mWork); //mWork = mvel_n
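//in essence a classical 4-stage Runge-Kutta: mWork accumulates u^n + dt/6 (k1 + 2 k2 + 2 k3 + k4),
//while mvel_n1 carries the stage values u^n + dt/2 k1, u^n + dt/2 k2 and u^n + dt k3;
//Add_Effective_Inverse_Multiply applies the effective mass inverse (M_L + dt D)^(-1)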
//first step of Runge Kutta
mr_matrix_container.AssignVectorToVector (mvel_n, mvel_n1); //mvel_n1 = mvel_n
mr_matrix_container.SetToZero (rhs);
CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness);
Add_Effective_Inverse_Multiply (mWork, mWork, delta_t / 6.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness,rhs);
Add_Effective_Inverse_Multiply (mvel_n1, mvel_n, 0.5 * delta_t, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
ApplyVelocityBC (mvel_n1);
//second step
mr_matrix_container.SetToZero (rhs);
CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness);
Add_Effective_Inverse_Multiply (mWork, mWork, delta_t / 3.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
Add_Effective_Inverse_Multiply (mvel_n1, mvel_n, 0.5 * delta_t, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
ApplyVelocityBC (mvel_n1);
//third step
mr_matrix_container.SetToZero (rhs);
CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness);
Add_Effective_Inverse_Multiply (mWork, mWork, delta_t / 3.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
Add_Effective_Inverse_Multiply (mvel_n1, mvel_n, delta_t, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
ApplyVelocityBC (mvel_n1);
//fourth step
mr_matrix_container.SetToZero (rhs);
CalculateRHS (mvel_n1, mPn, mvel_n1, rhs,mdiag_stiffness);
Add_Effective_Inverse_Multiply (mWork, mWork, delta_t / 6.0, mr_matrix_container.GetLumpedMass(),mdiag_stiffness, rhs);
//compute right-hand side
mr_matrix_container.AssignVectorToVector (mWork, mvel_n1);
ApplyVelocityBC (mvel_n1);
//prepare for next step
//mr_matrix_container.AssignVectorToVector (mvel_n1, mvel_n); //left disabled: should the velocity history be advanced here?
KRATOS_CATCH("")
}
//*********************************************************************
//function to calculate right-hand side of fractional momentum equation
void CalculateRHS(
const CalcVectorType& vel,
const ValuesVectorType& pressure,
const CalcVectorType& convective_velocity,
CalcVectorType& rhs,
ValuesVectorType& diag_stiffness)
{
KRATOS_TRY
int n_nodes = vel.size();
//perform MPI synchronization
//calculating the RHS
array_1d<double, TDim> stab_low;
array_1d<double, TDim> stab_high;
double inverse_rho = 1.0 / mRho;
#pragma omp parallel for private(stab_low,stab_high)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double dist = mdistances[i_node];
if (dist <= 0.0) //node is inside domain ---- if outside do nothing
{
const double nu_i = mViscosity[i_node];
const double nu_j = nu_i;
array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& f_i = mBodyForce;
array_1d<double, TDim> a_i = convective_velocity[i_node];
// const double& beta_i = mBeta[i_node];
const array_1d<double, TDim>& U_i = vel[i_node];
const array_1d<double, TDim>& pi_i = mPi[i_node];
const double& p_i = pressure[i_node];
const double& eps_i = mEps[i_node];
// //const double& d_i = mD[i_node];
const double lindarcy_i = mA[i_node];
const double nonlindarcy_i = mB[i_node];
const array_1d<double, TDim>& str_v_i = mStrVel[i_node];
array_1d<double, TDim> rel_vel_i;
double rel_vel_norm = 0.0;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
{
rel_vel_i[l_comp] = U_i[l_comp] - str_v_i[l_comp];
rel_vel_norm += rel_vel_i[l_comp]*rel_vel_i[l_comp];
}
rel_vel_norm = sqrt(rel_vel_norm);
//const double& tau2_i = mTau2[i_node];
double edge_tau = mTauConvection[i_node];
/*convective velocity == fluid velocity (not darcy velocity)*/
a_i /= eps_i;
/*convective front velocity == fluid velocity - structural velocity*/
// // ****************************************rel_vel_modifications_b
// for(unsigned int comp = 0; comp < TDim; comp++)
// {a_i[comp] -= str_v_i[comp];}
// // ****************************************rel_vel_modifications_e
//
//double& h_i = mHmin[i_node];
//initializing with the external forces (e.g. gravity)
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] = m_i * eps_i * f_i[comp] ;
//applying the effect of the porosity
// double porosity_coefficient = ComputePorosityCoefficient(mViscosity,norm_2(U_i),eps_i, d_i);
// double porosity_coefficient = ComputePorosityCoefficient( norm_2(U_i), eps_i, lindarcy_i, nonlindarcy_i);
double porosity_coefficient = ComputePorosityCoefficient( rel_vel_norm, eps_i, lindarcy_i, nonlindarcy_i);
diag_stiffness[i_node]= m_i * porosity_coefficient;
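//the Darcy (porosity) resistance is split: the part proportional to the fluid velocity is
//treated implicitly through diag_stiffness (applied in Add_Effective_Inverse_Multiply),
//while the part driven by the structure velocity is added explicitly to the rhs below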
// /**************************************************rel_vel_modifications_b*/
for (unsigned int comp = 0; comp < TDim; comp++)
{
// rhs_i[comp] -= m_i * porosity_coefficient * U_i[comp];
rhs_i[comp] += m_i * porosity_coefficient * str_v_i[comp];
}
// /*************************************************rel_vel_modifications_e*/
//std::cout << i_node << "rhs =" << rhs_i << "after adding body force" << std::endl;
//convective term
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, TDim> a_j = convective_velocity[j_neighbour];
const array_1d<double, TDim>& U_j = vel[j_neighbour];
const array_1d<double, TDim>& pi_j = mPi[j_neighbour];
const double& p_j = pressure[j_neighbour];
const double& eps_j = mEps[j_neighbour];
// const double& beta_j = mBeta[j_neighbour];
/*convective velocity == fluid velocity (not darcy velocity)*/
a_j /= eps_j;
/*convective front velocity == fluid velocity - structural velocity*/
// ****************************************rel_vel_modifications_b
// const array_1d<double, TDim>& str_v_j = mStrVel[j_neighbour];
// for(unsigned int comp = 0; comp < TDim; comp++)
// {a_j[comp] -= str_v_j[comp];}
// ****************************************rel_vel_modifications_e
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Sub_ConvectiveContribution(rhs_i, a_i, U_i, a_j, U_j);
// std::cout << i_node << "rhs =" << rhs_i << "after convective contrib" << std::endl;
//take care! a boundary condition for the external pressure is still missing here
// edge_ij.Add_Gp(rhs_i,p_i*inverse_rho,p_j*inverse_rho);
edge_ij.Sub_grad_p(rhs_i, p_i*inverse_rho*eps_i, p_j * inverse_rho*eps_i);
// edge_ij.Add_grad_p(rhs_i, p_i*inverse_rho, p_j * inverse_rho);
// std::cout << i_node << "rhs =" << rhs_i << "after Gp" << std::endl;
edge_ij.Sub_ViscousContribution(rhs_i, U_i, nu_i, U_j, nu_j);
// std::cout << i_node << "rhs =" << rhs_i << "after viscous" << std::endl;
//add stabilization
edge_ij.CalculateConvectionStabilization_LOW(stab_low, a_i, U_i, a_j, U_j);
// edge_ij.CalculateConvectionStabilization_LOW(stab_low, a_i, U_i,p_i, a_j, U_j,p_j);
edge_ij.CalculateConvectionStabilization_HIGH(stab_high, a_i, pi_i, a_j, pi_j);
// double beta = 1.0;
// double beta = beta_i;
// if(beta_j > beta)
// beta = beta_j;
// beta = 1.0;
// edge_ij.Sub_StabContribution(rhs_i, edge_tau*beta, 1.0, stab_low, stab_high);
// edge_ij.Sub_StabContribution(rhs_i, edge_tau, (1.0-beta), stab_low, stab_high);
edge_ij.Sub_StabContribution(rhs_i, edge_tau, 1.0, stab_low, stab_high);
// std::cout << i_node << "rhs =" << rhs_i << "after stab" << std::endl;
//add tau2 term
// boost::numeric::ublas::bounded_matrix<double,TDim,TDim>& LL = edge_ij.LaplacianIJ;
// for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
// {
// double aaa = 0.0;
// for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
// aaa += LL(k_comp,m_comp) * (U_j[m_comp] - U_i[m_comp]);
// rhs_i[k_comp] -= tau2_i*aaa;
// }
}
// std::cout << i_node << "rhs =" << rhs_i << std::endl;
}
}
//apply wall resistance
if (mWallLawIsActive == true)
ComputeWallResistance (vel,diag_stiffness);
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, rNodes);
KRATOS_CATCH("")
}
//*************************************************************************
//function to solve fluid equations - fractional step 2: calculate pressure
void SolveStep2(typename TLinearSolver::Pointer pLinearSolver)
{
KRATOS_TRY
typedef Node < 3 > PointType;
typedef PointerVector<PointType > PointVector;
typedef PointVector::iterator PointIterator;
//reset is visited flag
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
inode->GetValue(IS_VISITED) = 0.0;
}
//Re-generate a container with LAYER 0 and LAYER 1 after convection of the free surface
std::vector< PointVector > layers(2);
//detect the nodes inside the fluid surface LAYER_0
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
if (inode->FastGetSolutionStepValue(DISTANCE) < 0.0) //candidates are only the ones inside the fluid domain
{
GlobalPointersVector< Node < 3 > >& neighb_nodes = inode->GetValue(NEIGHBOUR_NODES);
for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
{
if (i->FastGetSolutionStepValue(DISTANCE) >= 0.0) //add the node as free surface if one of its neighb is outside
{
if (inode->GetValue(IS_VISITED) == 0.0)
{
layers[0].push_back(*(inode.base()));
inode->GetValue(IS_VISITED) = 1.0;
}
}
}
}
else
inode->FastGetSolutionStepValue(PRESSURE) = 0.0;
}
//fill layer 1 by neighbour relationships
for (PointIterator iii = (layers[0]).begin(); iii != (layers[0]).end(); iii++)
{
GlobalPointersVector< Node < 3 > >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
for (GlobalPointersVector< Node < 3 > >::iterator jjj = neighb_nodes.begin(); jjj != neighb_nodes.end(); jjj++) //destination = origin1 + value * Minv*origin
{
if (jjj->FastGetSolutionStepValue(DISTANCE) >= 0 &&
jjj->GetValue(IS_VISITED) == 0.0)
{
layers[1].push_back(Node < 3 > ::Pointer(*(jjj.base())));
jjj->GetValue(IS_VISITED) = 2.0;
}
}
}
//on the first layer outside the fluid the pressure is set to a value such that the pressure on the free surface is approximately 0
for (PointIterator iii = layers[1].begin(); iii != layers[1].end(); iii++)
{
//get the node
unsigned int i_node = iii->FastGetSolutionStepValue(AUX_INDEX);
array_1d<double, TDim> grad_d;
for (unsigned int comp = 0; comp < TDim; comp++)
grad_d[comp] = 0.0;
double dist_i = mdistances[i_node];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const double& dist_j = mdistances[j_neighbour];
//projection of pressure gradients
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Add_grad_p(grad_d, dist_i, dist_j);
}
const double& m_inv = mr_matrix_container.GetInvertedMass()[i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
grad_d[l_comp] *= m_inv;
double norm_grad = norm_2(grad_d);
if(norm_grad < 100.0)
{
grad_d /= norm_grad; //this is the direction of the gradient of the distances
grad_d *= dist_i; //this is the vector with the distance of node_i from the closest point on the free surface
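//since the pressure vanishes on the free surface, a first-order Taylor expansion gives
//p(x_i) ~= grad(p) . (x_i - x_fs); grad_d now approximates (x_i - x_fs) and the nodal
//pressure projection is used as the estimate of grad(p)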
//array_1d<double, TDim> press_grad;
double pestimate = 0.0;
const array_1d<double, 3> & r_press_proj = iii->FastGetSolutionStepValue(PRESS_PROJ);
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
pestimate += r_press_proj[l_comp]*grad_d[l_comp];
// press_grad[l_comp]= r_press_proj[l_comp];
iii->FastGetSolutionStepValue(PRESSURE) = pestimate;
}
else
{
std::cout << "attention gradient of distance much greater than 1 on node:" << i_node <<std::endl;
double avg_number = 0.0;
double pavg = 0.0;
GlobalPointersVector< Node < 3 > >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
{
if (i->GetValue(IS_VISITED) == 1.0)
{
pavg += i->FastGetSolutionStepValue(PRESSURE);
avg_number += 1.0;
}
}
if(avg_number == 0)
KRATOS_THROW_ERROR(std::logic_error,"can not happen that the extrapolation node has no neighbours","");
iii->FastGetSolutionStepValue(PRESSURE) = pavg/avg_number;
}
}
//if a node is very close to the free surface (relative to the element size) fix the pressure on it
// for(ModelPart::NodesContainerType::iterator iii = mr_model_part.NodesBegin(); iii!=mr_model_part.NodesEnd(); iii++)
// {
// unsigned int i_node = iii->FastGetSolutionStepValue(AUX_INDEX);
//
// double dist = mdistances[i_node];
// if(dist > 0.0 && dist < 0.01*mHavg[i_node])
// iii->FastGetSolutionStepValue(PRESSURE) = 0.0;
//
// }
//PREREQUISITES
//allocate memory for variables
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//unknown and right-hand side vector
TSystemVectorType dp, rhs;
dp.resize(n_nodes,false);
rhs.resize(n_nodes,false);
array_1d<double, TDim> dU_i, dU_j, work_array;
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
// double time_inv = 0.0; //1.0/delta_t;
//read the pressure projection from the database
mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, mr_model_part.Nodes());
mr_matrix_container.FillVectorFromDatabase(PRESS_PROJ, mXi, rNodes);
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
//for (int i_node = 0; i_node < n_nodes; i_node++)
// std::cout << mvel_n1[i_node] << std::endl;
//loop over all nodes
// double rho_inv = 1.0 / mRho;
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& rhs_i = rhs[i_node];
rhs_i = 0.0;
const double& p_i = mPn1[i_node];
const double& p_old_i = mPn[i_node];
const array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
// const double& eps_i = mEps[i_node];
array_1d<double, TDim>& xi_i = mXi[i_node];
double l_ii = 0.0;
// double div_i = 0.0;
//loop over all neighbours
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const double& p_j = mPn1[j_neighbour];
const double& p_old_j = mPn[j_neighbour];
const array_1d<double, TDim>& U_j_curr = mvel_n1[j_neighbour];
const array_1d<double, TDim>& xi_j = mXi[j_neighbour];
// const double& eps_j = mEps[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
#ifdef SYMM_PRESS
double edge_tau = 0.25*(mTauPressure[i_node] + mTauPressure[j_neighbour]);
#else
double edge_tau = 0.5*mTauPressure[i_node];
#endif
// double edge_tau = CalculateEdgeTau(time_inv,h_i,a_i,h_j,a_j);
//
if(edge_tau < delta_t) edge_tau=delta_t;
//compute laplacian operator
double sum_l_ikjk;
edge_ij.CalculateScalarLaplacian(sum_l_ikjk);
// double sum_l_ikjk_onlystab = sum_l_ikjk * (edge_tau);
double sum_l_ikjk_onlydt = sum_l_ikjk * (delta_t);
sum_l_ikjk *= (delta_t + edge_tau);
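//in essence, the assembled system is the stabilized pressure-Poisson equation
// (dt + tau) L dp = -(dt + tau) L p^k + dt L p^n - D(rho u*) + tau div(xi)
//solved for the pressure increment dp (L = scalar laplacian, D = discrete divergence)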
//assemble right-hand side
//pressure contribution
// rhs_i -= sum_l_ikjk_onlystab * (p_j - p_i);
rhs_i -= sum_l_ikjk * (p_j - p_i);
rhs_i += sum_l_ikjk_onlydt * (p_old_j - p_old_i);
//calculating the divergence of the fract vel
// edge_ij.Sub_D_v(div_i, U_i_curr*mRho*eps_i, U_j_curr * mRho*eps_j);
edge_ij.Sub_D_v(rhs_i, U_i_curr*mRho, U_j_curr * mRho);
// edge_ij.Sub_D_v(rhs_i,a_i*rho_i,a_j*rho_i);
//high order stabilizing term
double temp = 0.0;
// edge_ij.Add_div_v(temp,mTauPressure[i_node]*xi_i,mTauPressure[j_neighbour]*xi_j);
edge_ij.Add_div_v(temp, xi_i, xi_j);
rhs_i += edge_tau * temp;
//assemble laplacian matrix
mL(i_node, j_neighbour) = sum_l_ikjk;
l_ii -= sum_l_ikjk;
}
// //area correction to prevent mass loss
// rhs_i -= mdiv_error[i_node];
// rhs_i += div_i * eps_i;
mL(i_node, i_node) = l_ii;
}
if(muse_mass_correction == true)
{
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& rhs_i = rhs[i_node];
rhs_i -= mdiv_error[i_node];
}
}
//find the max diagonal term
double max_diag = 0.0;
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double L_diag = mL(i_node, i_node);
if (fabs(L_diag) > fabs(max_diag)) max_diag = L_diag;
}
if(max_diag < 1e20) max_diag=1e20;
//respect pressure boundary conditions by penalization
// double huge = max_diag * 1e6;
// for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++) {
// unsigned int i_node = mPressureOutletList[i_pressure];
// mL(i_node, i_node) = huge;
// rhs[i_node] = 0.0;
// }
for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++)
{
unsigned int i_node = mPressureOutletList[i_pressure];
mL(i_node, i_node) = max_diag;
rhs[i_node] = 0.0;
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
mL(i_node, j_neighbour) = 0.0;
}
}
//modification for level_set
// mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
// for (unsigned int i_dist = 0; i_dist < mdistances.size(); i_dist++)
// {
// if(mdistances[i_dist] >= 0)
// {
// mL(i_dist, i_dist) = huge;
// rhs[i_dist] = 0.0;
// }
// }
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
if (mdistances[i_node] >= 0)
{
mL(i_node, i_node) = max_diag;
rhs[i_node] = 0.0;
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
mL(i_node, j_neighbour) = 0.0;
}
}
else
{
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
if (mdistances[j_neighbour] >= 0)
mL(i_node, j_neighbour) = 0.0;
}
}
}
// for (int i_node = 0; i_node < n_nodes; i_node++)
// {
// if( fabs(mL(i_node, i_node)) < 1e-20)
// {
// mL(i_node, i_node)=max_diag;
// rhs[i_node] = 0.0;
// KRATOS_WATCH("arghhhhhhhhhhhhhhhhhhhhhhhhhhhhhh");
// }
// }
//compute row scaling factors
TSystemVectorType scaling_factors(n_nodes);
double* Lvalues = mL.value_data().begin();
SizeType* Lrow_indices = mL.index1_data().begin();
SizeType* Lcol_indices = mL.index2_data().begin();
#pragma omp parallel for
for (int k = 0; k < static_cast< int>(mL.size1()); k++)
{
double t = 0.0;
SizeType col_begin = Lrow_indices[k];
SizeType col_end = Lrow_indices[k+1];
for (SizeType j=col_begin; j<col_end; j++)
if( static_cast<int>(Lcol_indices[j]) == k)
{
t = fabs(Lvalues[j]);
break;
}
// t += Lvalues[j]*Lvalues[j];
// t = sqrt(t);
scaling_factors[k] = 1.0/sqrt(t);
}
#pragma omp parallel for
for (int k = 0; k < static_cast<int>(mL.size1()); k++)
{
SizeType col_begin = Lrow_indices[k];
SizeType col_end = Lrow_indices[k+1];
double k_factor = scaling_factors[k];
rhs[k] *= k_factor;
for (SizeType j=col_begin; j<col_end; j++)
{
Lvalues[j] *= scaling_factors[Lcol_indices[j]] * k_factor;
}
}
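//the system has been scaled symmetrically, L <- D^(-1/2) L D^(-1/2) and rhs <- D^(-1/2) rhs
//(D = diagonal of L), so the unknown solved for is D^(1/2) dp; the update after the solve
//multiplies by scaling_factors = D^(-1/2) to recover dp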
//set starting vector for iterative solvers
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
dp[i_node] = 0.0;
pLinearSolver->Solve(mL, dp, rhs);
//update pressure
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
mPn1[i_node] += dp[i_node]*scaling_factors[i_node];
// for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++)
// {
// unsigned int i_node = mPressureOutletList[i_pressure];
// mPn1[i_node] = mPressureOutlet[i_pressure];
// }
//write pressure and density to Kratos
mr_matrix_container.WriteScalarToDatabase(PRESSURE, mPn1, rNodes);
//compute pressure proj for the next step
#pragma omp parallel for private(work_array)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& xi_i = mXi[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
xi_i[comp] = 0.0;
double dist = mdistances[i_node];
if (dist <= 0.0) //node is inside domain ---- if outside do nothing
{
const double& p_i = mPn1[i_node];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const double& p_j = mPn1[j_neighbour];
//projection of pressure gradients
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Add_grad_p(xi_i, p_i, p_j);
}
const double& m_inv = mr_matrix_container.GetInvertedMass()[i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
xi_i[l_comp] *= m_inv;
}
}
mr_matrix_container.WriteVectorToDatabase(PRESS_PROJ, mXi, rNodes);
KRATOS_CATCH("")
}
//**********************************************************************************
//function to solve fluid equations - fractional step 3: correct fractional momentum
void SolveStep3()
{
KRATOS_TRY
//get number of nodes
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//define work array
array_1d<double, TDim> correction;
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
double factor = 0.5;
if(massume_constant_dp == true)
factor = 1.0;
//compute end of step momentum
double rho_inv = 1.0 / mRho;
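//the correction below is, in essence, u^(n+1) = u* - dt (M_L + dt D)^(-1) G(dp_rho),
//with dp_rho = factor*(p^(n+1) - p^n)/rho; factor = 1 assumes a constant pressure
//increment over the step, factor = 0.5 a trapezoidal (midpoint) weighting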
#pragma omp parallel for private(correction) firstprivate(delta_t,rho_inv,factor)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double dist = mdistances[i_node];
if (dist < 0.0) //node is inside domain ---- if outside do nothing
{
array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
double delta_p_i = (mPn1[i_node] - mPn[i_node]) * rho_inv*factor;
// const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
//setting to zero
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
correction[l_comp] = 0.0;
//compute edge contributions dt*M^(-1)Gp
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
double delta_p_j = (mPn1[j_neighbour] - mPn[j_neighbour]) * rho_inv*factor;
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
// edge_ij.Sub_grad_p(correction,delta_p_i,delta_p_j);
edge_ij.Sub_grad_p(correction, delta_p_i, delta_p_j);
// edge_ij.Add_grad_p(correction, delta_p_i, delta_p_j);
// edge_ij.Add_Gp(correction,delta_p_i,delta_p_j);
// edge_ij.Sub_Gp(correction,delta_p_i,delta_p_j);
}
//compute prefactor
// double coefficient = delta_t * m_inv;
const double m = mr_matrix_container.GetLumpedMass() [i_node];
const double& d = mdiag_stiffness[i_node];
//correct fractional momentum
for (unsigned int comp = 0; comp < TDim; comp++)
{
U_i_curr[comp] += delta_t / (m + delta_t*d) * correction[comp];
}
}
}
ApplyVelocityBC(mvel_n1);
//write velocity of time step n+1 to Kratos
mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, rNodes);
//calculate the error on the divergence
if(muse_mass_correction == true)
{
#pragma omp parallel for private(correction) firstprivate(delta_t,rho_inv)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
const double dist = mdistances[i_node];
double& div_i_err = mdiv_error[i_node];
div_i_err = 0.0;
if (dist < 0.0) //node is inside domain ---- if outside do nothing
{
const array_1d<double, TDim>& U_i_curr = mvel_n1[i_node];
//compute edge contributions dt*M^(-1)Gp
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, TDim>& U_j_curr = mvel_n1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Add_D_v(div_i_err, U_i_curr*mRho, U_j_curr * mRho);
}
}
}
}
KRATOS_CATCH("")
}
//************************************
void ApplyVelocityBC(CalcVectorType& VelArray)
{
KRATOS_TRY
if(mWallLawIsActive == false)
{
//apply conditions on corner edges
int edge_size = medge_nodes_direction.size();
#pragma omp parallel for firstprivate(edge_size)
for (int i = 0; i < edge_size; i++)
{
int i_node = medge_nodes[i];
const array_1d<double, TDim>& direction = medge_nodes_direction[i];
double dist = mdistances[i_node];
if(dist <= 0.0)
{
array_1d<double, TDim>& U_i = VelArray[i_node];
double temp=0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
temp += U_i[comp] * direction[comp];
for (unsigned int comp = 0; comp < TDim; comp++)
U_i[comp] = direction[comp]*temp;
}
}
//apply conditions on corners
int corner_size = mcorner_nodes.size();
for (int i = 0; i < corner_size; i++)
{
int i_node = mcorner_nodes[i];
array_1d<double, TDim>& U_i = VelArray[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
U_i[comp] = 0.0;
}
}
//slip condition
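//project out the normal component, u <- u - ((u.n)/(n.n)) n, keeping only the tangential
//part; dividing by n.n makes the projection independent of the magnitude of the
//assembled (non-unit) nodal normal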
int slip_size = mSlipBoundaryList.size();
#pragma omp parallel for firstprivate(slip_size)
for (int i_slip = 0; i_slip < slip_size; i_slip++)
{
unsigned int i_node = mSlipBoundaryList[i_slip];
double dist = mdistances[i_node];
if(dist <= 0.0)
{
array_1d<double, TDim>& U_i = VelArray[i_node];
array_1d<double, TDim>& an_i = mSlipNormal[i_node];
double projection_length = 0.0;
double normalization = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
{
projection_length += U_i[comp] * an_i[comp];
normalization += an_i[comp] * an_i[comp];
}
projection_length /= normalization;
//tangential momentum as difference between original and normal momentum
for (unsigned int comp = 0; comp < TDim; comp++)
U_i[comp] -= projection_length * an_i[comp];
}
}
//fixed condition
int fixed_size = mFixedVelocities.size();
#pragma omp parallel for firstprivate(fixed_size)
for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++)
{
unsigned int i_node = mFixedVelocities[i_velocity];
double dist = mdistances[i_node];
if(dist <= 0.0)
{
const array_1d<double, TDim>& u_i_fix = mFixedVelocitiesValues[i_velocity];
array_1d<double, TDim>& u_i = VelArray[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
u_i[comp] = u_i_fix[comp];
}
}
KRATOS_CATCH("")
}
//********************************
//function to compute coefficients
void ExtrapolateValues(unsigned int extrapolation_layers)
{
KRATOS_TRY
//ensure that corner nodes are wet if all of the nodes around them have a negative distance
typedef Node < 3 > PointType;
typedef PointerVector<PointType > PointVector;
typedef PointVector::iterator PointIterator;
mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances,mr_model_part.Nodes());
// mr_matrix_container.FillVectorFromDatabase(PRESS_PROJ, mXi,mr_model_part.Nodes());
//
// //loop on all the slip nodes and Set the pressure projection to -BodyForce if it has neighbours with distance greater than 0
// int slip_size = mSlipBoundaryList.size();
// #pragma omp parallel for firstprivate(slip_size)
// for (int i_slip = 0; i_slip < slip_size; i_slip++)
// {
// unsigned int i_node = mSlipBoundaryList[i_slip];
// double dist = mdistances[i_node];
//
//
// if(dist <= 0.0)
// {
// int nout = 0;
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// //get global index of neighbouring node j
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
// const double& dist_j = mdistances[j_neighbour];
//
// if(dist_j > 0)
// nout++;
// }
//
// if(nout > 0) mXi[i_node] += mRho*mBodyForce;
// }
// }
//
// mr_matrix_container.WriteVectorToDatabase(PRESS_PROJ, mXi,mr_model_part.Nodes());
//reset is visited flag
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
inode->GetValue(IS_VISITED) = 0.0;
}
//generate a container with the layers to be extrapolated
std::vector< PointVector > layers(extrapolation_layers);
//detect the nodes inside the fluid surface
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
if (inode->FastGetSolutionStepValue(DISTANCE) < 0.0) //candidates are only the ones inside the fluid domain
{
GlobalPointersVector< Node < 3 > >& neighb_nodes = inode->GetValue(NEIGHBOUR_NODES);
for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
{
if (i->FastGetSolutionStepValue(DISTANCE) >= 0.0) //add the node as free surface if one of its neighb is outside
{
if (inode->GetValue(IS_VISITED) == 0.0)
{
layers[0].push_back(*(inode.base()));
inode->GetValue(IS_VISITED) = 1.0;
}
}
}
}
else
{
//set everything to zero
noalias(inode->FastGetSolutionStepValue(VELOCITY)) = ZeroVector(3);
inode->FastGetSolutionStepValue(PRESSURE) = 0.0;
noalias(inode->FastGetSolutionStepValue(VELOCITY, 1)) = ZeroVector(3);
inode->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
noalias(inode->FastGetSolutionStepValue(PRESS_PROJ)) = ZeroVector(3);
noalias(inode->FastGetSolutionStepValue(PRESS_PROJ, 1)) = ZeroVector(3);
}
}
//fill the following layers by neighbour relationships
//each layer fills the following
for (unsigned int il = 0; il < extrapolation_layers - 1; il++)
{
for (PointIterator iii = (layers[il]).begin(); iii != (layers[il]).end(); iii++)
{
GlobalPointersVector< Node < 3 > >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
for (GlobalPointersVector< Node < 3 > >::iterator jjj = neighb_nodes.begin(); jjj != neighb_nodes.end(); jjj++) //destination = origin1 + value * Minv*origin
{
if (jjj->FastGetSolutionStepValue(DISTANCE) >= 0 &&
jjj->GetValue(IS_VISITED) == 0.0)
{
layers[il + 1].push_back(Node < 3 > ::Pointer(*(jjj.base())));
jjj->GetValue(IS_VISITED) = double(il + 2.0);
}
}
}
}
array_1d<double, 3 > aux, aux_proj;
//TESTING!!!
//fill the pressure projection on the first layer inside the fluid
//by extrapolating from the pressure projection on the layer -1 (the first layer completely inside the domain)
for (PointIterator iii = (layers[0]).begin(); iii != (layers[0]).end(); iii++)
{
noalias(aux_proj) = ZeroVector(3);
double avg_number = 0.0;
GlobalPointersVector< Node < 3 > >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
{
if (i->GetValue(IS_VISITED) == 0.0) //the node will be considered for extrapolation only if completely inside
{
const array_1d<double, 3 > & inside_press_grad = i->FastGetSolutionStepValue(PRESS_PROJ);
noalias(aux_proj) += inside_press_grad;
avg_number += 1.0;
}
}
if (avg_number != 0.0) //this case means that it has some neighbours that are completely internal
{
aux_proj /= avg_number;
noalias(iii->FastGetSolutionStepValue(PRESS_PROJ)) = aux_proj;
}
else //case in which there is not a layer of nodes completely internal
{
array_1d<double,3>& pproj = iii->FastGetSolutionStepValue(PRESS_PROJ);
for(unsigned int i=0; i<TDim; i++)
pproj[i] = mRho*mBodyForce[i];
// noalias(iii->FastGetSolutionStepValue(PRESS_PROJ)) = mRho*mBodyForce;
}
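//(the fallback above amounts to assuming a hydrostatic pressure gradient, grad p ~ rho g)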
}
//perform extrapolation layer by layer by making an average
//of the neighbours of lower order
for (unsigned int il = 1; il < extrapolation_layers; il++)
{
// std::cout << "layer " << il << std::endl;
for (PointIterator iii = layers[il].begin(); iii != layers[il].end(); iii++)
{
// std::cout << iii->Id() << " " << std::endl;
const array_1d<double, 3 > & coords_top = iii->Coordinates();
//extrapolate the average velocity
noalias(aux) = ZeroVector(3);
noalias(aux_proj) = ZeroVector(3);
double avg_number = 0.0;
double pavg = 0.0;
GlobalPointersVector< Node < 3 > >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
{
if (i->GetValue(IS_VISITED) < (il + 1) && i->GetValue(IS_VISITED) != 0.0)
{
const array_1d<double, 3 > & coords_bottom = i->Coordinates();
array_1d<double, 3 > direction_vec = coords_top;
noalias(direction_vec) -= coords_bottom;
const array_1d<double, 3 > & press_grad = i->FastGetSolutionStepValue(PRESS_PROJ);
double temp = inner_prod(direction_vec, press_grad);
double pestimate = i->FastGetSolutionStepValue(PRESSURE,1) + temp;
pavg += pestimate;
noalias(aux_proj) += press_grad;
noalias(aux) += i->FastGetSolutionStepValue(VELOCITY);
avg_number += 1.0;
}
}
if (avg_number != 0.0)
{
aux /= avg_number;
pavg /= avg_number;
aux_proj /= avg_number;
}
else
{
KRATOS_THROW_ERROR(std::runtime_error, "error in extrapolation: no neighbours found on an extrapolation layer -- impossible", "");
}
noalias(iii->FastGetSolutionStepValue(VELOCITY)) = aux;
noalias(iii->FastGetSolutionStepValue(VELOCITY, 1)) = aux;
iii->FastGetSolutionStepValue(PRESSURE, 1) = pavg;
noalias(iii->FastGetSolutionStepValue(PRESS_PROJ)) = aux_proj;
noalias(iii->FastGetSolutionStepValue(PRESS_PROJ, 1)) = aux_proj;
}
}
mr_matrix_container.FillVectorFromDatabase(PRESS_PROJ, mXi, mr_model_part.Nodes());
//
// //set the pressure to zero on the outer layers (>2)
// for (unsigned int il = 2; il < extrapolation_layers; il++)
// {
// for (PointIterator iii = layers[il].begin(); iii != layers[il].end(); iii++)
//
// {
// iii->FastGetSolutionStepValue(PRESSURE) = 0.0;
// }
// }
//mark nodes on which we will have to solve for convection
//mark all internal nodes
ModelPart::NodesContainerType::iterator it_begin = mr_model_part.NodesBegin();
for (unsigned int i_node = 0; i_node < mr_model_part.Nodes().size(); i_node++)
{
ModelPart::NodesContainerType::iterator it = it_begin+i_node;
if(it->FastGetSolutionStepValue(DISTANCE) <= 0.0)
it->GetValue(IS_VISITED) = 1.0;
else
it->GetValue(IS_VISITED) = 0.0;
}
//now mark all of the nodes up to the extrapolation layers - 1
for (unsigned int il = 0; il < extrapolation_layers-1; il++)
for (PointIterator iii = layers[il].begin(); iii != layers[il].end(); iii++)
iii->GetValue(IS_VISITED) = 1.0;
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
ApplyVelocityBC(mvel_n1);
mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
KRATOS_CATCH("")
}
void ChangeSignToDistance()
{
KRATOS_TRY
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
double dist = inode->FastGetSolutionStepValue(DISTANCE);
inode->FastGetSolutionStepValue(DISTANCE) = -dist;
}
KRATOS_CATCH("")
}
void MarkNodesByDistance(double min, double max)
{
KRATOS_TRY
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
double dist = inode->FastGetSolutionStepValue(DISTANCE);
if (dist > min && dist < max)
inode->GetValue(IS_VISITED) = 1.0;
else
inode->GetValue(IS_VISITED) = 0.0;
}
KRATOS_CATCH("")
}
void SaveScalarVariableToOldStep(Variable<double>& rVar)
{
KRATOS_TRY
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
inode->FastGetSolutionStepValue(rVar, 1) = inode->FastGetSolutionStepValue(rVar);
}
KRATOS_CATCH("")
}
void MarkExternalAndMixedNodes()
{
KRATOS_TRY
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
inode->GetValue(IS_VISITED) = 0.0;
}
//detect the nodes inside the fluid surface
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
if (inode->FastGetSolutionStepValue(DISTANCE) > 0.0) //candidates are only the ones inside the fluid domain
{
inode->GetValue(IS_VISITED) = 1.0;
GlobalPointersVector< Node < 3 > >& neighb_nodes = inode->GetValue(NEIGHBOUR_NODES);
for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
{
i->GetValue(IS_VISITED) = 1.0;
}
}
}
KRATOS_CATCH("")
}
void MarkInternalAndMixedNodes()
{
KRATOS_TRY
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
inode->GetValue(IS_VISITED) = 0.0;
}
//detect the nodes inside the fluid surface
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
if (inode->FastGetSolutionStepValue(DISTANCE) <= 0.0) //candidates are only the ones inside the fluid domain
{
inode->GetValue(IS_VISITED) = 1.0;
GlobalPointersVector< Node < 3 > >& neighb_nodes = inode->GetValue(NEIGHBOUR_NODES);
for (GlobalPointersVector< Node < 3 > >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
{
i->GetValue(IS_VISITED) = 1.0;
}
}
}
KRATOS_CATCH("")
}
void MarkInternalNodes()
{
KRATOS_TRY
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
inode->GetValue(IS_VISITED) = 0.0;
}
//detect the nodes inside the fluid surface
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
if (inode->FastGetSolutionStepValue(DISTANCE) <= 0.0) //candidates are only the ones inside the fluid domain
{
inode->GetValue(IS_VISITED) = 1.0;
}
}
KRATOS_CATCH("")
}
//**************************************
//function to calculate the area normals
void CalculateNormals(ModelPart::ConditionsContainerType& rConditions)
{
KRATOS_TRY
//calculate area normals face-by-face
array_1d<double, 3 > area_normal;
//2D case
if (TDim == 2)
{
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
CalculateNormal2D(cond_it, area_normal);
}//3D case
else if (TDim == 3)
{
//help vectors for cross product
array_1d<double, 3 > v1;
array_1d<double, 3 > v2;
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
CalculateNormal3D(cond_it, area_normal, v1, v2);
}
//(re)initialize normals
unsigned int n_nodes = mNodalFlag.size();
mInOutNormal.resize(n_nodes);
mSlipNormal.resize(n_nodes);
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
noalias(mSlipNormal[i_node]) = ZeroVector(TDim);
mis_slip[i_node] = false;
noalias(mInOutNormal[i_node]) = ZeroVector(TDim);
}
//loop over all faces
const double node_factor = 1.0 / TDim;
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
{
//get geometry data of the face
Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();
//reference for area normal of the face
array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL);
//slip condition
if (static_cast<bool>(cond_it->GetValue(IS_STRUCTURE)) == true)
for (unsigned int if_node = 0; if_node < TDim; if_node++)
{
unsigned int i_node = static_cast<unsigned int> (face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX));
array_1d<double, TDim>& slip_normal = mSlipNormal[i_node];
mis_slip[i_node] = true;
for (unsigned int comp = 0; comp < TDim; comp++)
{
slip_normal[comp] += node_factor * face_normal[comp];
}
}
}
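//each face has spread its area normal equally to its TDim nodes (node_factor = 1/TDim),
//so the nodal slip normal is the area-weighted average of the adjacent boundary faces
//and its magnitude carries the contributing area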
//fill the list of slip nodes
std::vector< unsigned int> tempmSlipBoundaryList;
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
if (mis_slip[i_node] == true)
tempmSlipBoundaryList.push_back(i_node);
mis_slip[i_node] = false;
}
mSlipBoundaryList.resize(tempmSlipBoundaryList.size(),false);
#pragma omp parallel for
for(int i=0; i<static_cast<int>(tempmSlipBoundaryList.size()); i++)
mSlipBoundaryList[i] = tempmSlipBoundaryList[i];
//loop over all faces to fill inlet outlet
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
{
//get geometry data of the face
Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();
//reference for area normal of the face
array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL);
//inlet or outlet condition
bool is_inlet_or_outlet = false;
if (cond_it->GetValue (IS_STRUCTURE) != true) is_inlet_or_outlet = true;
else
{
for (unsigned int if_node = 0; if_node < TDim; if_node++)
if (face_geometry[if_node].IsFixed (VELOCITY_X) )
is_inlet_or_outlet = true;
}
//slip condition
if (is_inlet_or_outlet) //the opposite of the loop before
for (unsigned int if_node = 0; if_node < TDim; if_node++)
{
unsigned int i_node = static_cast<unsigned int> (face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX));
array_1d<double, TDim>& inout_normal = mInOutNormal[i_node];
mis_slip[i_node] = true; //reuse the flag!
for (unsigned int comp = 0; comp < TDim; comp++)
{
inout_normal[comp] += node_factor * face_normal[comp];
}
}
}
//fill the list of inlet/outlet nodes
std::vector< unsigned int> tempmInOutBoundaryList;
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
if (mis_slip[i_node] == true)
tempmInOutBoundaryList.push_back(i_node);
}
mInOutBoundaryList.resize(tempmInOutBoundaryList.size(),false);
#pragma omp parallel for
for(int i=0; i<static_cast<int>(tempmInOutBoundaryList.size()); i++)
mInOutBoundaryList[i] = tempmInOutBoundaryList[i];
KRATOS_CATCH("")
}
//*******************************
//function to free dynamic memory
void Clear()
{
KRATOS_TRY
mViscosity.clear();
mWork.clear();
mvel_n.clear();
mvel_n1.clear();
mPn.clear();
mPn1.clear();
mHmin.clear();
mHavg.clear();
mSlipNormal.clear();
mNodalFlag.clear();
mFixedVelocities.clear();
mFixedVelocitiesValues.clear();
mPressureOutletList.clear();
// mPressureOutlet.clear();
mSlipBoundaryList.clear();
mL.clear();
mTauPressure.clear();
mTauConvection.clear();
mTau2.clear();
mBeta.clear();
mPiConvection.clear();
mphi_n.clear();
mphi_n1.clear();
mEps.clear();
mA.clear();
mB.clear();
mStrVel.clear();
mdiv_error.clear();
mdiag_stiffness.clear();
mis_slip.clear();
KRATOS_CATCH ("")
}
void ConvectDistance()
{
KRATOS_TRY
//variables for node based data handling
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//storage of nodal values in local variables
ValuesVectorType rhs, WorkConvection;
rhs.resize(n_nodes);
WorkConvection.resize(n_nodes);
ValuesVectorType active_nodes;
active_nodes.resize(n_nodes);
mr_matrix_container.FillScalarFromDatabase(POROSITY, mEps, mr_model_part.Nodes());
//read variables from Kratos
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(DISTANCE, mphi_n1, mr_model_part.Nodes());
mr_matrix_container.FillOldScalarFromDatabase(DISTANCE, mphi_n, mr_model_part.Nodes());
//mr_matrix_container.AssignVectorToVector(mphi_n1, mphi_n); //mWork = mphi_n
// //quick hack
// //set the distance to zero when it tries to go out of the pressure boundary
// int pressure_size = mPressureOutletList.size();
// #pragma omp parallel for firstprivate(pressure_size)
// for (int iii = 0; iii < pressure_size; iii++)
// {
// unsigned int i_node = mPressureOutletList[iii];
// mphi_n1[i_node] = fabs(mphi_n1[i_node]);
// mphi_n[i_node] = fabs(mphi_n[i_node]);
// }
//create and fill a vector of nodes for which we want to convect the velocity
ModelPart::NodesContainerType::iterator it_begin = mr_model_part.NodesBegin();
for (int i_node = 0; i_node < n_nodes; i_node++)
{
active_nodes[i_node] = (it_begin + i_node)->GetValue(IS_VISITED);
}
// //calculating the convective projection
// array_1d<double, TDim> a_i;
// array_1d<double, TDim> a_j;
// #pragma omp parallel for private(a_i,a_j)
// for (int i_node = 0; i_node < n_nodes; i_node++)
// {
// double& pi_i = mPiConvection[i_node];
// const double& phi_i = mphi_n1[i_node];
// //set to zero the projection
// pi_i = 0.0;
// if (active_nodes[i_node] != 0.0)
// {
// a_i = mvel_n1[i_node];
// a_i /= mEps[i_node];
//
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
//
// if (active_nodes[j_neighbour] != 0.0)
// {
// noalias(a_j) = mvel_n1[j_neighbour];
// a_j /= mEps[j_neighbour];
//
// const double& phi_j = mphi_n1[j_neighbour];
// CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
// edge_ij.Add_ConvectiveContribution(pi_i, a_i, phi_i, a_j, phi_j);
// }
// }
// //apply inverted mass matrix
// const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
// pi_i *= m_inv;
// }
// }
//calculating the convective projection
array_1d<double, TDim> a_i;
array_1d<double, TDim> a_j;
#pragma omp parallel for private(a_i,a_j)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& pi_i = mPiConvection[i_node];
// setting to zero the projection
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
pi_i[l_comp] = 0.0;
/* if (active_nodes[i_node] != 0.0)
{*/
const double& phi_i = mphi_n1[i_node];
noalias(a_i) = mvel_n1[i_node];
a_i /= mEps[i_node];
// loop to all the edges surrounding node I
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
noalias(a_j) = mvel_n1[j_neighbour];
a_j /= mEps[j_neighbour];
const double& phi_j = mphi_n1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Add_grad_p(pi_i, phi_i, phi_j);
}
// apply inverted mass matrix
const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
pi_i[l_comp] *= m_inv;
// }
}
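// Note: a minimal sketch of what the loop above assembles, assuming Add_grad_p
// accumulates the edge gradient contribution G_ij of edge ij:
//   pi_i = M_i^{-1} * sum_{j in N(i)} G_ij(phi_i, phi_j)  ~  (grad phi)_i
// i.e. mPiConvection holds a lumped-mass L2 projection of the distance
// gradient, used below by the limiter and by the high-order stabilization.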
//calculating the limiter
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
const array_1d<double, TDim>& pi_i = mPiConvection[i_node];
const double& p_i = mphi_n1[i_node];
double& beta_i = mBeta[i_node];
beta_i = 0.0;
double n = 0.0;
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const double& p_j = mphi_n1[j_neighbour];
const array_1d<double, TDim>& l_k = mEdgeDimensions[csr_index];
const array_1d<double, TDim>& pi_j = mPiConvection[j_neighbour];
// double proj = 0.0;
// for (unsigned int comp = 0; comp < TDim; comp++)
// proj += 0.5*l_k[comp]*(pi_i[comp]+pi_j[comp]);
// double beta = fabs((p_i - p_j - proj)/(fabs(p_i-p_j)+fabs(proj)+1e-4));
double proj = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
proj += 0.5 * l_k[comp]*(pi_i[comp] + pi_j[comp]);
// proj += dir[comp]*pi_i[comp];
double numerator = fabs(fabs(p_j - p_i) - fabs(proj));
double denom = fabs(fabs(p_j - p_i) + 1e-6);
beta_i += numerator / denom;
n += 1.0;
}
if (n > 0.0) //guard against nodes without neighbours
beta_i /= n;
if (beta_i > 1.0)
beta_i = 1.0;
}
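// Note: edge by edge, the limiter above compares the actual jump of phi with
// the jump predicted by the projected gradients (a worked form of the loop):
//   proj_ij = 0.5 * l_ij . (pi_i + pi_j)
//   beta_ij = | |phi_j - phi_i| - |proj_ij| | / ( |phi_j - phi_i| + 1e-6 )
//   beta_i  = min( 1, average over the edges of beta_ij )
// so beta_i ~ 0 on smooth data and beta_i ~ 1 near discontinuities, scaling
// the shock-capturing term assembled in CalculateRHS_convection.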
// mr_matrix_container.WriteScalarToDatabase(TEMPERATURE, active_nodes, rNodes);
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
mr_matrix_container.AssignVectorToVector(mphi_n, WorkConvection); //WorkConvection = mphi_n
//first step of Runge Kutta
// mr_matrix_container.AssignVectorToVector(mphi_n,mphi_n1); //mphi_n1 = mphi_n
mr_matrix_container.SetToZero(rhs);
CalculateRHS_convection(mphi_n1, mvel_n1, rhs, active_nodes);
mr_matrix_container.Add_Minv_value(WorkConvection, WorkConvection, delta_t / 6.0, mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mphi_n1, mphi_n, 0.5 * delta_t, mr_matrix_container.GetInvertedMass(), rhs);
//second step
mr_matrix_container.SetToZero(rhs);
CalculateRHS_convection(mphi_n1, mvel_n1, rhs, active_nodes);
mr_matrix_container.Add_Minv_value(WorkConvection, WorkConvection, delta_t / 3.0, mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mphi_n1, mphi_n, 0.5 * delta_t, mr_matrix_container.GetInvertedMass(), rhs);
//third step
mr_matrix_container.SetToZero(rhs);
CalculateRHS_convection(mphi_n1, mvel_n1, rhs, active_nodes);
mr_matrix_container.Add_Minv_value(WorkConvection, WorkConvection, delta_t / 3.0, mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mphi_n1, mphi_n, delta_t, mr_matrix_container.GetInvertedMass(), rhs);
//fourth step
mr_matrix_container.SetToZero(rhs);
CalculateRHS_convection(mphi_n1, mvel_n1, rhs, active_nodes);
mr_matrix_container.Add_Minv_value(WorkConvection, WorkConvection, delta_t / 6.0, mr_matrix_container.GetInvertedMass(), rhs);
//copy the result of the Runge-Kutta steps back to mphi_n1
mr_matrix_container.AssignVectorToVector(WorkConvection, mphi_n1);
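// Note: assuming mphi_n1 holds phi^n on entry, the four sub-steps above
// realize the classical RK4 scheme; with k_s = M^{-1} rhs(intermediate state),
//   phi^{n+1} = phi^n + dt/6 * (k1 + 2*k2 + 2*k3 + k4)
// where the Add_Minv_value calls with weights dt/6, dt/3, dt/3, dt/6
// accumulate WorkConvection, and the calls with 0.5*dt, 0.5*dt, dt rebuild
// the intermediate states in mphi_n1.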
// // make sure that boundary nodes that are very close to the free surface get wet
// int slip_size = mSlipBoundaryList.size();
// #pragma omp parallel for firstprivate(slip_size)
// for (int i_slip = 0; i_slip < slip_size; i_slip++) {
// unsigned int i_node = mSlipBoundaryList[i_slip];
// const double& h_i = mHmin[i_node];
// double& dist_i = mphi_n1[i_node];
//
// if(dist_i > 0.0 && dist_i < 0.5*h_i)
// {
// //loop to all the edges surrounding node I
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
// if(mphi_n1[j_neighbour] <= 0.0)
// dist_i = -0.01 * h_i;
// }
// }
//
// }
// int fixed_size = mFixedVelocities.size();
// #pragma omp parallel for firstprivate(fixed_size)
// for (int i_velocity = 0; i_velocity < fixed_size; i_velocity++) {
// unsigned int i_node = mFixedVelocities[i_velocity];
// const double& h_i = mHmin[i_node];
// double& dist_i = mphi_n1[i_node];
//
// if(dist_i > 0.0 && dist_i < 0.5*h_i)
// {
// //loop to all the edges surrounding node I
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
// if(mphi_n1[j_neighbour] <= 0.0)
// dist_i = -0.01 * h_i;
// }
// }
// }
//wet corner nodes if needed
int corner_size = mcorner_nodes.size();
for (int i = 0; i < corner_size; i++)
{
int i_node = mcorner_nodes[i];
bool to_be_wettened = true;
double min_dist = 0.0;
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
double neighb_dist = mphi_n1[j_neighbour];
if(min_dist > neighb_dist)
min_dist = neighb_dist;
if(neighb_dist >= 0.0)
{
to_be_wettened=false;
}
}
if(to_be_wettened==true)
mphi_n1[i_node] = min_dist;
}
mr_matrix_container.WriteScalarToDatabase(DISTANCE, mphi_n1, mr_model_part.Nodes());
KRATOS_CATCH("")
}
void ReduceTimeStep(ModelPart& rModelPart, double NewTime)
{
KRATOS_TRY
/*
double current_time = rModelPart.GetProcessInfo()[TIME];
double current_delta_time = rModelPart.GetProcessInfo()[DELTA_TIME];
double old_time = current_time - current_delta_time;
double new_reduced_time = NewTime;
double new_delta_time = new_reduced_time - old_time;
rModelPart.GetProcessInfo()[TIME] = new_reduced_time;
rModelPart.GetProcessInfo()[DELTA_TIME] = new_delta_time;
//now copy the database from the old step on the top of the current step
int step_data_size = rModelPart.GetNodalSolutionStepDataSize();
double* current_data = (pnode)->SolutionStepData().Data(0);
double* old_data = (pnode)->SolutionStepData().Data(1);
for (int j = 0; j < step_data_size; j++)
current_data[j] = old_data[j];
*/
rModelPart.OverwriteSolutionStepData(1, 0);
rModelPart.GetProcessInfo().SetCurrentTime(NewTime);
KRATOS_CATCH("error in reducing the time step")
}
bool CheckDistanceConvection()
{
int n_large_distance_gradient = 0;
array_1d<double, TDim> grad_d;
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//calculate gradient of distance on the nodes and count occurrences of large gradients (that indicate a failure)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double dist = mdistances[i_node];
if (dist <= 0.0)
{
for (unsigned int comp = 0; comp < TDim; comp++)
grad_d[comp] = 0.0;
double dist_i = mdistances[i_node];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const double& dist_j = mdistances[j_neighbour];
//accumulate the distance gradient contribution of edge ij
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Add_grad_p(grad_d, dist_i, dist_j);
}
const double& m_inv = mr_matrix_container.GetInvertedMass()[i_node];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
grad_d[l_comp] *= m_inv;
double norm_grad = norm_2(grad_d);
if (norm_grad > 1.5) //large gradient found
n_large_distance_gradient += 1;
}
}
if (n_large_distance_gradient != 0)
{
bool success = false;
return success;
}
else
{
bool success = true;
return success;
}
}
void ActivateWallResistance(double Ywall)
{
mWallLawIsActive = true;
mY_wall = Ywall;
}
double ComputeVolumeVariation()
{
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double dt = CurrentProcessInfo[DELTA_TIME];
//slip condition
int inout_size = mInOutBoundaryList.size();
double vol_var = 0.0;
//#pragma omp parallel for firstprivate(slip_size)
for (int i = 0; i < inout_size; i++)
{
unsigned int i_node = mInOutBoundaryList[i];
double dist = mdistances[i_node];
if (dist <= 0.0)
{
const array_1d<double, TDim>& U_i = mvel_n1[i_node];
const array_1d<double, TDim>& an_i = mInOutNormal[i_node];
double projection_length = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
{
projection_length += U_i[comp] * an_i[comp];
}
vol_var += projection_length;
}
}
return vol_var * dt;
}
double ComputeWetVolume()
{
KRATOS_TRY
mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
//slip condition
double wet_volume = 0.0;
//#pragma omp parallel for firstprivate(slip_size)
for (int i = 0; i < static_cast<int>(mdistances.size()); i++)
{
double dist = mdistances[i];
const double m_inv = mr_matrix_container.GetInvertedMass()[i];
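//note: 1.0/m_inv is the lumped-mass nodal volume, so the sum below
//approximates the total wet (fluid) volume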
if (dist <= 0.0)
{
wet_volume += 1.0 / m_inv;
}
}
return wet_volume;
KRATOS_CATCH("");
}
void DiscreteVolumeCorrection(double expected_volume, double measured_volume)
{
// std::cout << "measured_volume: " << measured_volume << ", expected_volume: " << expected_volume << std::endl;
double volume_error = expected_volume - measured_volume;
if (measured_volume < expected_volume)
{
double layer_volume = 0.0;
std::vector<unsigned int> first_outside;
int n_nodes = mdistances.size();
// find list of the first nodes outside of the fluid and compute their volume
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double dist = mdistances[i_node];
if (dist > 0.0) //node is outside domain
{
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
if(mdistances[j_neighbour] <= 0.0)
{
const double nodal_mass = 1.0 / mr_matrix_container.GetInvertedMass()[i_node];
if(nodal_mass < volume_error - layer_volume)
{
first_outside.push_back(i_node);
layer_volume += nodal_mass;
}
//const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
//layer_volume += 1.0/m_inv;
}
}
}
}
// std::cout << ", layer_volume: " << layer_volume << std::endl;
// if (measured_volume + layer_volume <= expected_volume)
{
// mark the nodes in the outside layer with a small negative distance
for(unsigned int i=0; i<first_outside.size(); i++)
{
unsigned int i_node = first_outside[i];
mdistances[i_node] = -mHavg[i_node];
}
}
}
mr_matrix_container.WriteScalarToDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
}
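// Note: a hypothetical usage sketch of the volume correction (the caller-side
// names below are illustrative, not part of this class):
//   double expected = last_wet_volume + solver.ComputeVolumeVariation();
//   double measured = solver.ComputeWetVolume();
//   solver.DiscreteVolumeCorrection(expected, measured);
// nodes just outside the interface are wetted one at a time while the sum of
// their lumped masses stays below the volume deficit.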
void PushFreeSurface()
{
//double layer_volume = 0.0;
std::vector<unsigned int> first_outside;
int n_nodes = mdistances.size();
//find list of the first nodes outside of the fluid and compute their volume
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double dist = mdistances[i_node];
if (dist > 0.0) //node is outside domain
{
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
if(mdistances[j_neighbour] <= 0.0)
{
//mark the nodes in the outside layer with a small negative distance
mdistances[i_node] = -mHavg[i_node];
}
}
}
}
mr_matrix_container.WriteScalarToDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
}
//***************************************
//function to set adequate time step size
double ComputeBoundedTimeStep(const double CFLNumber, const double MaxDt)
{
KRATOS_TRY
//save the maximum time step
max_dt = MaxDt;
//local variable for time step size
double delta_t = 1e10;//max_dt;
mdelta_t_avg = 1e10;//max_dt;
//getting value of current velocity and of viscosity
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(VISCOSITY, mViscosity, mr_model_part.Nodes());
// mr_matrix_container.FillVectorFromDatabase(PRESS_PROJ, mXi, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(POROSITY, mEps, mr_model_part.Nodes());
// mr_matrix_container.FillScalarFromDatabase(DIAMETER, mD, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(LIN_DARCY_COEF, mA, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(NONLIN_DARCY_COEF, mB, mr_model_part.Nodes());
mr_matrix_container.FillVectorFromDatabase(STRUCTURE_VELOCITY, mStrVel, mr_model_part.Nodes());
// double delta_t_i = delta_t;
//*******************
//loop over all nodes
unsigned int n_nodes = mvel_n1.size();
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, TDim>& v_i = mvel_n1[i_node];
const double havg_i = mHavg[i_node];
const double hmin_i = mHmin[i_node];
const double eps_i = mEps[i_node];
const double nu_i = mViscosity[i_node];
// const double d_i = mD[i_node];
// const double lindarcy_i = mA[i_node];
// const double nonlindarcy_i = mB[i_node];
// double vel_norm = norm_2(v_i);
double vel_norm = 0.0;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
{
vel_norm += v_i[l_comp]*v_i[l_comp];
}
vel_norm = sqrt(vel_norm);
// const array_1d<double, TDim>& str_v_i = mStrVel[i_node];
// array_1d<double, TDim> rel_vel_i;
// for(unsigned int comp = 0; comp < TDim; comp++)
// {rel_vel_i[comp] = v_i[comp] - str_v_i[comp];}
// double rel_vel_norm = norm_2(rel_vel_i);
//// double porosity_coefficient = ComputePorosityCoefficient(mViscosity, vel_norm, eps_i, d_i);
// double porosity_coefficient = ComputePorosityCoefficient(rel_vel_norm, eps_i, lindarcy_i, nonlindarcy_i);
/*KRATOS_WATCH("porosity_coefficient ----------- Timestep")
KRATOS_WATCH(porosity_coefficient)*/
vel_norm /= eps_i;
//use CFL condition to compute time step size
double delta_t_i = CFLNumber * 1.0 / (2.0 * vel_norm /hmin_i + 4.0 * nu_i / (hmin_i * hmin_i) /*+ porosity_coefficient*/);
double delta_t_i_avg = 1.0 / (2.0 * vel_norm /havg_i + 4.0 * nu_i / (havg_i * havg_i) /*+ porosity_coefficient*/);
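// Note: the two expressions above are the standard convection-diffusion CFL
// bound dt_i = CFL / (2*|v|/h + 4*nu/h^2). As a worked example with
// illustrative values CFL = 0.5, |v|/eps = 1 m/s, h = 0.01 m, nu = 1e-6 m^2/s:
//   dt_i = 0.5 / (2*1/0.01 + 4*1e-6/1e-4) = 0.5 / (200 + 0.04) ~ 2.5e-3 s
// so the convective term dominates unless the mesh is extremely fine.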
if(delta_t_i < 10e-8) //NO PHYSICS AT ALL!!!!! bounding the delta_t_i to 10e-08 by reducing the velocity!!
{
//std::cout << "NO PHYSICS AT ALL!!!!! bounding the delta_t_i to 10e-08 by reducing the velocity!!" << std::endl;
//KRATOS_WATCH(delta_t_i)
v_i *= delta_t_i / 10e-8;
delta_t_i = 10e-8;
}
if(delta_t_i_avg < 10e-8) //NO PHYSICS AT ALL!!!!! bounding the delta_t_i_avg to 10e-08 by reducing the velocity!!
{
//std::cout << "NO PHYSICS AT ALL!!!!! bounding the delta_t_i_avg to 10e-08 by reducing the velocity!!" << std::endl;
//KRATOS_WATCH(delta_t_i_avg)
v_i *= delta_t_i_avg / 10e-8;
delta_t_i_avg = 10e-8;
}
//consider the most restrictive case: neighbouring velocities with similar direction but opposite sense.
//loop over all neighbours
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
//get global index of neighbouring node j
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, TDim>& v_j = mvel_n1[j_neighbour];
double v_diff_norm = 0.0;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
{
double temp = v_i[l_comp] - v_j[l_comp];
v_diff_norm += temp*temp;
}
v_diff_norm = sqrt(v_diff_norm);
v_diff_norm /= eps_i;
double delta_t_j = CFLNumber * 1.0 / (2.0 * v_diff_norm /hmin_i + 4.0 * nu_i / (hmin_i * hmin_i));
if(delta_t_j < 10e-8) //NO PHYSICS AT ALL!!!!! bounding the delta_t_j to 10e-08 by reducing the velocity!!
{
//std::cout << "NO PHYSICS AT ALL!!!!! bounding the delta_t_j to 10e-08 by reducing the velocity!!" << std::endl;
//KRATOS_WATCH(delta_t_j)
v_j *= delta_t_j / 10e-8;
delta_t_j = 10e-8;
}
if (delta_t_j < delta_t_i)
delta_t_i = delta_t_j;
// if ((v_i_par >= 0.0 && v_j_par <= 0.0) || (v_i_par <= 0.0 && v_j_par >= 0.0))
// {
// double delta_t_j = CFLNumber * 1.0 / (2.0 * norm_2(v_diff) /hmin_i + 4.0 * mViscosity / (hmin_i * hmin_i));
//// double delta_t_j = CFLNumber / ((fabs(v_i_par) + fabs(v_j_par)) / mHmin[i_node] + 2.0 * mViscosity / (mHmin[i_node] * mHmin[i_node]));
// // KRATOS_WATCH(delta_t_j);
// // KRATOS_WATCH(delta_t_i);
// if (delta_t_j < delta_t_i)
// delta_t_i = delta_t_j;
// }
}
//choose the overall minimum of delta_t_i
if (delta_t_i < delta_t)
delta_t = delta_t_i;
if(delta_t_i_avg < mdelta_t_avg)
mdelta_t_avg = delta_t_i_avg;
}
//*******************
//perform MPI synchronization of the dt (the minimum should be kept)
if(delta_t <= 10e-8) // writing back the changed velocities
mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
return delta_t;
KRATOS_CATCH("")
}
void CalculatePorousResistanceLaw(unsigned int res_law)
{
// const double nu_i = mViscosity;
if(res_law == 1)
{
/* if the chosen resistance law is ERGUN calculate Ergun A and B*/
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
const double eps = inode->FastGetSolutionStepValue(POROSITY);/*reading from kratos database*/
const double d = inode->FastGetSolutionStepValue(DIAMETER);/*reading from kratos database*/
const double nu = inode->FastGetSolutionStepValue(VISCOSITY);/*reading from kratos database*/
double& a = inode-> FastGetSolutionStepValue(LIN_DARCY_COEF);/*changing kratos database*/
double& b = inode-> FastGetSolutionStepValue(NONLIN_DARCY_COEF);/*changing kratos database*/
if(eps < 1.0)
{
double k_inv = 150.0 * (1.0 - eps)*(1.0 - eps) / (eps * eps * eps * d * d);
a = nu * k_inv;
b = (1.75 / eps) * sqrt(k_inv / (150.0 * eps));
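// Note: these are the Ergun relations written for a Darcy-Forchheimer drag
// of the form eps*(a + b*|u|)*u, with the permeability k given by
//   1/k = 150*(1-eps)^2 / (eps^3 * d^2),   a = nu/k,
//   b = (1.75/eps) * sqrt( (1/k) / (150*eps) )
// matching the k_inv, a and b computed above.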
}
else
{
a = 0.0;
b = 0.0;
}
}
}
else
{
/* for a custom resistance law, or when no resistance law is present ---> set A and B to zero for non-porous nodes*/
for (ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
inode != mr_model_part.NodesEnd();
inode++)
{
const double eps = inode->FastGetSolutionStepValue(POROSITY); /*reading from kratos database*/
double& a = inode-> FastGetSolutionStepValue(LIN_DARCY_COEF); /*changing kratos database*/
double& b = inode-> FastGetSolutionStepValue(NONLIN_DARCY_COEF); /*changing kratos database*/
if(eps == 1.0)
{
a = 0.0;
b = 0.0;
}
}
}
mr_matrix_container.FillScalarFromDatabase(LIN_DARCY_COEF, mA, mr_model_part.Nodes()); /*filling edgebased database reading from kratos database*/
mr_matrix_container.FillScalarFromDatabase(NONLIN_DARCY_COEF, mB, mr_model_part.Nodes()); /*filling edgebased database reading from kratos database*/
}
private:
double mMolecularViscosity;
MatrixContainer& mr_matrix_container;
ModelPart& mr_model_part;
bool muse_mass_correction;
//parameters controlling the wall law
bool mWallLawIsActive;
double mY_wall;
//parameters for controlling the usage of the delta time in the stabilization
double mstabdt_pressure_factor;
double mstabdt_convection_factor;
double medge_detection_angle;
double mtau2_factor;
bool massume_constant_dp;
//nodal values
ValuesVectorType mViscosity;
//velocity vector U at time steps n and n+1
CalcVectorType mWork, mvel_n, mvel_n1, mx;
//pressure vector p at time steps n and n+1
ValuesVectorType mPn, mPn1;
//coefficients
ValuesVectorType mdistances;
//minimum length of the edges surrounding each nodal point
ValuesVectorType mHmin;
ValuesVectorType mHavg;
CalcVectorType mEdgeDimensions;
//area normal
CalcVectorType mSlipNormal;
CalcVectorType mInOutNormal;
//projection terms
CalcVectorType mPi, mXi;
//flag for first time step
bool mFirstStep;
//flag to differentiate interior and boundary nodes
ValuesVectorType mNodalFlag;
//lists of nodes with different types of boundary conditions
IndicesVectorType mSlipBoundaryList, mPressureOutletList, mFixedVelocities, mInOutBoundaryList;
CalcVectorType mFixedVelocitiesValues;
// ValuesVectorType mPressureOutlet;
//intrinsic time step size
ValuesVectorType mTauPressure;
ValuesVectorType mTauConvection;
ValuesVectorType mTau2;
ValuesVectorType mdiv_error;
std::vector<bool> mis_slip;
//variables for resolving pressure equation
//laplacian matrix
TSystemMatrixType mL;
//constant variables
double mRho;
//double mViscosity;
array_1d<double, TDim> mBodyForce;
//variables for convection
ValuesVectorType mphi_n;
ValuesVectorType mphi_n1;
CalcVectorType mPiConvection;
ValuesVectorType mBeta;
//variables for edge BCs
IndicesVectorType medge_nodes;
CalcVectorType medge_nodes_direction;
IndicesVectorType mcorner_nodes;
ValuesVectorType mEps;
ValuesVectorType mdiag_stiffness;
// ValuesVectorType mD;
ValuesVectorType mA;
ValuesVectorType mB;
CalcVectorType mStrVel;
double mdelta_t_avg;
double max_dt;
double mshock_coeff;
//***********************************************************
//functions to calculate area normals for boundary conditions
void CalculateNormal2D(ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double, 3 > & area_normal)
{
Geometry<Node < 3 > >& face_geometry = (cond_it)->GetGeometry();
area_normal[0] = face_geometry[1].Y() - face_geometry[0].Y();
area_normal[1] = -(face_geometry[1].X() - face_geometry[0].X());
area_normal[2] = 0.00;
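//note: (dy, -dx) is the edge tangent rotated by -90 degrees, so the stored
//normal has the length of the boundary edge (an "area normal" in 2D)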
noalias((cond_it)->GetValue(NORMAL)) = area_normal;
}
void CalculateNormal3D(ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double, 3 > & area_normal, array_1d<double, 3 > & v1, array_1d<double, 3 > & v2)
{
Geometry<Node < 3 > >& face_geometry = (cond_it)->GetGeometry();
v1[0] = face_geometry[1].X() - face_geometry[0].X();
v1[1] = face_geometry[1].Y() - face_geometry[0].Y();
v1[2] = face_geometry[1].Z() - face_geometry[0].Z();
v2[0] = face_geometry[2].X() - face_geometry[0].X();
v2[1] = face_geometry[2].Y() - face_geometry[0].Y();
v2[2] = face_geometry[2].Z() - face_geometry[0].Z();
MathUtils<double>::CrossProduct(area_normal, v1, v2);
area_normal *= -0.5;
noalias((cond_it)->GetValue(NORMAL)) = area_normal;
}
//*********************************************************
//function to calculate minimum length of surrounding edges
void CalculateEdgeLengths(ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
//get number of nodes
unsigned int n_nodes = rNodes.size();
//reserve memory for storage of nodal coordinates
std::vector< array_1d<double, 3 > > position;
position.resize(n_nodes);
//get position of all nodes
for (typename ModelPart::NodesContainerType::iterator node_it = rNodes.begin(); node_it != rNodes.end(); node_it++)
{
//get the global index of the node
unsigned int i_node = static_cast<unsigned int> (node_it->FastGetSolutionStepValue(AUX_INDEX));
//save its coordinates locally
noalias(position[i_node]) = node_it->Coordinates();
//initialize minimum edge length with relatively big values
mHmin[i_node] = 1e10;
}
ValuesVectorType& container_hmin = mr_matrix_container.GetHmin();
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
mHmin[i_node] = container_hmin[i_node];
}
//take unstructured meshes into account
if (TDim == 2)
{
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
double& h_i = mHavg[i_node];
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
// double& rho_i = mRho[i_node];
h_i = sqrt(2.0 * m_i);
}
}
else if (TDim == 3)
{
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
double& h_i = mHavg[i_node];
double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
// double& rho_i = mRho[i_node];
h_i = pow(6.0 * m_i, 1.0 / 3.0);
}
}
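// Note: mHavg is a volume-equivalent length derived from the lumped mass m_i
// (the nodal area/volume): in 2D, h = sqrt(2*m) is the leg of a right
// triangle of area m = h^2/2; in 3D, h = (6*m)^(1/3) is the leg of a
// right-corner tetrahedron of volume m = h^3/6.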
//compute edge coordinates
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
array_1d<double, 3 > & pos_i = position[i_node];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
array_1d<double, 3 > & pos_j = position[j_neighbour];
array_1d<double, TDim>& l_k = mEdgeDimensions[csr_index];
for (unsigned int comp = 0; comp < TDim; comp++)
l_k[comp] = pos_i[comp] - pos_j[comp];
}
}
KRATOS_CATCH("")
}
//*********************************************************************
//function to calculate right-hand side of fractional momentum equation
void CalculateRHS_convection(
const ValuesVectorType& mphi,
const CalcVectorType& convective_velocity,
ValuesVectorType& rhs,
ValuesVectorType& active_nodes
)
{
KRATOS_TRY
int n_nodes = mphi.size();
// //calculating the convective projection
//#pragma omp parallel for
// for (int i_node = 0; i_node < n_nodes; i_node++)
// {
//
// double& pi_i = mPiConvection[i_node];
// const double& phi_i = mphi[i_node];
//
// //set to zero the projection
// pi_i = 0;
// if (active_nodes[i_node] != 0.0)
// {
//
// const array_1d<double, TDim>& a_i = convective_velocity[i_node];
//
// //loop to all the edges surrounding node I
// for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
// {
// unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
//
// if (active_nodes[j_neighbour] != 0.0)
// {
// const array_1d<double, TDim>& a_j = convective_velocity[j_neighbour];
// const double& phi_j = mphi[j_neighbour];
//
// CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
//
// edge_ij.Add_ConvectiveContribution(pi_i, a_i, phi_i, a_j, phi_j);
// }
// }
//
// //apply inverted mass matrix
// const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
// pi_i *= m_inv;
// }
// // KRATOS_WATCH(pi_i);
// // num = fabs(num);
// // if(num > norm_vI*0.0001)
// // mBeta[i_node] = 1.0 - num/denom;
// // else
// // mBeta[i_node] = 1.0;
//
// }
//perform MPI syncronization
//calculating the RHS
double stab_low;
double stab_high;
array_1d<double, TDim> a_i;
array_1d<double, TDim> a_j;
#pragma omp parallel for private(stab_low,stab_high,a_i,a_j)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& rhs_i = rhs[i_node];
const double& h_i = mHavg[i_node];
const double& phi_i = mphi[i_node];
noalias(a_i) = convective_velocity[i_node];
a_i /= mEps[i_node];
const array_1d<double, TDim>& proj_i = mPiConvection[i_node];
// const double& pi_i = mPiConvection[i_node];
double pi_i = proj_i[0] * a_i[0];
for (unsigned int l_comp = 1; l_comp < TDim; l_comp++)
pi_i += proj_i[l_comp] * a_i[l_comp];
// double beta = mBeta[i_node];
rhs_i = 0.0;
if (active_nodes[i_node] != 0.0)
{
const double& beta = mBeta[i_node];
double norm_a = a_i[0] * a_i[0];
for (unsigned int l_comp = 1; l_comp < TDim; l_comp++)
norm_a += a_i[l_comp] * a_i[l_comp];
norm_a = sqrt(norm_a);
//loop to all the edges surrounding node I
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
if (active_nodes[j_neighbour] != 0.0)
{
//double& rhs_j = rhs[j_neighbour];
const double& phi_j = mphi[j_neighbour];
noalias(a_j) = convective_velocity[j_neighbour];
a_j /= mEps[j_neighbour];
// const double& pi_j = mPiConvection[j_neighbour];
const array_1d<double, TDim>& proj_j = mPiConvection[j_neighbour];
double pi_j = proj_j[0] * a_i[0];
for (unsigned int l_comp = 1; l_comp < TDim; l_comp++)
pi_j += proj_j[l_comp] * a_i[l_comp];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
//convection operator
edge_ij.Sub_ConvectiveContribution(rhs_i, a_i, phi_i, a_j, phi_j); //this works
// edge_ij.Sub_D_v(rhs_i, a_i*phi_i, a_i*phi_j);
//calculate stabilization part
edge_ij.CalculateConvectionStabilization_LOW(stab_low, a_i, phi_i, a_j, phi_j);
double edge_tau = mTauConvection[i_node];
edge_ij.CalculateConvectionStabilization_HIGH(stab_high, a_i, pi_i, a_j, pi_j);
edge_ij.Sub_StabContribution(rhs_i, edge_tau, 1.0, stab_low, stab_high);
double coeff = 0.5 * mshock_coeff; //=0.7*0.5;
double laplacian_ij = 0.0;
edge_ij.CalculateScalarLaplacian(laplacian_ij);
double capturing = laplacian_ij * (phi_j - phi_i);
// rhs_i-= coeff*capturing*beta*norm_a*h_i;
double dir_laplacian = 0.0; //laplacian projected onto the convection direction
for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
dir_laplacian += a_i[k_comp] * a_i[m_comp] * edge_ij.LaplacianIJ(k_comp, m_comp);
if (norm_a > 1e-10)
{
dir_laplacian /= (norm_a * norm_a);
double capturing2 = dir_laplacian * (phi_j - phi_i);
if (fabs(capturing) > fabs(capturing2))
rhs_i -= coeff * (capturing - capturing2) * beta * norm_a * h_i;
}
}
}
}
// KRATOS_WATCH(rhs_i);
}
KRATOS_CATCH("")
}
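// Note: in compact form the loop above presumably assembles, per active node i,
//   rhs_i = -(a . grad phi)_i - tau_i * (stabilization, low/high order)
//           - 0.5*mshock_coeff*beta_i*|a|*h_i * (L_ij - L_ij^stream)(phi_j - phi_i)
// i.e. an edge-based convection operator, an OSS-type stabilization built
// from the projection mPiConvection, and an anisotropic (crosswind)
// shock-capturing term scaled by the limiter beta_i.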
//**************************************
void CornerDectectionHelper(Geometry< Node < 3 > >& face_geometry,
const array_1d<double, 3 > & face_normal,
const double An,
const GlobalPointersVector<Condition>& neighb,
const unsigned int i1,
const unsigned int i2,
const unsigned int neighb_index,
std::vector<unsigned int>& edge_nodes,
CalcVectorType& cornern_list
)
{
double acceptable_angle = 45.0 / 180.0 * 3.14159265358979; //angles of less than 45 deg (converted to radians) will be accepted
double acceptable_cos = cos(acceptable_angle);
if (face_geometry[i1].Id() < face_geometry[i2].Id()) //we do this to add the face ones
{
const array_1d<double, 3 > & neighb_normal = neighb[neighb_index].GetValue(NORMAL);
double neighb_An = norm_2(neighb_normal);
double cos_normal = 1.0 / (An * neighb_An) * inner_prod(face_normal, neighb_normal);
//if the angle is too big between the two normals then the edge in the middle is a corner
if (cos_normal < acceptable_cos)
{
array_1d<double, 3 > edge = face_geometry[i2].Coordinates() - face_geometry[i1].Coordinates();
double temp = norm_2(edge);
edge /= temp;
int index1 = face_geometry[i1].FastGetSolutionStepValue(AUX_INDEX);
int index2 = face_geometry[i2].FastGetSolutionStepValue(AUX_INDEX);
edge_nodes[index1] += 1;
edge_nodes[index2] += 1;
// double sign1 = inner_prod(cornern_list[index1], edge);
double sign1 = 0.0;
for(unsigned int i = 0 ; i < edge.size() ; i++)
{sign1 += cornern_list[index1][i]*edge[i];}
if (sign1 >= 0)
{ for(unsigned int i = 0 ; i < edge.size() ; i++)
cornern_list[index1][i] += edge[i];
}
else
{ for(unsigned int i = 0 ; i < edge.size() ; i++)
cornern_list[index1][i] -= edge[i];
}
double sign2 = inner_prod(cornern_list[index2], edge);
if (sign2 >= 0)
{ for(unsigned int i = 0 ; i < edge.size() ; i++)
cornern_list[index2][i] += edge[i];
}
else
{ for(unsigned int i = 0 ; i < edge.size() ; i++)
cornern_list[index2][i] -= edge[i];
}
}
}
}
//function to calculate the area normals
void DetectEdges3D(ModelPart::ConditionsContainerType& rConditions)
{
KRATOS_TRY
//calculate area normals face-by-face
array_1d<double, 3 > area_normal;
//(re)initialize normals
unsigned int n_nodes = mNodalFlag.size();
std::vector<unsigned int> temp_edge_nodes(n_nodes);
CalcVectorType temp_cornern_list(n_nodes);
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
temp_edge_nodes[i_node] = 0;
noalias(temp_cornern_list[i_node]) = ZeroVector(TDim);
}
//loop over all faces
// const double node_factor = 1.0 / TDim;
for (ModelPart::ConditionsContainerType::iterator cond_it = rConditions.begin(); cond_it != rConditions.end(); cond_it++)
{
//get geometry data of the face
Geometry<Node < 3 > >& face_geometry = cond_it->GetGeometry();
//reference for area normal of the face
const array_1d<double, 3 > & face_normal = cond_it->GetValue(NORMAL);
double An = norm_2(face_normal);
unsigned int current_id = cond_it->Id();
//slip condition
if (cond_it->GetValue(IS_STRUCTURE) == 1.0) //this is a slip face --> now look for its neighbours
{
const GlobalPointersVector<Condition>& neighb = cond_it->GetValue(NEIGHBOUR_CONDITIONS);
//check for neighbour zero
if (neighb[0].Id() != current_id) //check if the neighbour exists
CornerDectectionHelper(face_geometry, face_normal, An, neighb, 1, 2, 0, temp_edge_nodes, temp_cornern_list);
//check for neighbour one
if (neighb[1].Id() != current_id) //check if the neighbour exists
CornerDectectionHelper(face_geometry, face_normal, An, neighb, 2, 0, 1, temp_edge_nodes, temp_cornern_list);
//check for neighbour two
if (neighb[2].Id() != current_id) //check if the neighbour exists
CornerDectectionHelper(face_geometry, face_normal, An, neighb, 0, 1, 2, temp_edge_nodes, temp_cornern_list);
}
}
// ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
// mr_matrix_container.WriteVectorToDatabase(ACCELERATION, temp_cornern_list, rNodes);
//fill the list of edge_nodes
std::vector<unsigned int> tempmedge_nodes;
std::vector< array_1d<double,TDim> > tempmedge_nodes_direction;
std::vector<unsigned int> tempmcorner_nodes;
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
if (temp_edge_nodes[i_node] == 2) //node is an edge node
{
tempmedge_nodes.push_back(i_node);
array_1d<double, TDim>& node_edge = temp_cornern_list[i_node];
node_edge /= norm_2(node_edge);
tempmedge_nodes_direction.push_back(node_edge);
}
else if (temp_edge_nodes[i_node] > 2)
tempmcorner_nodes.push_back(i_node);
}
medge_nodes.resize(tempmedge_nodes.size(),false);
medge_nodes_direction.resize(tempmedge_nodes_direction.size(),false);
mcorner_nodes.resize(tempmcorner_nodes.size(),false);
#pragma omp parallel for
for ( int i = 0; i < static_cast<int>(tempmedge_nodes.size()); i++)
{
medge_nodes[i] = tempmedge_nodes[i];
medge_nodes_direction[i] = tempmedge_nodes_direction[i];
}
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(tempmcorner_nodes.size()); i++)
{
mcorner_nodes[i] = tempmcorner_nodes[i];
}
for (int i = 0; i < static_cast<int>(mcorner_nodes.size()); i++)
{
KRATOS_WATCH(mcorner_nodes[i]);
}
KRATOS_CATCH("")
}
// double ComputePorosityCoefficient(const double& viscosity, const double& vel_norm, const double& eps, const double& d)
// {
// // const double d = 0.01; //to be changed
// double linear;
// double non_linear;
// if (eps < 1.0)
// {
// double k_inv = 150.0 * (1.0 - eps)*(1.0 - eps) / (eps * eps * eps * d * d);
// linear = eps * viscosity * k_inv; // eps * Ai
// non_linear = (1.75 * vel_norm) * sqrt(k_inv / (150.0 * eps)); //eps * Bi * vel_norm
// // double linear = viscosity * k_inv;
// // double non_linear = (1.75 * vel_norm / eps) * sqrt(k_inv / (150.0 * eps));
// } else
// {
// linear = 0.0;
// non_linear = 0.0;
// }
// return linear + non_linear;
// }
double ComputePorosityCoefficient(const double& vel_norm, const double& eps, const double& a, const double& b)
{
double linear;
double non_linear;
// if (eps < 1.0) /*this check has been already done in calculating the resistance law*/
// {
linear = eps * a;
non_linear = eps * b * vel_norm;
// } else
// {
// linear = 0.0;
// non_linear = 0.0;
// }
return linear + non_linear;
}
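// Note: the returned coefficient presumably enters the momentum equation as
// an isotropic Darcy-Forchheimer drag applied to the velocity,
//   f_drag = ( eps*a + eps*b*|u| ) * u
// with a = LIN_DARCY_COEF and b = NONLIN_DARCY_COEF as filled in
// CalculatePorousResistanceLaw above.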
// double ComputeStructureContributionToPorosityCoefficient(const double& fluid_vel, const double& str_vel, const double& str_vel_norm, const double& eps, const double& a, const double& b)
// {
//
//
// }
void LaplacianSmooth(ValuesVectorType& to_be_smoothed, ValuesVectorType& aux)
{
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double dist = mdistances[i_node];
double correction = 0.0;
const double& origin_i = to_be_smoothed[i_node];
if (dist <= 0.0) //node is inside domain ---- if outside do nothing
{
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex()[i_node]; csr_index != mr_matrix_container.GetRowStartIndex()[i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const double& origin_j = to_be_smoothed[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
double l_ikjk;
edge_ij.CalculateScalarLaplacian(l_ikjk);
correction += l_ikjk * (origin_j - origin_i);
}
}
aux[i_node] = origin_i - correction;
}
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
to_be_smoothed[i_node] = aux[i_node];
}
void ComputeWallResistance(
const CalcVectorType& vel,
ValuesVectorType& diag_stiffness
// CalcVectorType& rhs
)
{
//parameters:
double k = 0.41;
double B = 5.1;
double toll = 1e-6;
double ym = mY_wall; //0.0825877; //0.0093823
double y_plus_intercept = 10.9931899;
unsigned int itmax = 100;
if (mViscosity[0] == 0)
KRATOS_THROW_ERROR(std::logic_error, "it is not possible to use the wall law with 0 viscosity", "");
//slip condition
int slip_size = mSlipBoundaryList.size();
#pragma omp parallel for firstprivate(slip_size,B,toll,ym,y_plus_intercept,itmax)
for (int i_slip = 0; i_slip < slip_size; i_slip++)
{
unsigned int i_node = mSlipBoundaryList[i_slip];
double dist = mdistances[i_node];
const double nu = mViscosity[i_node];
if (dist <= 0.0)
{
//array_1d<double, TDim>& rhs_i = rhs[i_node];
const array_1d<double, TDim>& U_i = vel[i_node];
const array_1d<double, TDim>& an_i = mSlipNormal[i_node];
//compute the modulus of the velocity
double mod_vel = 0.0;
double area = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
{
mod_vel += U_i[comp] * U_i[comp];
area += an_i[comp] * an_i[comp];
}
mod_vel = sqrt(mod_vel);
area = sqrt(area);
diag_stiffness[i_node] += area * mod_vel /pow(1.0/k*log(100.00) + B,2);/* * mWallReductionFactor[ i_node ];*/
//now compute the skin friction
double mod_uthaw = sqrt(mod_vel * nu / ym);
const double y_plus = ym * mod_uthaw / nu;
if (y_plus > y_plus_intercept)
{
//begin the cycle (Newton iteration) to calculate the real modulus of u_tau (stored in mod_uthaw):
unsigned int it = 0;
double dx = 1e10;
// KRATOS_WATCH(fabs(dx));
while (fabs(dx) > toll * mod_uthaw && it < itmax)
{
double a = 1.0 / k;
double temp = a * log(ym * mod_uthaw / nu) + B;
double y = mod_uthaw * (temp) - mod_vel;
double y1 = temp + a;
dx = y / y1;
mod_uthaw -= dx;
it = it + 1;
}
if (it == itmax)
std::cout << "attention max number of iterations exceeded in wall law computation" << std::endl;
}
// else
// {
// for (unsigned int comp = 0; comp < TDim; comp++)
// rhs_i[comp] -= U_i[comp] * area * mu / (density*ym) ;
// }
/* if (mod_vel > 1e-12)
for (unsigned int comp = 0; comp < TDim; comp++)
rhs_i[comp] -= U_i[comp] * area * mod_uthaw * mod_uthaw / (mod_vel);
*/
}
else
diag_stiffness[i_node] += 0.0;
}
}
void ApplySmagorinsky3D (double MolecularViscosity, double Cs)
{
KRATOS_TRY
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
//calculating the RHS
array_1d<double, TDim> grad_vx;
array_1d<double, TDim> grad_vy;
array_1d<double, TDim> grad_vz;
int n_nodes = rNodes.size();
mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, rNodes);
array_1d<double, TDim> stab_high;
#pragma omp parallel for private(grad_vx,grad_vy,grad_vz)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
//set to zero the gradients
for (unsigned int comp = 0; comp < TDim; comp++)
{
grad_vx[comp] = 0.0 ;
grad_vy[comp] = 0.0 ;
grad_vz[comp] = 0.0 ;
}
//compute node by node the gradients
const array_1d<double, TDim>& U_i = mvel_n1[i_node];
const double h = mHmin[i_node];
const double m_inv = mr_matrix_container.GetInvertedMass() [i_node];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
edge_ij.Add_grad_p (grad_vx, U_i[0], U_j[0]);
edge_ij.Add_grad_p (grad_vy, U_i[1], U_j[1]);
edge_ij.Add_grad_p (grad_vz, U_i[2], U_j[2]);
}
//finalize computation of the gradients by applying the inverted mass matrix
for (unsigned int comp = 0; comp < TDim; comp++)
{
grad_vx[comp] *= m_inv ;
grad_vy[comp] *= m_inv ;
grad_vz[comp] *= m_inv ;
}
//symmetrize and multiply by 2
grad_vx[0] *= 2.0;
grad_vy[1] *= 2.0;
grad_vz[2] *= 2.0;
grad_vx[1] += grad_vy[0];
grad_vx[2] += grad_vz[0];
grad_vy[2] += grad_vz[1];
grad_vy[0] += grad_vx[1];
grad_vz[0] += grad_vx[2];
grad_vz[1] += grad_vy[2];
//compute smagorinsky term
double aux = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
{
aux += grad_vx[comp] * grad_vx[comp] ;
aux += grad_vy[comp] * grad_vy[comp] ;
aux += grad_vz[comp] * grad_vz[comp] ;
}
aux *= 0.5;
if (aux < 0.0 ) aux=0.0;
double turbulent_viscosity = Cs*h*h*sqrt (aux) /**MolecularViscosity*/;
mViscosity[i_node] = turbulent_viscosity + MolecularViscosity;
}
mr_matrix_container.WriteScalarToDatabase (VISCOSITY, mViscosity, rNodes);
KRATOS_CATCH ("");
}
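// Note: ApplySmagorinsky3D computes nu_t = Cs * h^2 * |S| with
//   |S| = sqrt( 0.5 * sum_ij (2*S_ij)^2 ),  S_ij = 0.5*(du_i/dx_j + du_j/dx_i)
// since grad_vx/grad_vy/grad_vz hold the rows of 2*S after the symmetrization
// step; Cs multiplies h^2 directly here, so it plays the role of the squared
// Smagorinsky constant in the usual (Cs*h)^2 * |S| form.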
void ApplySmagorinsky2D (double MolecularViscosity, double Cs)
{
KRATOS_TRY
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
//calculating the RHS
array_1d<double, TDim> grad_vx;
array_1d<double, TDim> grad_vy;
// array_1d<double, TDim> grad_vz;
int n_nodes = rNodes.size();
mr_matrix_container.FillVectorFromDatabase (VELOCITY, mvel_n1, rNodes);
array_1d<double, TDim> stab_high;
#pragma omp parallel for private(grad_vx,grad_vy)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
//set to zero the gradients
for (unsigned int comp = 0; comp < TDim; comp++)
{
grad_vx[comp] = 0.0 ;
grad_vy[comp] = 0.0 ;
// grad_vz[comp] = 0.0 ;
}
//compute node by node the gradients
const array_1d<double, TDim>& U_i = mvel_n1[i_node];
const double h = mHmin[i_node];
const double m_inv = mr_matrix_container.GetInvertedMass() [i_node];
for (unsigned int csr_index = mr_matrix_container.GetRowStartIndex() [i_node]; csr_index != mr_matrix_container.GetRowStartIndex() [i_node + 1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex() [csr_index];
const array_1d<double, TDim>& U_j = mvel_n1[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues() [csr_index];
edge_ij.Add_grad_p (grad_vx, U_i[0], U_j[0]);
edge_ij.Add_grad_p (grad_vy, U_i[1], U_j[1]);
}
//finalize computation of the gradients by applying the inverted mass matrix
for (unsigned int comp = 0; comp < TDim; comp++)
{
grad_vx[comp] *= m_inv ;
grad_vy[comp] *= m_inv ;
}
//symmetrize and multiply by 2
grad_vx[0] *= 2.0;
grad_vy[1] *= 2.0;
grad_vx[1] += grad_vy[0];
grad_vy[0] += grad_vx[1];
//compute smagorinsky term
double aux = 0.0;
for (unsigned int comp = 0; comp < TDim; comp++)
{
aux += grad_vx[comp] * grad_vx[comp] ;
aux += grad_vy[comp] * grad_vy[comp] ;
}
aux *= 0.5;
if (aux < 0.0 ) aux=0.0;
double turbulent_viscosity = Cs*h*h*sqrt (aux) /**MolecularViscosity*/;
mViscosity[i_node] = turbulent_viscosity + MolecularViscosity;
}
mr_matrix_container.WriteScalarToDatabase (VISCOSITY, mViscosity, rNodes);
KRATOS_CATCH ("");
}
void Add_Effective_Inverse_Multiply (
CalcVectorType& destination,
const CalcVectorType& origin1,
const double value,
const ValuesVectorType& mass,
const ValuesVectorType& diag_stiffness,
const CalcVectorType& origin
)
{
KRATOS_TRY
int loop_size = destination.size();
#pragma omp parallel for
for (int i_node = 0; i_node < loop_size; i_node++)
{
array_1d<double, TDim>& dest = destination[i_node];
const double m = mass[i_node];
const double d = diag_stiffness[i_node];
const array_1d<double, TDim>& origin_vec1 = origin1[i_node];
const array_1d<double, TDim>& origin_value = origin[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
dest[comp] = value / (m + value*d) * ( m/value * origin_vec1[comp] + origin_value[comp] );
}
KRATOS_CATCH ("")
}
};
} //namespace Kratos
#undef SYMM_PRESS
#endif //KRATOS_EDGEBASED_LEVELSET_FLUID_SOLVER_H_INCLUDED defined
|
owl_ndarray_pool_impl.h | /*
* OWL - OCaml Scientific and Engineering Computing
* Copyright (c) 2016-2022 Liang Wang <liang@ocaml.xyz>
*/
#ifdef OWL_ENABLE_TEMPLATE
CAMLprim value FUN_NATIVE (spatial) (
value vInput_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows,
value vRow_stride, value vCol_stride,
value vPadding, value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = input_cols * input_rows * in_channel;
const int input_ri = input_rows * in_channel;
const int output_cri = output_cols * output_rows * in_channel;
const int output_ri = output_rows * in_channel;
memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
int pr = 0, pc = 0;
if (padding != 1){
pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
}
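/* Note: a worked example of the padding arithmetic above (presumably the
   SAME-padding case; the values are illustrative): input_rows = 5,
   kernel_rows = 3, row_stride = 2 gives output_rows = 3 and
     pr = (2*(3-1) + 3 - 5) / 2 = 1,
   i.e. one row of implicit zero padding; out-of-range taps are skipped by
   the bounds check inside the accumulation loop instead of being read. */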
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_cri;
const int output_idx_base_i = i * output_cri;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_ri;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base = output_idx_base_j + k * in_channel;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
for (int l = 0; l < in_channel; ++l) {
TYPE acc = INITACC;
int c = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + l;
TYPE t = *(input_ptr + input_idx);
ACCFN (acc, t);
c++;
}
}
}
int output_idx = output_idx_base + l;
*(output_ptr + output_idx) = UPDATEFN (acc, c);
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial) (value * argv, int argn) {
return FUN_NATIVE (spatial) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]
);
}
CAMLprim value FUN_NATIVE (spatial_backward) (
value vInput, value vOutput_back, value vInput_back,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows,
value vRow_stride, value vCol_stride,
value vPad_rows, value vPad_cols
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *OUB = Caml_ba_array_val(vOutput_back);
struct caml_ba_array *INB = Caml_ba_array_val(vInput_back);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_backward_ptr = (TYPE *) OUB->data;
TYPE *input_backward_ptr = (TYPE *) INB->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int pad_rows = Long_val(vPad_rows);
int pad_cols = Long_val(vPad_cols);
const int ksize = kernel_cols * kernel_rows;
const int output_cri = output_cols * output_rows * in_channel;
const int output_ri = output_rows * in_channel;
const int input_cri = input_cols * input_rows * in_channel;
const int input_ri = input_rows * in_channel;
if (pad_cols < 0) pad_cols = 0;
if (pad_rows < 0) pad_rows = 0;
memset(input_backward_ptr, 0,
batches * input_cols * input_rows * in_channel * sizeof(TYPE));
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_cri;
const int output_idx_base_i = i * output_cri;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_ri;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base = output_idx_base_j + k * in_channel;
const int cstart = j * col_stride - pad_cols;
const int rstart = k * row_stride - pad_rows;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
for (int l = 0; l < in_channel; ++l) {
TYPE m;
int output_idx = output_idx_base + l;
m = *(output_backward_ptr + output_idx);
int idx[ksize];
memset(idx, 0, ksize * sizeof(int));
TYPE acc = INITACC;
int max_idx = 0;
int c = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + l;
idx[c++] = input_idx;
#ifdef OWL_NDARRAY_MAX
TYPE t = *(input_ptr + input_idx);
if (PLT(acc,t)){
acc = t;
max_idx = input_idx;
}
#endif
}
}
}
#ifdef OWL_NDARRAY_AVG
for (int q = 0; q < c; q++) { /* q avoids shadowing the batch index i */
*(input_backward_ptr + idx[q]) += UPDATEFN (m, c);
}
#else
*(input_backward_ptr + max_idx) += UPDATEFN (m, c);
#endif
}
}
}
}
return Val_unit;
}
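/* Note: the backward kernel dispatches on the pooling flavour at compile
   time: with OWL_NDARRAY_AVG each of the c contributing inputs receives
   UPDATEFN(m, c) (presumably m/c for average pooling), while with
   OWL_NDARRAY_MAX the whole upstream gradient m is routed only to the
   recorded argmax position. */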
CAMLprim value FUN_BYTE (spatial_backward) (value * argv, int argn) {
return FUN_NATIVE (spatial_backward) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]
);
}
CAMLprim value FUN_NATIVE (cuboid) (
value vInput, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows, value vOutput_dpts,
value vDpt_stride, value vRow_stride, value vCol_stride,
value vPadding
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
const int output_crdi = output_cols * output_rows * output_dpts * in_channel;
const int output_rdi = output_rows * output_dpts * in_channel;
const int output_di = output_dpts * in_channel;
const int input_crdi = input_cols * input_rows * input_dpts * in_channel;
const int input_rdi = input_rows * input_dpts * in_channel;
const int input_di = input_dpts * in_channel;
memset(output_ptr, 0, batches * output_crdi * sizeof(TYPE));
int pd, pr, pc;
if (padding == 1) {
pc = 0; pr = 0; pd = 0;
} else {
int pad_cols = col_stride * (output_cols - 1) + kernel_cols - input_cols;
int pad_rows = row_stride * (output_rows - 1) + kernel_rows - input_rows;
int pad_dpts = dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts;
pc = pad_cols / 2; if (pc < 0) pc = 0;
pr = pad_rows / 2; if (pr < 0) pr = 0;
pd = pad_dpts / 2; if (pd < 0) pd = 0;
}
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_crdi;
const int output_idx_base_i = i * output_crdi;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_rdi;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base_k = output_idx_base_j + k * output_di;
for (int d = 0; d < output_dpts; ++d) {
const int output_idx_base = output_idx_base_k + d * in_channel;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int dend = dstart + kernel_dpts;
for (int l = 0; l < in_channel; ++l) {
TYPE acc = INITACC;
int counter = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int c = dstart; c < dend; ++c){
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + l;
TYPE t = *(input_ptr + input_idx);
ACCFN (acc, t);
counter++;
}
}
}
}
int output_idx = output_idx_base + l;
*(output_ptr + output_idx) = UPDATEFN (acc, counter);
}
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (cuboid) (value * argv, int argn) {
return FUN_NATIVE (cuboid) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16]
);
}
CAMLprim value FUN_NATIVE (cuboid_backward) (
value vInput, value vOutput_back, value vInput_back,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows, value vOutput_dpts,
value vCol_stride, value vRow_stride, value vDpt_stride,
value vPadding
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *OUB = Caml_ba_array_val(vOutput_back);
struct caml_ba_array *INB = Caml_ba_array_val(vInput_back);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_backward_ptr = (TYPE *) OUB->data;
TYPE *input_backward_ptr = (TYPE *) INB->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int col_stride = Long_val(vCol_stride);
int row_stride = Long_val(vRow_stride);
int dpt_stride = Long_val(vDpt_stride);
int padding = Long_val(vPadding);
const int ksize = kernel_cols * kernel_rows * kernel_dpts;
const int output_crdi = output_cols * output_rows * output_dpts * in_channel;
const int output_rdi = output_rows * output_dpts * in_channel;
const int output_di = output_dpts * in_channel;
const int input_crdi = input_cols * input_rows * input_dpts * in_channel;
const int input_rdi = input_rows * input_dpts * in_channel;
const int input_di = input_dpts * in_channel;
int pd, pr, pc;
if (padding == 1) {
pc = 0; pr = 0; pd = 0;
} else {
int pad_cols = col_stride * (output_cols - 1) + kernel_cols - input_cols;
int pad_rows = row_stride * (output_rows - 1) + kernel_rows - input_rows;
int pad_dpts = dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts;
pc = pad_cols / 2; if (pc < 0) pc = 0;
pr = pad_rows / 2; if (pr < 0) pr = 0;
pd = pad_dpts / 2; if (pd < 0) pd = 0;
}
memset(input_backward_ptr, 0, batches * input_crdi * sizeof(TYPE));
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_crdi;
const int output_idx_base_i = i * output_crdi;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_rdi;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base_k = output_idx_base_j + k * output_di;
for (int d = 0; d < output_dpts; ++d) {
const int output_idx_base = output_idx_base_k + d * in_channel;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int dend = dstart + kernel_dpts;
for (int l = 0; l < in_channel; ++l) {
TYPE m;
int output_idx = output_idx_base + l;
m = *(output_backward_ptr + output_idx);
int idx[ksize];
memset(idx, 0, ksize * sizeof(int));
TYPE acc = INITACC;
int max_idx = 0;
int counter = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int c = dstart; c < dend; ++c) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + l;
idx[counter++] = input_idx;
#ifdef OWL_NDARRAY_MAX
TYPE t = *(input_ptr + input_idx);
if (PLT(acc,t)){
acc = t;
max_idx = input_idx;
}
#endif
}
}
}
}
#ifdef OWL_NDARRAY_AVG
for (int q = 0; q < counter; q++) { /* q avoids shadowing the batch index i */
*(input_backward_ptr + idx[q]) += UPDATEFN (m, counter);
}
#else
*(input_backward_ptr + max_idx) += UPDATEFN (m, counter);
#endif
}
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (cuboid_backward) (value * argv, int argn) {
return FUN_NATIVE (cuboid_backward) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16], argv[17]
);
}
#ifdef OWL_NDARRAY_MAX
CAMLprim value FUN_NATIVE (spatial_arg) (
value vInput_ptr, value vOutput_ptr, value vArgmax_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows,
value vRow_stride, value vCol_stride,
value vPad_rows, value vPad_cols
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
struct caml_ba_array *AG = Caml_ba_array_val(vArgmax_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_ptr = (TYPE *) OU->data;
int64_t *argmax_ptr = (int64_t *) AG->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int pad_rows = Long_val(vPad_rows);
int pad_cols = Long_val(vPad_cols);
if (pad_rows < 0) pad_rows = 0;
if (pad_cols < 0) pad_cols = 0;
const int input_cri = input_cols * input_rows * in_channel;
const int input_ri = input_rows * in_channel;
const int output_cri = output_cols * output_rows * in_channel;
const int output_ri = output_rows * in_channel;
memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
memset(argmax_ptr, 0, batches * output_cri * sizeof(int64_t));
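/* alongside each pooled maximum, record its flat input index so a backward
   pass can route gradients without rescanning the window. */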
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_cri;
const int output_idx_base_i = i * output_cri;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_ri;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base = output_idx_base_j + k * in_channel;
const int cstart = j * col_stride - pad_cols;
const int rstart = k * row_stride - pad_rows;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
for (int l = 0; l < in_channel; ++l) {
TYPE acc = INITACC;
int max_idx = -1;
int c = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + l;
TYPE t = *(input_ptr + input_idx);
if (PLT(acc,t)){
acc = t;
max_idx = input_idx;
}
c++;
}
}
}
int output_idx = output_idx_base + l;
*(output_ptr + output_idx) = acc;
*(argmax_ptr + output_idx) = (int64_t) max_idx;
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial_arg) (value * argv, int argn) {
return FUN_NATIVE (spatial_arg) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]
);
}
#endif /* OWL_NDARRAY_MAX */
#endif /* OWL_ENABLE_TEMPLATE */
|
openmp-unsupported.c | // RUN: %clang_cc1 -triple i386-apple-darwin10 -analyze -analyzer-checker=core.builtin -fopenmp -verify %s
// expected-no-diagnostics
void openmp_parallel_crash_test() {
#pragma omp parallel
;
}
|
wyhash.c | /* Author: Wang Yi <godspeed_china@yeah.net> */
#ifndef wyhash_version_1
#define wyhash_version_1
#include <string.h>
#include <math.h>
#if defined(_MSC_VER) && defined(_M_X64)
#include <intrin.h>
#pragma intrinsic(_umul128)
#endif
const unsigned long long _wyp0=0xa0761d6478bd642full, _wyp1=0xe7037ed1a0b428dbull, _wyp2=0x8ebc6af09c88c6e3ull;
const unsigned long long _wyp3=0x589965cc75374cc3ull, _wyp4=0x1d8e4e27c47d124full, _wyp5=0xeb44accab455d165ull;
static inline unsigned long long _wymum(unsigned long long A, unsigned long long B){
#ifdef __SIZEOF_INT128__
__uint128_t r=A; r*=B; return (r>>64)^r;
#elif defined(_MSC_VER) && defined(_M_X64)
A=_umul128(A, B, &B); return A^B;
#else
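/* portable fallback: build the 64x64 -> 128-bit product from 32-bit halves;
   c accumulates the carries between the partial sums. */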
unsigned long long ha=A>>32, hb=B>>32, la=(unsigned int)A, lb=(unsigned int)B, hi, lo;
unsigned long long rh=ha*hb, rm0=ha*lb, rm1=hb*la, rl=la*lb, t=rl+(rm0<<32), c=t<rl;
lo=t+(rm1<<32); c+=lo<t; hi=rh+(rm0>>32)+(rm1>>32)+c;
return hi^lo;
#endif
}
static inline unsigned long long _wyr08(const unsigned char *p){ unsigned char v; memcpy(&v, p, 1); return v; }
static inline unsigned long long _wyr16(const unsigned char *p){ unsigned short v; memcpy(&v, p, 2); return v; }
static inline unsigned long long _wyr32(const unsigned char *p){ unsigned int v; memcpy(&v, p, 4); return v; }
static inline unsigned long long _wyr64(const unsigned char *p){ unsigned long long v; memcpy(&v, p, 8); return v; }
static inline unsigned long long __wyr64(const unsigned char *p){ return (_wyr32(p)<<32)|_wyr32(p+4); }
unsigned long long wyhash(const void* key, unsigned long long len, unsigned long long seed){
const unsigned char *p=(const unsigned char*)key; unsigned long long i;
for(i=0; i+32<=len; i+=32, p+=32) seed=_wymum(seed^_wyp0, _wymum(_wyr64(p)^_wyp1,_wyr64(p+8)^_wyp2)^_wymum(_wyr64(p+16)^_wyp3,_wyr64(p+24)^_wyp4));
seed^=_wyp0;
switch(len&31){
case 1: seed=_wymum(seed,_wyr08(p)^_wyp1); break;
case 2: seed=_wymum(seed,_wyr16(p)^_wyp1); break;
case 3: seed=_wymum(seed,((_wyr16(p)<<8)|_wyr08(p+2))^_wyp1); break;
case 4: seed=_wymum(seed,_wyr32(p)^_wyp1); break;
case 5: seed=_wymum(seed,((_wyr32(p)<<8)|_wyr08(p+4))^_wyp1); break;
case 6: seed=_wymum(seed,((_wyr32(p)<<16)|_wyr16(p+4))^_wyp1); break;
case 7: seed=_wymum(seed,((_wyr32(p)<<24)|(_wyr16(p+4)<<8)|_wyr08(p+6))^_wyp1); break;
case 8: seed=_wymum(seed,__wyr64(p)^_wyp1); break;
case 9: seed=_wymum(__wyr64(p)^seed,_wyr08(p+8)^_wyp2); break;
case 10: seed=_wymum(__wyr64(p)^seed,_wyr16(p+8)^_wyp2); break;
case 11: seed=_wymum(__wyr64(p)^seed,((_wyr16(p+8)<<8)|_wyr08(p+8+2))^_wyp2); break;
case 12: seed=_wymum(__wyr64(p)^seed,_wyr32(p+8)^_wyp2); break;
case 13: seed=_wymum(__wyr64(p)^seed,((_wyr32(p+8)<<8)|_wyr08(p+8+4))^_wyp2); break;
case 14: seed=_wymum(__wyr64(p)^seed,((_wyr32(p+8)<<16)|_wyr16(p+8+4))^_wyp2); break;
case 15: seed=_wymum(__wyr64(p)^seed,((_wyr32(p+8)<<24)|(_wyr16(p+8+4)<<8)|_wyr08(p+8+6))^_wyp2); break;
case 16: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2); break;
case 17: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,_wyr08(p+16)^_wyp3); break;
case 18: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,_wyr16(p+16)^_wyp3); break;
case 19: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,((_wyr16(p+16)<<8)|_wyr08(p+16+2))^_wyp3); break;
case 20: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,_wyr32(p+16)^_wyp3); break;
case 21: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,((_wyr32(p+16)<<8)|_wyr08(p+16+4))^_wyp3); break;
case 22: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,((_wyr32(p+16)<<16)|_wyr16(p+16+4))^_wyp3); break;
case 23: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,((_wyr32(p+16)<<24)|(_wyr16(p+16+4)<<8)|_wyr08(p+16+6))^_wyp3); break;
case 24: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,__wyr64(p+16)^_wyp3); break;
case 25: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,_wyr08(p+24)^_wyp4); break;
case 26: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,_wyr16(p+24)^_wyp4); break;
case 27: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,((_wyr16(p+24)<<8)|_wyr08(p+24+2))^_wyp4); break;
case 28: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,_wyr32(p+24)^_wyp4); break;
case 29: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,((_wyr32(p+24)<<8)|_wyr08(p+24+4))^_wyp4); break;
case 30: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,((_wyr32(p+24)<<16)|_wyr16(p+24+4))^_wyp4); break;
case 31: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,((_wyr32(p+24)<<24)|(_wyr16(p+24+4)<<8)|_wyr08(p+24+6))^_wyp4); break;
}
return _wymum(seed, len^_wyp5);
}
static inline unsigned long long wyhash64(unsigned long long A, unsigned long long B){ return _wymum(_wymum(A^_wyp0, B^_wyp1), _wyp2); }
static inline double wy2u01(unsigned long long r){ const double _wynorm=1.0/(1ull<<52); return (r&0x000fffffffffffffull)*_wynorm; }
static inline float wy2gau(unsigned long long r){ const float _wynorm1=1.0f/(1ull<<20); return ((r&0x1fffff)+((r>>21)&0x1fffff)+(r>>43))*_wynorm1-3.0f; }
static unsigned long long _wyrand_seed=0;
#define WYRAND_MAX 0xffffffffffffffffull
static inline void wysrand(unsigned long long seed){ _wyrand_seed=seed; }
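/* wyrand is a Weyl-sequence generator: the seed advances by the odd constant
   _wyp0 and is mixed through _wymum; the capture below is atomic under OpenMP. */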
static inline unsigned long long wyrand(void){
unsigned long long s;
#if defined(_OPENMP)
#pragma omp atomic capture
#endif
{
_wyrand_seed += _wyp0;
s = _wyrand_seed;
}
return _wymum(s^_wyp1,s);
}
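/* A minimal usage sketch (not part of the original header; the guard macro
   WYHASH_EXAMPLE is an assumption of this sketch): hash a byte string and
   draw a uniform double from the wyrand generator. */
#ifdef WYHASH_EXAMPLE
#include <stdio.h>
int main(void){
  const char msg[] = "hello wyhash";
  unsigned long long h = wyhash(msg, sizeof(msg)-1, 0); /* seed 0 */
  wysrand(42);
  printf("hash=%016llx uniform=%f\n", h, wy2u01(wyrand()));
  return 0;
}
#endif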
#endif
|
stream.c | /*-----------------------------------------------------------------------*/
/* Program: STREAM */
/* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in MB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
# include <stdio.h>
# include <unistd.h>
# include <math.h>
# include <float.h>
# include <limits.h>
# include <sys/time.h>
# include <stdlib.h>
/*-----------------------------------------------------------------------
* INSTRUCTIONS:
*
* 1) STREAM requires different amounts of memory to run on different
* systems, depending on both the system cache size(s) and the
* granularity of the system timer.
* You should adjust the value of 'STREAM_ARRAY_SIZE' (below)
* to meet *both* of the following criteria:
* (a) Each array must be at least 4 times the size of the
* available cache memory. I don't worry about the difference
* between 10^6 and 2^20, so in practice the minimum array size
* is about 3.8 times the cache size.
* Example 1: One Xeon E3 with 8 MB L3 cache
* STREAM_ARRAY_SIZE should be >= 4 million, giving
* an array size of 30.5 MB and a total memory requirement
* of 91.5 MB.
* Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP)
* STREAM_ARRAY_SIZE should be >= 20 million, giving
* an array size of 153 MB and a total memory requirement
* of 458 MB.
* (b) The size should be large enough so that the 'timing calibration'
* output by the program is at least 20 clock-ticks.
* Example: most versions of Windows have a 10 millisecond timer
* granularity. 20 "ticks" at 10 ms/tick is 200 milliseconds.
* If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec.
* This means that each array must be at least 1 GB, or 128M elements.
*
* Version 5.10 increases the default array size from 2 million
* elements to 10 million elements in response to the increasing
* size of L3 caches. The new default size is large enough for caches
* up to 20 MB.
* Version 5.10 changes the loop index variables from "register int"
* to "ssize_t", which allows array indices >2^32 (4 billion)
* on properly configured 64-bit systems. Additional compiler options
* (such as "-mcmodel=medium") may be required for large memory runs.
*
* Array size can be set at compile time without modifying the source
* code for the (many) compilers that support preprocessor definitions
* on the compile line. E.g.,
* gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M
* will override the default size of 10M with a new size of 100M elements
* per array.
*/
#ifndef STREAM_ARRAY_SIZE
# define STREAM_ARRAY_SIZE 100000000
#endif
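/* A minimal sizing sketch (illustrative only, not part of the benchmark):
 * rule (1a) above asks for each array to be at least 4x the last-level
 * cache, so the minimum element count follows from the two byte sizes.
 */
static inline long stream_min_elements(long llc_bytes, long elem_bytes)
{
    /* e.g. a 32 MiB LLC with 8-byte doubles needs >= 16M elements per array */
    return 4 * llc_bytes / elem_bytes;
}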
/* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result
* for any iteration after the first, therefore the minimum value
* for NTIMES is 2.
* There are no rules on maximum allowable values for NTIMES, but
* values larger than the default are unlikely to noticeably
* increase the reported performance.
* NTIMES can also be set on the compile line without changing the source
* code using, for example, "-DNTIMES=7".
*/
#ifdef NTIMES
#if NTIMES<=1
# define NTIMES 10
#endif
#endif
#ifndef NTIMES
# define NTIMES 10
#endif
/* Users are allowed to modify the "OFFSET" variable, which *may* change the
* relative alignment of the arrays (though compilers may change the
* effective offset by making the arrays non-contiguous on some systems).
* Use of non-zero values for OFFSET can be especially helpful if the
* STREAM_ARRAY_SIZE is set to a value close to a large power of 2.
* OFFSET can also be set on the compile line without changing the source
* code using, for example, "-DOFFSET=56".
*/
#ifndef OFFSET
# define OFFSET 0
#endif
/*
* 3) Compile the code with optimization. Many compilers generate
* unreasonably bad code before the optimizer tightens things up.
* If the results are unreasonably good, on the other hand, the
* optimizer might be too smart for me!
*
* For a simple single-core version, try compiling with:
* cc -O stream.c -o stream
* This is known to work on many, many systems....
*
* To use multiple cores, you need to tell the compiler to obey the OpenMP
* directives in the code. This varies by compiler, but a common example is
* gcc -O -fopenmp stream.c -o stream_omp
* The environment variable OMP_NUM_THREADS allows runtime control of the
* number of threads/cores used when the resulting "stream_omp" program
* is executed.
*
* To run with single-precision variables and arithmetic, simply add
* -DSTREAM_TYPE=float
* to the compile line.
* Note that this changes the minimum array sizes required --- see (1) above.
*
* The preprocessor directive "TUNED" does not do much -- it simply causes the
* code to call separate functions to execute each kernel. Trivial versions
* of these functions are provided, but they are *not* tuned -- they just
* provide predefined interfaces to be replaced with tuned code.
*
*
* 4) Optional: Mail the results to mccalpin@cs.virginia.edu
* Be sure to include info that will help me understand:
* a) the computer hardware configuration (e.g., processor model, memory type)
* b) the compiler name/version and compilation flags
* c) any run-time information (such as OMP_NUM_THREADS)
* d) all of the output from the test case.
*
* Thanks!
*
*-----------------------------------------------------------------------*/
# define HLINE "-------------------------------------------------------------\n"
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif
#ifndef DYNAMIC
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET],
b[STREAM_ARRAY_SIZE+OFFSET],
c[STREAM_ARRAY_SIZE+OFFSET];
#else
static STREAM_TYPE* a;
static STREAM_TYPE* b;
static STREAM_TYPE* c;
#endif
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
static char *label[4] = {"Copy: ", "Scale: ",
"Add: ", "Triad: "};
static double bytes[4] = {
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, /* Copy:  c = a     reads a, writes c   */
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, /* Scale: b = s*c   reads c, writes b   */
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, /* Add:   c = a+b   reads a,b, writes c */
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE  /* Triad: a = b+s*c reads b,c, writes a */
};
extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif
int
main()
{
int quantum, checktick();
int BytesPerWord;
int k;
ssize_t j;
STREAM_TYPE scalar;
double t, times[4][NTIMES];
#ifdef DYNAMIC
a = (STREAM_TYPE*) malloc(sizeof(STREAM_TYPE) * (STREAM_ARRAY_SIZE + OFFSET));
b = (STREAM_TYPE*) malloc(sizeof(STREAM_TYPE) * (STREAM_ARRAY_SIZE + OFFSET));
c = (STREAM_TYPE*) malloc(sizeof(STREAM_TYPE) * (STREAM_ARRAY_SIZE + OFFSET));
#endif
/* --- SETUP --- determine precision and check timing --- */
printf(HLINE);
printf("STREAM version $Revision: 5.10 $\n");
printf(HLINE);
BytesPerWord = sizeof(STREAM_TYPE);
printf("This system uses %d bytes per array element.\n",
BytesPerWord);
printf(HLINE);
#ifdef N
printf("***** WARNING: ******\n");
printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
printf("***** WARNING: ******\n");
#endif
printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
printf("Each kernel will be executed %d times.\n", NTIMES);
printf(" The *best* time for each kernel (excluding the first iteration)\n");
printf(" will be used to compute the reported bandwidth.\n");
#ifdef _OPENMP
printf(HLINE);
#pragma omp parallel
{
#pragma omp master
{
k = omp_get_num_threads();
printf ("Number of Threads requested = %i\n",k);
}
}
#endif
#ifdef _OPENMP
k = 0;
#pragma omp parallel
#pragma omp atomic
k++;
printf ("Number of Threads counted = %i\n",k);
#endif
/* Get initial value for system clock. */
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
a[j] = 1.0;
b[j] = 2.0;
c[j] = 0.0;
}
printf(HLINE);
if ( (quantum = checktick()) >= 1)
printf("Your clock granularity/precision appears to be "
"%d microseconds.\n", quantum);
else {
printf("Your clock granularity appears to be "
"less than one microsecond.\n");
quantum = 1;
}
t = mysecond();
#pragma omp parallel for
for (j = 0; j < STREAM_ARRAY_SIZE; j++)
a[j] = 2.0E0 * a[j];
t = 1.0E6 * (mysecond() - t);
printf("Each test below will take on the order"
" of %d microseconds.\n", (int) t );
printf(" (= %d clock ticks)\n", (int) (t/quantum) );
printf("Increase the size of the arrays if this shows that\n");
printf("you are not getting at least 20 clock ticks per test.\n");
printf(HLINE);
printf("WARNING -- The above is only a rough guideline.\n");
printf("For best results, please be sure you know the\n");
printf("precision of your system timer.\n");
printf(HLINE);
/* --- MAIN LOOP --- repeat test cases NTIMES times --- */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
times[0][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Copy();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
#endif
times[0][k] = mysecond() - times[0][k];
times[1][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
#endif
times[1][k] = mysecond() - times[1][k];
times[2][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Add();
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
#endif
times[2][k] = mysecond() - times[2][k];
times[3][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Triad(scalar);
#else
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[j] = b[j]+scalar*c[j];
#endif
times[3][k] = mysecond() - times[3][k];
}
/* --- SUMMARY --- */
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
for (j=0; j<4; j++)
{
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = MIN(mintime[j], times[j][k]);
maxtime[j] = MAX(maxtime[j], times[j][k]);
}
}
printf("Function Best Rate MB/s Avg time Min time Max time\n");
for (j=0; j<4; j++) {
avgtime[j] = avgtime[j]/(double)(NTIMES-1);
printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
1.0E-06 * bytes[j]/mintime[j],
avgtime[j],
mintime[j],
maxtime[j]);
}
printf(HLINE);
/* --- Check Results --- */
checkSTREAMresults();
printf(HLINE);
#ifdef DYNAMIC
free(a);
free(b);
free(c);
#endif
return 0;
}
# define M 20
int
checktick()
{
int i, minDelta, Delta;
double t1, t2, timesfound[M];
/* Collect a sequence of M unique time values from the system. */
for (i = 0; i < M; i++) {
t1 = mysecond();
while( ((t2=mysecond()) - t1) < 1.0E-6 )
;
timesfound[i] = t1 = t2;
}
/*
* Determine the minimum difference between these M values.
* This result will be our estimate (in microseconds) for the
* clock granularity.
*/
minDelta = 1000000;
for (i = 1; i < M; i++) {
Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
minDelta = MIN(minDelta, MAX(Delta,0));
}
return(minDelta);
}
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
#include <sys/time.h>
double mysecond()
{
struct timeval tp;
struct timezone tzp;
int i;
i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif
void checkSTREAMresults ()
{
STREAM_TYPE aj,bj,cj,scalar;
STREAM_TYPE aSumErr,bSumErr,cSumErr;
STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
double epsilon;
ssize_t j;
int k,ierr,err;
/* reproduce initialization */
aj = 1.0;
bj = 2.0;
cj = 0.0;
/* a[] is modified during timing check */
aj = 2.0E0 * aj;
/* now execute timing loop */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
cj = aj;
bj = scalar*cj;
cj = aj+bj;
aj = bj+scalar*cj;
}
/* accumulate deltas between observed and expected results */
aSumErr = 0.0;
bSumErr = 0.0;
cSumErr = 0.0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
aSumErr += abs(a[j] - aj);
bSumErr += abs(b[j] - bj);
cSumErr += abs(c[j] - cj);
// if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN
}
aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
if (sizeof(STREAM_TYPE) == 4) {
epsilon = 1.e-6;
}
else if (sizeof(STREAM_TYPE) == 8) {
epsilon = 1.e-13;
}
else {
printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
epsilon = 1.e-6;
}
err = 0;
if (abs(aAvgErr/aj) > epsilon) {
err++;
printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(a[j]/aj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,aj,a[j],abs((aj-a[j])/aAvgErr));
}
#endif
}
}
printf(" For array a[], %d errors were found.\n",ierr);
}
if (abs(bAvgErr/bj) > epsilon) {
err++;
printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(b[j]/bj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,bj,b[j],abs((bj-b[j])/bAvgErr));
}
#endif
}
}
printf(" For array b[], %d errors were found.\n",ierr);
}
if (abs(cAvgErr/cj) > epsilon) {
err++;
printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
ierr = 0;
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
if (abs(c[j]/cj-1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,cj,c[j],abs((cj-c[j])/cAvgErr));
}
#endif
}
}
printf(" For array c[], %d errors were found.\n",ierr);
}
if (err == 0) {
printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
}
#ifdef VERBOSE
printf ("Results Validation Verbose Results: \n");
printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}
#ifdef TUNED
/* stubs for "tuned" versions of the kernels */
void tuned_STREAM_Copy()
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
}
void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
}
void tuned_STREAM_Add()
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
}
void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[j] = b[j]+scalar*c[j];
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
|
header.h | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB SP code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
//---------------------------------------------------------------------
// The following include file is generated automatically by the
// "setparams" utility. It defines
// problem_size: 12, 64, 102, 162 (for class T, A, B, C)
// dt_default: default time step for this problem size if no
// config file
// niter_default: default number of iterations for this problem size
//---------------------------------------------------------------------
#include "npbparams.h"
#include "type.h"
#include "timers.h"
/* common /global/ */
extern int grid_points[3], nx2, ny2, nz2;
extern logical timeron;
/* common /constants/ */
extern double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3,
dx1, dx2, dx3, dx4, dx5, dy1, dy2, dy3, dy4,
dy5, dz1, dz2, dz3, dz4, dz5, dssp, dt,
ce[5][13], dxmax, dymax, dzmax, xxcon1, xxcon2,
xxcon3, xxcon4, xxcon5, dx1tx1, dx2tx1, dx3tx1,
dx4tx1, dx5tx1, yycon1, yycon2, yycon3, yycon4,
yycon5, dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1,
zzcon1, zzcon2, zzcon3, zzcon4, zzcon5, dz1tz1,
dz2tz1, dz3tz1, dz4tz1, dz5tz1, dnxm1, dnym1,
dnzm1, c1c2, c1c5, c3c4, c1345, conz1, c1, c2,
c3, c4, c5, c4dssp, c5dssp, dtdssp, dttx1, bt,
dttx2, dtty1, dtty2, dttz1, dttz2, c2dttx1,
c2dtty1, c2dttz1, comz1, comz4, comz5, comz6,
c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16;
#define IMAX PROBLEM_SIZE
#define JMAX PROBLEM_SIZE
#define KMAX PROBLEM_SIZE
#define IMAXP (IMAX/2*2)
#define JMAXP (JMAX/2*2)
//---------------------------------------------------------------------
// To improve cache performance, first two dimensions padded by 1
// for even number sizes only
//---------------------------------------------------------------------
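// Worked example: PROBLEM_SIZE = 64 gives IMAXP = JMAXP = 64, so the arrays
// below are declared with extent 65 in those dimensions; the odd stride keeps
// consecutive planes from mapping onto the same cache sets.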
/* common /fields/ */
extern double u [KMAX][JMAXP+1][IMAXP+1][5];
extern double us [KMAX][JMAXP+1][IMAXP+1];
extern double vs [KMAX][JMAXP+1][IMAXP+1];
extern double ws [KMAX][JMAXP+1][IMAXP+1];
extern double qs [KMAX][JMAXP+1][IMAXP+1];
extern double rho_i [KMAX][JMAXP+1][IMAXP+1];
extern double speed [KMAX][JMAXP+1][IMAXP+1];
extern double square [KMAX][JMAXP+1][IMAXP+1];
extern double rhs [KMAX][JMAXP+1][IMAXP+1][5];
extern double forcing[KMAX][JMAXP+1][IMAXP+1][5];
/* common /work_1d/ */
extern double cv [PROBLEM_SIZE];
extern double rhon[PROBLEM_SIZE];
extern double rhos[PROBLEM_SIZE];
extern double rhoq[PROBLEM_SIZE];
extern double cuf [PROBLEM_SIZE];
extern double q [PROBLEM_SIZE];
extern double ue [PROBLEM_SIZE][5];
extern double buf[PROBLEM_SIZE][5];
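/* each OpenMP thread gets its own copy of these 1-D scratch arrays */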
#pragma omp threadprivate(cv,rhon,rhos,rhoq,cuf,q,ue,buf)
/* common /work_lhs/ */
extern double lhs [IMAXP+1][IMAXP+1][5];
extern double lhsp[IMAXP+1][IMAXP+1][5];
extern double lhsm[IMAXP+1][IMAXP+1][5];
#pragma omp threadprivate(lhs,lhsp,lhsm)
//kai
extern int k1,k2,k3,k4,k5,k6,k7,k8,k9,k10, k11, k12, k13, k14, k15, k16;
//-----------------------------------------------------------------------
// Timer constants
//-----------------------------------------------------------------------
#define t_total 1
#define t_rhsx 2
#define t_rhsy 3
#define t_rhsz 4
#define t_rhs 5
#define t_xsolve 6
#define t_ysolve 7
#define t_zsolve 8
#define t_rdis1 9
#define t_rdis2 10
#define t_txinvr 11
#define t_pinvr 12
#define t_ninvr 13
#define t_tzetar 14
#define t_add 15
#define t_last 15
//-----------------------------------------------------------------------
void initialize();
void lhsinit(int ni, int nj);
void lhsinitj(int nj, int ni);
void exact_solution(double xi, double eta, double zeta, double dtemp[5]);
void exact_rhs();
void set_constants();
void adi();
void compute_rhs();
void x_solve();
void ninvr();
void y_solve();
void pinvr();
void z_solve();
void tzetar();
void add();
void txinvr();
void error_norm(double rms[5]);
void rhs_norm(double rms[5]);
void verify(int no_time_steps, char *Class, logical *verified);
|
delete_inf_refcount.c | // RUN: %libomptarget-compile-run-and-check-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-run-and-check-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-run-and-check-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-run-and-check-x86_64-pc-linux-gnu
// RUN: %libomptarget-compile-run-and-check-nvptx64-nvidia-cuda
#include <stdio.h>
#include <omp.h>
#pragma omp declare target
int isHost;
#pragma omp end declare target
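// isHost is a declare target global, so its device copy carries an
// 'infinite' reference count; the map(delete:) below must remove it anyway.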
int main(void) {
isHost = -1;
#pragma omp target enter data map(to: isHost)
#pragma omp target
{ isHost = omp_is_initial_device(); }
#pragma omp target update from(isHost)
if (isHost < 0) {
printf("Runtime error, isHost=%d\n", isHost);
}
#pragma omp target exit data map(delete: isHost)
// CHECK: Target region executed on the device
printf("Target region executed on the %s\n", isHost ? "host" : "device");
return isHost;
}
|
mg.c | /*
!-------------------------------------------------------------------------!
! !
! N A S P A R A L L E L B E N C H M A R K S 3.0 !
! !
! J A V A V E R S I O N !
! !
! MG !
! !
!-------------------------------------------------------------------------!
! !
! This benchmark is a serial/multithreaded version of the !
! NPB3_0_JAV MG code. !
! !
! Permission to use, copy, distribute and modify this software !
! for any purpose with or without fee is hereby granted. We !
! request, however, that all derived work reference the NAS !
! Parallel Benchmarks 3.0. This software is provided "as is" !
! without express or implied warranty. !
! !
! Information on NPB 3.0, including the Technical Report NAS-02-008 !
! "Implementation of the NAS Parallel Benchmarks in Java", !
! original specifications, source code, results and information !
! on how to submit new results, is available at: !
! !
! http://www.nas.nasa.gov/Software/NPB/ !
! !
! Send comments or suggestions to npb@nas.nasa.gov !
! !
! NAS Parallel Benchmarks Group !
! NASA Ames Research Center !
! Mail Stop: T27A-1 !
! Moffett Field, CA 94035-1000 !
! !
! E-mail: npb@nas.nasa.gov !
! Fax: (650) 604-3957 !
! !
!-------------------------------------------------------------------------!
! Authors: E. Barszcz !
! P. Frederickson !
! A. Woo !
! M. Yarrow !
! Translation to Java and MultiThreaded Code !
! M. Frumkin !
! M. Schultz !
!-------------------------------------------------------------------------!
*/
/*
-=Current Goals=-
-start parallelizing the "v" cycle
-=Command Line=-
mpirun -np 2 ./mg -n 64
parameter options:
-s : seed
-n : problem size
-nit : number of iterations
-lt : sets lt; this appears to control the number of multigrid levels (how deep the "V" cycle goes)
*/
#include <omp.h>
#include "functions/setup.h"
#include "functions/results.h"
#include "random.h"
#include "utility.h"
#include "timer.h"
#include "mg.h"
#define MIN_ELEMS_PER_PROC 2
//Some global constants
const char * BMName="MG"; //Benchmark name
int main(int argc, char **argv)
{
int mpi_rank,mpi_size;
MPI_Init( &argc, &argv );
/*local processor setup-
-initializes variables that will be the same for every processor
-parses command line options
*/
global_params = setup_local(argc,argv);
//k is the current level. It is passed down through subroutine args and is NOT global.
//the variable "it" is the current iteration counter.
int k, it;
//pointers of pointers in 3-D
REAL**** u, //approximation matrix
**** r, //residual error matrix
*** v, //values matrix
a[4], c[4]; //stencil coefficients: a[] drives the residual operator, c[] the smoother
double rnm2, tinit;
int n1, n2, n3, nit;
int i;
//use global parameters to set these
lt = global_params->lt;
nit = global_params->n_it;
nx[lt-1] = global_params->n_size;
ny[lt-1] = global_params->n_size;
nz[lt-1] = global_params->n_size;
Class = global_params->class;
//set a and b matrices for smoother using the class value
set_a(a,global_params);
set_c(c,global_params);
k = lt;
//setup initializes some of the variables used...
grid_t grid;
setup(&n1, &n2, &n3, &grid);
//PPF_Print( MPI_COMM_WORLD, "n1=%d, n2=%d, n3=%d, nxk=%d, nyk=%d\n", n1, n2, n3, nx[lt-1], ny[lt-1]);
//processor 0 tracks time and sets up solution matrix
if(global_params->mpi_rank==0){
init_timers();
timer_start(T_init);
}
v = alloc3D(n1, n2, n3);
gen_v(v,n1,n2,n3,nx[lt-1],ny[lt-1], &grid);
//Initialize arrays- currently does this in strips
// printf("N1= %d, N3=%d\n",n1-2,((n3-2) / global_params->mpi_size + 2 + 2));
//PPF_Print( MPI_COMM_WORLD, "n3=%d\n", n3 );
// how the matrices should be split up from the start.
// TODO: write a new alloc function that is smarter than this one....
//u = allocGrids(lt, (n1-2)/global_params->mpi_size, n2-2, n3-2, 2);
//r = allocGrids(lt, (n1-2)/global_params->mpi_size, n2-2, n3-2, 2);
// Just for testing, should be removed in the finished design
u = allocGrids(lt, n1-2, n2-2, n3-2, 2);
//PPF_Print( MPI_COMM_WORLD, "n1=%d, n2=%d, n3=%d\n", n1, n2, n3 );
r = allocGrids(lt, n1-2, n2-2, n3-2, 2);
zero3(u[0],n1,n2,(n3-2) / global_params->mpi_size + 2); //zero-out all of u
//create local v (different strip per processor)
REAL *** local_v = splitMatrix(
v, //matrix to split
n1,n2,n3, //matrix size
global_params->mpi_rank,
global_params->mpi_size,
false //don't put a buffer on this!
);
//exchange local_v data
if(global_params->mpi_size > 1){
exchange(local_v,n1,n2,(n3-2)/global_params->mpi_size+2);
}
comm3(local_v,n1,n2,(n3-2)/global_params->mpi_size+2);
// if(global_params->mpi_rank == 0){
// printMatrix(local_v,n1,n2,(n3-2)/global_params->mpi_size+2);
// }
MPI_Barrier(MPI_COMM_WORLD);
if(global_params->mpi_rank==0){
timer_stop(T_init);
timer_start(T_bench);
}
//PPF_Print( MPI_COMM_WORLD, "n1=%d, n2=%d, n3=%d\n", n1, n2, n3 );
resid(u[0],local_v,r[0],n1,n2,(n3-2)/global_params->mpi_size + 2,a);
// if(global_params->mpi_rank == 1){
// printMatrix(r[0],n1,n2,(n3-2)/global_params->mpi_size + 2);
// }
//each processor runs the multigrid algorithm
for(it=1;it<=nit;it++) {
//actual call to multigrid
//MPI_Barrier(MPI_COMM_WORLD);
mg3P(u,local_v,r,a,c,n1,n2,(n3-2)/global_params->mpi_size + 2);
//compute the residual error here...
//only pass in the spliced portion of v...
resid(u[0],local_v,r[0],n1,n2,(n3-2)/global_params->mpi_size + 2,a);
}
MPI_Barrier(MPI_COMM_WORLD);
if(global_params->mpi_rank==0){
timer_stop(T_bench);
tinit = timer_elapsed(T_init);
printf(" Initialization time: %f seconds\n", tinit);
}
//PPF_Print( MPI_COMM_WORLD, "n1=%d, n2=%d, n3=%d, nx=%d, ny=%d, nz=%d\n", n1, n2, n3, nx[lt-1],ny[lt-1],nz[lt-1] );
rnm2 = norm2u3(r[0], n1, n2, (n3-2)/global_params->mpi_size+2, nx[lt-1],ny[lt-1],nz[lt-1]);
MPI_Barrier(MPI_COMM_WORLD);
//validates the results and prints to console
if(global_params->mpi_rank==0){
double tm = timer_elapsed(T_bench);
interpret_results(rnm2, global_params, tm);
}
free(v);
freeGrids(u);
freeGrids(r);
MPI_Barrier(MPI_COMM_WORLD);
//PPF_Print( MPI_COMM_WORLD, "Message from %N - finished\n" );
MPI_Finalize();
return 0;
}
void gen_v(REAL*** z,int n1,int n2,int n3,int nx,int ny, grid_t* grid)
{
//---------------------------------------------------------------------
// Generate the right-hand side of the equation A*u = v
// for the serial version of the code.
//---------------------------------------------------------------------
int m0, m1, mm=10, i1, i2, i3, i;
int *j1 = malloc(sizeof(int)*mm*2),
*j2 = malloc(sizeof(int)*mm*2),
*j3 = malloc(sizeof(int)*mm*2);
zran3(z,n1,n2,n3,nx,ny,j1,j2,j3, &m1, &m0, mm, grid);
#pragma omp parallel for private(i1,i2,i3)
for(i3=0;i3<n3;i3++)
for(i2=0;i2<n2;i2++)
for(i1=0;i1<n1;i1++)
z[i3][i2][i1] = 0.0;
for(i=mm;i>=m0;i--)
z[j3[i-1]][j2[i-1]][j1[i-1]] = -1.0;
for(i=mm;i>=m1;i--)
z[j3[i-1+mm]][j2[i-1+mm]][j1[i-1+mm]] = 1.0;
free(j1);
free(j2);
free(j3);
comm3(z,n1,n2,n3);
}
void zran3(REAL ***z,int n1,int n2,int n3,int nx,int ny,int* j1,int* j2,int* j3,int *m1, int *m0, int mm, grid_t* grid){
int is1 = grid->is1, is2 = grid->is2, is3 = grid->is3, ie1 = grid->ie1, ie2 = grid->ie2, ie3 = grid->ie3;
int i, i0, i1, i2, i3, d1, e1, e2, e3;
int *jg = malloc(sizeof(int)*4*mm*2);
double xx, x0, x1, a1, a2, ai;
double best;
double *ten= malloc(sizeof(double)*mm*2);
zero3(z,n1,n2,n3);
i = is1-2+nx*(is2-2+ny*(is3-2));
d1 = ie1 - is1 + 1;
e1 = ie1 - is1 + 2;
e2 = ie2 - is2 + 2;
e3 = ie3 - is3 + 2;
double seed=314159265.0, a=pow(5.0,13);
//double rng = drand48();
a1 = rnd_power( a, nx );
a2 = rnd_power( a, nx*ny );
ai = rnd_power( a, i );
x0 = rnd_randlc( seed, ai );
for(i3=2;i3<=e3;i3++) {
x1 = x0;
for(i2 = 2;i2<=e2;i2++) {
xx = x1;
rnd_vranlc( d1, xx, a,z[0][0],(1+n1*(i2-1+n2*(i3-1))));
x1 = rnd_randlc( x1, a1 );
}
x0 = rnd_randlc( x0, a2 );
}
for(i=0;i<mm;i++) {
ten[i+mm] = 0.0;
j1[i+mm] = 0;
j2[i+mm] = 0;
j3[i+mm] = 0;
ten[i] = 1.0;
j1[i] = 0;
j2[i] = 0;
j3[i] = 0;
}
for(i3=1;i3<n3-1;i3++) {
for(i2=1;i2<n2-1;i2++) {
for(i1=1;i1<n1-1;i1++) {
if( z[i3][i2][i1] > ten[mm] ) {
ten[mm] = z[i3][i2][i1];
j1[mm] = i1;
j2[mm] = i2;
j3[mm] = i3;
bubble( ten, j1, j2, j3, mm, 1 );
}
if( z[i3][i2][i1] < ten[0] ) {
ten[0] = z[i3][i2][i1];
j1[0] = i1;
j2[0] = i2;
j3[0] = i3;
bubble( ten, j1, j2, j3, mm, 0 );
}
}
}
}
//---------------------------------------------------------------------
// Now which of these are globally best?
//---------------------------------------------------------------------
i1 = mm;
i0 = mm;
for(i=mm-1;i>=0;i--)
{
//best = z[0][0][j1[i1-1+mm]+n1*(j2[i1-1+mm]+n2*(j3[i1-1+mm]))];
best = z[j3[i1-1+mm]][j2[i1-1+mm]][j1[i1-1+mm]];
if(best==z[j3[i1-1+mm]][j2[i1-1+mm]][j1[i1-1+mm]])
{
jg[4*(i+mm)] = 0;
jg[1+4*(i+mm)] = is1 - 2 + j1[i1-1+mm];
jg[2+4*(i+mm)] = is2 - 2 + j2[i1-1+mm];
jg[3+4*(i+mm)] = is3 - 2 + j3[i1-1+mm];
i1 = i1-1;
} else {
jg[4*(i+mm)] = 0;
jg[1+4*(i+mm)] = 0;
jg[2+4*(i+mm)] = 0;
jg[3+4*(i+mm)] = 0;
}
ten[i+mm] = best;
best = z[j3[i0-1]][j2[i0-1]][j1[i0-1]];
if(best==z[j3[i0-1]][j2[i0-1]][j1[i0-1]]) {
jg[4*i] = 0;
jg[1+4*i] = is1 - 2 + j1[i0-1];
jg[2+4*i] = is2 - 2 + j2[i0-1];
jg[3+4*i] = is3 - 2 + j3[i0-1];
i0 = i0-1;
} else {
jg[4*i] = 0;
jg[1+4*i] = 0;
jg[2+4*i] = 0;
jg[3+4*i] = 0;
}
ten[i] = best;
}
free(jg);
free(ten);
*m1 = i1+1;
*m0 = i0+1;
}
void gen_v_orig(REAL ***z,int n1,int n2,int n3,int nx,int ny, grid_t* grid){
int is1 = grid->is1, is2 = grid->is2, is3 = grid->is3, ie1 = grid->ie1, ie2 = grid->ie2, ie3 = grid->ie3;
int i0, m0, m1;
int mm=10, i1, i2, i3, d1, e1, e2, e3;
double xx, x0, x1, a1, a2, ai;
double best;
double *ten= malloc(sizeof(double)*mm*2);
int i;
int *j1 = malloc(sizeof(int)*mm*2),
*j2 = malloc(sizeof(int)*mm*2),
*j3 = malloc(sizeof(int)*mm*2);
int *jg = malloc(sizeof(int)*4*mm*2);
zero3(z,n1,n2,n3);
i = is1-2+nx*(is2-2+ny*(is3-2));
d1 = ie1 - is1 + 1;
e1 = ie1 - is1 + 2;
e2 = ie2 - is2 + 2;
e3 = ie3 - is3 + 2;
double seed=314159265.0, a=pow(5.0,13);
//double rng = drand48();
a1 = rnd_power( a, nx );
a2 = rnd_power( a, nx*ny );
ai = rnd_power( a, i );
x0 = rnd_randlc( seed, ai );
for(i3=2;i3<=e3;i3++)
{
x1 = x0;
for(i2 = 2;i2<=e2;i2++)
{
xx = x1;
rnd_vranlc( d1, xx, a,z[0][0],(1+n1*(i2-1+n2*(i3-1))));
x1 = rnd_randlc( x1, a1 );
}
x0 = rnd_randlc( x0, a2 );
}
for(i=0;i<mm;i++) {
ten[i+mm] = 0.0;
j1[i+mm] = 0;
j2[i+mm] = 0;
j3[i+mm] = 0;
ten[i] = 1.0;
j1[i] = 0;
j2[i] = 0;
j3[i] = 0;
}
for(i3=1;i3<n3-1;i3++) {
for(i2=1;i2<n2-1;i2++) {
for(i1=1;i1<n1-1;i1++) {
if( z[i3][i2][i1] > ten[mm] ) {
ten[mm] = z[i3][i2][i1];
j1[mm] = i1;
j2[mm] = i2;
j3[mm] = i3;
bubble( ten, j1, j2, j3, mm, 1 );
}
if( z[i3][i2][i1] < ten[0] ) {
ten[0] = z[i3][i2][i1];
j1[0] = i1;
j2[0] = i2;
j3[0] = i3;
bubble( ten, j1, j2, j3, mm, 0 );
}
}
}
}
//---------------------------------------------------------------------
// Now which of these are globally best?
//---------------------------------------------------------------------
i1 = mm;
i0 = mm;
for(i=mm-1;i>=0;i--)
{
//best = z[0][0][j1[i1-1+mm]+n1*(j2[i1-1+mm]+n2*(j3[i1-1+mm]))];
best = z[j3[i1-1+mm]][j2[i1-1+mm]][j1[i1-1+mm]];
if(best==z[j3[i1-1+mm]][j2[i1-1+mm]][j1[i1-1+mm]]) {
jg[4*(i+mm)] = 0;
jg[1+4*(i+mm)] = is1 - 2 + j1[i1-1+mm];
jg[2+4*(i+mm)] = is2 - 2 + j2[i1-1+mm];
jg[3+4*(i+mm)] = is3 - 2 + j3[i1-1+mm];
i1 = i1-1;
} else {
jg[4*(i+mm)] = 0;
jg[1+4*(i+mm)] = 0;
jg[2+4*(i+mm)] = 0;
jg[3+4*(i+mm)] = 0;
}
ten[i+mm] = best;
best = z[j3[i0-1]][j2[i0-1]][j1[i0-1]];
if(best==z[j3[i0-1]][j2[i0-1]][j1[i0-1]]) {
jg[4*i] = 0;
jg[1+4*i] = is1 - 2 + j1[i0-1];
jg[2+4*i] = is2 - 2 + j2[i0-1];
jg[3+4*i] = is3 - 2 + j3[i0-1];
i0 = i0-1;
} else {
jg[4*i] = 0;
jg[1+4*i] = 0;
jg[2+4*i] = 0;
jg[3+4*i] = 0;
}
ten[i] = best;
}
free(jg);
free(ten);
m1 = i1+1;
m0 = i0+1;
#pragma omp parallel for private(i1,i2,i3)
for(i3=0;i3<n3;i3++)
for(i2=0;i2<n2;i2++)
for(i1=0;i1<n1;i1++)
z[i3][i2][i1] = 0.0;
for(i=mm;i>=m0;i--)
z[j3[i-1]][j2[i-1]][j1[i-1]] = -1.0;
for(i=mm;i>=m1;i--)
z[j3[i-1+mm]][j2[i-1+mm]][j1[i-1+mm]] = 1.0;
free(j1);
free(j2);
free(j3);
comm3(z,n1,n2,n3);
}
double norm2u3(REAL*** r,int n1,int n2,int n3, int nx,int ny,int nz)
{
//---------------------------------------------------------------------
// norm2u3 evaluates approximations to the L2 norm and the
// uniform (or L-infinity or Chebyshev) norm, under the
// assumption that the boundaries are periodic or zero. Add the
// boundaries in with half weight (quarter weight on the edges
// and eighth weight at the corners) for inhomogeneous boundaries.
//---------------------------------------------------------------------
// double precision r(n1,n2,n3)
REAL local_rnm2 = 0.0, global_rnm2=0.0;
int i1,i2,i3;
#pragma omp parallel private(i1,i2,i3)
{
#pragma omp for reduction (+:local_rnm2)
for(i3=1;i3<n3-1;i3++)
{
for(i2=1;i2<n2-1;i2++)
{
for(i1=1;i1<n1-1;i1++)
{
local_rnm2+=r[i3][i2][i1]*r[i3][i2][i1];
}
}
}
}
MPI_Reduce(&local_rnm2, &global_rnm2, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
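//MPI_Reduce delivers the reduced sum only to rank 0; other ranks fall
//through and return the initial 0.0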
if(global_params->mpi_rank==0){
// printf("\nNZ=%d\nN3=%d",nz,n3);
global_rnm2 = sqrt( global_rnm2 / ((double) nx*ny*nz ));
}
return global_rnm2;
}
void resid(REAL ***u, REAL*** v, REAL*** r,
int n1,int n2,int n3, double a[4])
{
//NOTE: Au = v - r
//---------------------------------------------------------------------
// resid computes the residual: r = v - Au
//
// This implementation costs 15A + 4M per result, where
// A and M denote the costs of Addition (or Subtraction) and
// Multiplication, respectively.
// Presuming coefficient a(1) is zero (the NPB assumes this,
// but it is thus not a general case), 3A + 1M may be eliminated,
// resulting in 12A + 3M.
// Note that this vectorizes, and is also fine for cache
// based machines.
//---------------------------------------------------------------------
int i3, i2, i1;
static bool resid_init = false;
//Private arrays for each thread
static double **_u1;
static double **_u2;
double *u1, *u2;
if (!resid_init) {
_u1 = (REAL**)malloc(sizeof(REAL*)*omp_get_max_threads());
_u2 = (double**)malloc(sizeof(REAL*)*omp_get_max_threads());
for (i1 = 0; i1 < omp_get_max_threads(); i1++) {
_u1[i1] = malloc(sizeof(REAL)*(nm+1));
_u2[i1] = malloc(sizeof(REAL)*(nm+1));
}
resid_init = true;
}
//cycles through each dimension
#pragma omp parallel private(i1,i2,i3,u1,u2)
{
u1 = _u1[omp_get_thread_num()];
u2 = _u2[omp_get_thread_num()];
#pragma omp for
for(i3=1;i3<n3-1;i3++)
{
for(i2=1;i2<n2-1;i2++)
{
for(i1=0;i1<n1;i1++)
{
u1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1]
+ u[i3-1][i2][i1] + u[i3+1][i2][i1];
u2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1]
+ u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1];
}
for(i1=1;i1<n1-1;i1++)
{
r[i3][i2][i1] = v[i3][i2][i1]
- a[0] * u[i3][i2][i1]
//---------------------------------------------------------------------
// Assume a(1) = 0 (Enable 2 lines below if a(1) not= 0)
//---------------------------------------------------------------------
// > - a[1] * ( u(i1-1,i2,i3) + u(i1+1,i2,i3)
// > + u1(i1) )
//---------------------------------------------------------------------
- a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] )
- a[3] * ( u2[i1-1] + u2[i1+1] );
}
}
}
}
//---------------------------------------------------------------------
// exchange boundary data
// 1- across processors
// 2- across strips
//---------------------------------------------------------------------
//exchange(r,n1,n2,n3);
comm3(r,n1,n2,n3);
}
//Multigrid 3-Dimensions function
void mg3P(REAL**** u, REAL*** v, REAL**** r, double a[4], double c[4], int n1,int n2,int n3)
{
//---------------------------------------------------------------------
// multigrid V-cycle routine
//---------------------------------------------------------------------
// double precision u(nr),v(nv),r(nr)
int j,k,x,y,z;
int split, split_limit;
split = 0;
//---------------------------------------------------------------------
// down cycle.
// restrict the residual from the fine grid to the coarse
//---------------------------------------------------------------------
//MEMORY TODO : check m1,m2,m3
for(k=lt-1;k>=1;k--) {
j = k-1;
//PPF_Print(MPI_COMM_WORLD, "m3k=%d\n", m3[k]);
// if too few elements per proc in Z, reduce the active processor count by a factor of two.
if((m3[k] - 2) < MIN_ELEMS_PER_PROC && global_params->mpi_size > 1){
MPI_Request req;
MPI_Status status;
REAL* message;
if(split==0) split = 1;
global_params->mpi_size = global_params->mpi_size - split;
// if not divisible by split, send of your matrix
if(global_params->mpi_rank % (split*2) != 0 && global_params->active){
//if(global_params->mpi_rank == 2){
// printf("rank: %d\n", global_params->mpi_rank);
// printMatrix(r[lt-1-k],m1[k],m2[k],m3[k]);
//}
global_params->active = false;
message = flattenMatrix(r[lt-1-k], m1[k], m2[k], m3[k]);
MPI_Isend(message, m1[k]*m2[k]*m3[k], MPI_DOUBLE,
global_params->mpi_rank-split, 4, MPI_COMM_WORLD, &req);
MPI_Wait(&req, &status);
free(message);
}
// else recv elements to work on
else if(global_params->active){
//if(global_params->mpi_rank == 0){
// printf("rank: %d\n", global_params->mpi_rank);
// printMatrix(r[lt-1-k],m1[k],m2[k],m3[k]);
//}
message = (REAL*) malloc(sizeof(REAL)*m1[k]*m2[k]*m3[k]);
MPI_Irecv(message, m1[k]*m2[k]*m3[k], MPI_DOUBLE,
global_params->mpi_rank + split, 4,MPI_COMM_WORLD, &req);
MPI_Wait(&req, &status);
for(z=2; z<m3[k]+1; z++){
for(y=0; y<m2[k]; y++){
for(x=0; x<m1[k]; x++){
r[lt-1-k][z][y][x] = message[m2[k]*m1[k]*(z-1) + m1[k]*y + x];
}
}
}
//if(global_params->mpi_rank==0) printMatrix(r[lt-1-k],m1[k],m2[k],m3[k]+1);
free(message);
}
split *= 2;
m3[k] = m3[k] + 1;
global_params->neig_offset = split;
}
//PPF_Print(MPI_COMM_WORLD, "Active: %d\n", global_params ? 1 : 0);
//PPF_Print(MPI_COMM_WORLD, "k=%d, j=%d, m1k=%d, m2k=%d, m3k=%d, m1j=%d, m2j, m3j\n", k, j, m1[k], m2[k],m3[k],m1[j],m2[j],m3[j]);
if(global_params->active){
rprj3_mpi(r[lt-1-k],m1[k],m2[k],m3[k],r[lt-1-j],m1[j],m2[j],m3[j]);
}
//ERROR HAS HAPPENED BY THIS TIME- it looks like the last matrix is not filled out
// if(global_params->mpi_rank == 0 && k==lt-1){
// printMatrix(r[lt-1-k],m1[k],m2[k],m3[k]);
// }
}
k = 0;
//global_params->mpi_size = global_params->mpi_size - split;
//PPF_Print(MPI_COMM_WORLD, "split: %d\n", split);
//PPF_Print(MPI_COMM_WORLD, "Active: %d\n", global_params->active ? 1 : 0);
//PPF_Print(MPI_COMM_WORLD, "mpi_size: %d\n", global_params->mpi_size);
//---------------------------------------------------------------------
// compute an approximate solution on the coarsest grid
//---------------------------------------------------------------------
if(global_params->active){
zero3(u[lt-1-k],m1[k],m2[k],m3[k]);
psinv(r[lt-1-k],u[lt-1-k],m1[k],m2[k],m3[k], c);
}
for(k=1;k<lt-1;k++) {
j = k-1;
// split to more processors when level is reached again
//---------------------------------------------------------------------
// prolongate from level k-1 to k
//---------------------------------------------------------------------
if(global_params->active){
zero3(u[lt-1-k],m1[k],m2[k],m3[k]);
interp_mpi(u[lt-1-j],m1[j],m2[j],m3[j],u[lt-1-k], m1[k],m2[k],m3[k]);
//---------------------------------------------------------------------
// compute residual for level k
//---------------------------------------------------------------------
resid(u[lt-1-k],r[lt-1-k],r[lt-1-k],m1[k],m2[k],m3[k], a);
//---------------------------------------------------------------------
// apply smoother
//---------------------------------------------------------------------
psinv(r[lt-1-k],u[lt-1-k],m1[k],m2[k],m3[k],c);
}
if((m3[k]-2) >= MIN_ELEMS_PER_PROC && global_params->mpi_size < global_params->mpi_orig_size && split > 1){
MPI_Request req = MPI_REQUEST_NULL;
MPI_Status status;
REAL* message;
REAL* data;
split = split/2;
split = (split < 1) ? 1 : split;
split_limit = (split < 2) ? 2 : split;
global_params->mpi_size = global_params->mpi_size + split;
if((global_params->mpi_rank % split_limit) == 0 && global_params->active){
// send data
//if(global_params->mpi_rank == 0){
// printf("rank: %d\n", global_params->mpi_rank);
// printMatrix(r[lt-1-k],m1[k],m2[k],m3[k]);
//}
//printf("split: %d, sending: %d\n", split, global_params->mpi_rank);
data = flattenMatrix(r[lt-1-k], m1[k], m2[k], m3[k]);
message = (REAL*)malloc(sizeof(REAL)*m1[k]*m2[k]*(m3[k]-1));
for(z=1; z<m3[k]; z++){
for(y=0; y<m2[k]; y++){
for(x=0; x<m1[k]; x++){
message[m2[k]*m1[k]*(z-1) + m1[k]*y + x] = data[m2[k]*m1[k]*z + m1[k]*y + x];
}
}
}
MPI_Isend(message, m1[k]*m2[k]*(m3[k]-1), MPI_DOUBLE,
(global_params->mpi_rank+split), 4, MPI_COMM_WORLD, &req);
MPI_Wait(&req, &status);
free(data);
free(message);
} else if((global_params->mpi_rank % split) == 0 && !global_params->active){
// receive data
//printf("split: %d, receiving: %d, from: %d\n", split, global_params->mpi_rank, global_params->mpi_rank - split);
global_params->active = true;
message = (REAL*)malloc(sizeof(REAL)*m1[k]*m2[k]*(m3[k]-1));
MPI_Irecv(message, m1[k]*m2[k]*(m3[k]-1), MPI_DOUBLE,
(global_params->mpi_rank-split), 4,MPI_COMM_WORLD, &req);
MPI_Wait(&req, &status);
for(z=0; z<m3[k]-1; z++){
for(y=0; y<m2[k]; y++){
for(x=0; x<m1[k]; x++){
r[lt-1-k][z][y][x] = message[m2[k]*m1[k]*z + m1[k]*y + x];
}
}
}
//if(global_params->mpi_rank == 2){
// printf("rank: %d\n", global_params->mpi_rank);
// printMatrix(r[lt-1-k],m1[k],m2[k],m3[k]);
//}
free(message);
}
m3[k] -= 1;
//PPF_Print(MPI_COMM_WORLD, "m3k=%d", m3[k]);
//if(global_params->mpi_rank == 0){
// printf("rank: %d\n", global_params->mpi_rank);
// printMatrix(r[lt-1-k],m1[k],m2[k],m3[k]);
//}
global_params->neig_offset = split;
}
}
j = lt - 2;
k = lt - 1;
interp_mpi(u[lt-1-j],m1[j],m2[j],m3[j],u[0], n1,n2,n3);
resid(u[0],v,r[0],n1,n2,n3, a);
psinv(r[0],u[0],n1,n2,n3,c);
}
/*NOTE: rprj3 projects onto the next coarser grid*/
void rprj3_mpi(REAL*** r, int m1k,int m2k,int m3k, REAL*** s,int m1j,int m2j,int m3j) {
int j3, j2, j1, i3, i2, i1, d1, d2, d3;
double x2,y2;
double *x1,*y1;
static bool rprj3_init = false;
//Private arrays for each thread
static double **_x1;
static double **_y1;
//keeps track of the number of ghost cell regions the processor should wait for; the first and last strips require fewer
int receiveCount = 2;
//Exchange boundary data across processors
//exchange(r,m1k,m2k,m3k);
//this seems to be incorrect - (it's larger than necessary)
//it initializes for the largest possible array needed (finest level), instead of adjusting to the current level size
if (!rprj3_init)
{
_x1 = (double**)malloc(sizeof(double*)*omp_get_max_threads());
_y1 = (double**)malloc(sizeof(double*)*omp_get_max_threads());
for (i1 = 0; i1 < omp_get_max_threads(); i1++)
{
_x1[i1] = malloc(sizeof(double)*(nm+1));
_y1[i1] = malloc(sizeof(double)*(nm+1));
}
rprj3_init = true;
}
d1 = (m1k==3) ? 2 : 1;
d2 = (m2k==3) ? 2 : 1;
d3 = (m3k==3) ? 2 : 1;
#pragma omp parallel private(j1,j2,j3,i1,i2,i3,x1,y1,x2,y2)
{
x1 = _x1[omp_get_thread_num()];
y1 = _y1[omp_get_thread_num()];
#pragma omp for
for(j3=2;j3<=m3j-1;j3++)
{
i3 = 2*j3-d3-1;
for(j2=2;j2<=m2j-1;j2++)
{
i2 = 2*j2-d2-1;
for(j1=2;j1<=m1j;j1++)
{
i1 = 2*j1-d1-1;
x1[i1-1] = r[i3][i2-1][i1-1] + r[i3][i2+1][i1-1]
+ r[i3-1][i2][i1-1] + r[i3+1][i2][i1-1];
y1[i1-1] = r[i3-1][i2-1][i1-1] + r[i3+1][i2-1][i1-1]
+ r[i3-1][i2+1][i1-1] + r[i3+1][i2+1][i1-1];
}
for(j1=2;j1<=m1j-1;j1++)
{
i1 = 2*j1-d1-1;
y2 = r[i3-1][i2-1][i1] + r[i3+1][i2-1][i1]
+ r[i3-1][i2+1][i1] + r[i3+1][i2+1][i1];
x2 = r[i3][i2-1][i1] + r[i3][i2+1][i1]
+ r[i3-1][i2][i1] + r[i3+1][i2][i1];
s[j3-1][j2-1][j1-1] =
0.5 * r[i3][i2][i1]
+ 0.25 * ( r[i3][i2][i1-1]+r[i3][i2][i1+1]+x2)
+ 0.125 * ( x1[i1-1] + x1[i1+1] + y2)
+ 0.0625 * ( y1[i1-1] + y1[i1+1] );
}
}
}
}
comm3(s,m1j,m2j,m3j);
}
void exchange(REAL ***r, int n1,int n2,int n3 ){
REAL ** ghost_data = getGhostCells(r,n1,n2,n3);
//just send the x*y planes- not including the buffer
int messageSize = n1*n2;
int i1,i2;
REAL ** results = exchange_data(ghost_data,messageSize);
// put results into the r matrix
for (i2 = 0; i2 < n2; i2++) {
for (i1 = 0; i1 < n1; i1++) {
if(global_params->mpi_rank != 0 ){
r[0][i2][i1] = results[0][(n1)*i2 + i1];
}
if(global_params->mpi_rank != global_params->mpi_size - 1 ){
r[n3-1][i2][i1] = results[1][(n1)*i2 + i1];
}
}
}
free2D(ghost_data, 2);
free2D(results, 2);
}
//z is the coarser level, and u is the finer level
void interp_mpi(REAL ***z, int mm1, int mm2, int mm3, REAL ***u,
int n1,int n2,int n3 ){
int i3, i2, i1, d1, d2, d3, t1, t2, t3;
// note that m = 1037 in globals.h but for this only need to be
// 535 to handle up to 1024^3
// integer m
// parameter( m=535 )
int m=535;
double *z1,*z2,*z3;
static bool interp_init = false;
//Exchange boundary data across processors
//exchange(z,mm1,mm2,mm3);
//Private arrays for each thread
static double **_z1;
static double **_z2;
static double **_z3;
if (!interp_init) {
_z1 = (double**)malloc(sizeof(double*)*omp_get_max_threads());
_z2 = (double**)malloc(sizeof(double*)*omp_get_max_threads());
_z3 = (double**)malloc(sizeof(double*)*omp_get_max_threads());
for (i1 = 0; i1 < omp_get_max_threads(); i1++) {
_z1[i1] = malloc(sizeof(double)*m);
_z2[i1] = malloc(sizeof(double)*m);
_z3[i1] = malloc(sizeof(double)*m);
}
interp_init = true;
}
if( n1 != 3 && n2 != 3 && n3 != 3 ) {
#pragma omp parallel private(i1,i2,i3,z1,z2,z3)
{
z1 = _z1[omp_get_thread_num()];
z2 = _z2[omp_get_thread_num()];
z3 = _z3[omp_get_thread_num()];
#pragma omp for
for(i3=1;i3<=mm3-1;i3++)
{
for(i2=1;i2<=mm2-1;i2++)
{
for(i1=1;i1<=mm1;i1++)
{
z1[i1-1] = z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1];
z2[i1-1] = z[i3][i2-1][i1-1] + z[i3-1][i2-1][i1-1];
z3[i1-1] = z[i3][i2][i1-1] + z[i3][i2-1][i1-1] + z1[i1-1];
}
for(i1=1;i1<=mm1-1;i1++)
{
u[2*i3-2][2*i2-2][2*i1-2] += z[i3-1][i2-1][i1-1];
u[2*i3-2][2*i2-2][2*i1-1] +=
0.5*(z[i3-1][i2-1][i1] + z[i3-1][i2-1][i1-1]);
}
for(i1=1;i1<=mm1-1;i1++)
{
u[2*i3-2][2*i2-1][2*i1-2] += 0.5 * z1[i1-1];
u[2*i3-2][2*i2-1][2*i1-1] += 0.25 * (z1[i1-1] + z1[i1] );
}
for(i1=1;i1<=mm1-1;i1++)
{
u[2*i3-1][2*i2-2][2*i1-2] += 0.5 * z2[i1-1];
u[2*i3-1][2*i2-2][2*i1-1] += 0.25 *(z2[i1-1] + z2[i1] );
}
for(i1=1;i1<=mm1-1;i1++)
{
u[2*i3-1][2*i2-1][2*i1-2] += 0.25*z3[i1-1];
u[2*i3-1][2*i2-1][2*i1-1] += 0.125*( z3[i1-1] + z3[i1] );
}
}
}
}
} else {
if(n1==3) {
d1 = 2;
t1 = 1;
} else {
d1 = 1;
t1 = 0;
}
if(n2==3) {
d2 = 2;
t2 = 1;
} else {
d2 = 1;
t2 = 0;
}
if(n3==3) {
d3 = 2;
t3 = 1;
} else {
d3 = 1;
t3 = 0;
}
#pragma omp parallel private(i1,i2,i3)
{
#pragma omp for
for(i3=1;i3<=mm3-1;i3++)
{
for(i2=1;i2<=mm2-1;i2++)
{
for(i1=1;i1<=mm1-1;i1++)
{
u[2*i3-1-d3][2*i2-1-d2][2*i1-1-d1] +=
z[i3-1][i2-1][i1-1];
}
for(i1=1;i1<=mm1-1;i1++)
{
u[2*i3-1-d3][2*i2-1-d2][2*i1-1-t1] +=
0.5*(z[i3-1][i2-1][i1] + z[i3-1][i2-1][i1-1]);
}
}
for(i2=1;i2<=mm2-1;i2++)
{
for(i1=1;i1<=mm1-1;i1++)
{
u[2*i3-1-d3][2*i2-1-t2][2*i1-1-d1] +=
0.5*(z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
}
for(i1=1;i1<=mm1-1;i1++)
{
u[2*i3-1-d3][2*i2-1-t2][2*i1-1-t1] +=
0.25*(z[i3-1][i2][i1] + z[i3-1][i2-1][i1]
+z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);
}
}
}
#pragma omp for nowait
for(i3=1;i3<=mm3-1;i3++)
{
for(i2=1;i2<=mm2-1;i2++)
{
for(i1=1;i1<=mm1-1;i1++)
{
u[2*i3-1-t3][2*i2-1-d2][2*i1-1-d1] +=
0.5*(z[i3][i2-1][i1-1]+z[i3-1][i2-1][i1-1]);
}
for(i1=1;i1<=mm1-1;i1++)
{
u[2*i3-1-t3][2*i2-1-d2][2*i1-1-t1] +=
0.25*(z[i3][i2-1][i1] + z[i3][i2-1][i1-1]
+z[i3-1][i2-1][i1]+z[i3-1][i2-1][i1-1]);
}
}
for(i2=1;i2<=mm2-1;i2++)
{
for(i1=1;i1<=mm1-1;i1++)
{
u[2*i3-1-t3][2*i2-1-t2][2*i1-1-d1] +=
0.25*(z[i3][i2][i1-1]+z[i3][i2-1][i1-1]
+z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);
}
for(i1=1;i1<=mm1-1;i1++)
{
u[2*i3-1-t3][2*i2-1-t2][2*i1-1-t1] +=
0.125*(z[i3][i2][i1]+z[i3][i2-1][i1]
+z[i3][i2][i1-1]+z[i3][i2-1][i1-1]
+z[i3-1][i2][i1]+z[i3-1][i2-1][i1]
+z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]);
}
}
}
}
}
}
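/*
 Editor's note: the 0.5/0.25/0.125 factors in interp_mpi are the 1-D linear
 interpolation weight 1/2 applied once per dimension in which the fine point
 lies between coarse points. A 1-D prolongation (hypothetical helper, u of
 length 2*mm1-1) makes this explicit:
*/
static void prolong_1d_sketch(const double *z, int mm1, double *u) {
  int i1;
  for (i1 = 0; i1 < mm1; i1++)     /* fine point coincides with a coarse one */
    u[2*i1] += z[i1];
  for (i1 = 0; i1 < mm1 - 1; i1++) /* midpoint: average of the two neighbors */
    u[2*i1 + 1] += 0.5*(z[i1] + z[i1+1]);
}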
//smoother
void psinv(REAL*** r, REAL*** u, int n1,int n2,int n3, double c[4])
{
//---------------------------------------------------------------------
// psinv applies an approximate inverse as smoother: u = u + Cr
//
// This implementation costs 15A + 4M per result, where
// A and M denote the costs of Addition and Multiplication.
// Presuming coefficient c(3) is zero (the NPB assumes this,
// though that is not the general case), 2A + 1M may be eliminated,
// resulting in 13A + 3M.
// Note that this vectorizes, and is also fine for cache
// based machines.
//---------------------------------------------------------------------
// double precision u(n1,n2,n3),r(n1,n2,n3),c(0:3)
int i3, i2, i1;
double *r1, *r2;
static bool psinv_init = false;
//Private arrays for each thread
static double **_r1;
static double **_r2;
if (!psinv_init) {
_r1 = (double**)malloc(sizeof(double*)*omp_get_max_threads());
_r2 = (double**)malloc(sizeof(double*)*omp_get_max_threads());
for (i1 = 0; i1 < omp_get_max_threads(); i1++) {
_r1[i1] = malloc(sizeof(double)*(nm+1));
_r2[i1] = malloc(sizeof(double)*(nm+1));
}
psinv_init = true;
}
#pragma omp parallel private(i1,i2,i3,r1,r2)
{
r1 = _r1[omp_get_thread_num()];
r2 = _r2[omp_get_thread_num()];
#pragma omp for
for(i3=1;i3<n3-1;i3++) {
for(i2=1;i2<n2-1;i2++) {
for(i1=0;i1<n1;i1++) {
r1[i1] = r[i3][i2-1][i1]+ r[i3][i2+1][i1]
+ r[i3-1][i2][i1] + r[i3+1][i2][i1];
r2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1]
+ r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];
}
for(i1=1;i1<n1-1;i1++) {
u[i3][i2][i1] +=
c[0] * r[i3][i2][i1]
+ c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1]
+ r1[i1] )
+ c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] );
//---------------------------------------------------------------------
// Assume c(3) = 0 (Enable line below if c(3) not= 0)
//---------------------------------------------------------------------
// > + c(3) * ( r2(i1-1) + r2(i1+1) )
//---------------------------------------------------------------------
}
}
}
}
//---------------------------------------------------------------------
// exchange boundary points
//---------------------------------------------------------------------
//Exchange boundary data across processors
//exchange(u,n1,n2,n3);
comm3(u,n1,n2,n3);
}
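/*
 Editor's sketch (illustrative): the increment psinv adds at one interior
 point, written without the r1/r2 row buffers. c[0] weights the center,
 c[1] the 6 face neighbors, c[2] the 12 edge neighbors; c[3] would weight
 the 8 corners but is assumed zero, as noted above.
*/
static double psinv_increment_sketch(double ***r, const double c[4],
                                     int i3, int i2, int i1) {
  double faces =
      r[i3][i2][i1-1] + r[i3][i2][i1+1]
    + r[i3][i2-1][i1] + r[i3][i2+1][i1]
    + r[i3-1][i2][i1] + r[i3+1][i2][i1];
  double edges =
      r[i3][i2-1][i1-1] + r[i3][i2-1][i1+1] + r[i3][i2+1][i1-1] + r[i3][i2+1][i1+1]
    + r[i3-1][i2][i1-1] + r[i3-1][i2][i1+1] + r[i3+1][i2][i1-1] + r[i3+1][i2][i1+1]
    + r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1] + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];
  return c[0]*r[i3][i2][i1] + c[1]*faces + c[2]*edges;
}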
void comm3(REAL*** u,int n1,int n2,int n3)
{
//---------------------------------------------------------------------
// comm3 organizes the communication on all borders
//---------------------------------------------------------------------
//exchange around the x-axis
int i1, i2, i3;
int isLast = (global_params->mpi_rank == global_params->mpi_size - 1 ? 1:0);
int isFirst = (global_params->mpi_rank == 0 ? 1:0);
if(global_params->mpi_size > 1){
exchange(u,n1,n2,n3);
}
//isLast/isFirst are read-only inside the region, so they must be firstprivate
//(plain private would leave them uninitialized)
#pragma omp parallel private(i1,i2,i3) firstprivate(isLast, isFirst)
{
#pragma omp for
for(i3=isFirst;i3<n3-isLast;i3++) {
for(i2=isFirst;i2<n2-isLast;i2++) {
u[i3][i2][0] = u[i3][i2][n1-2];
u[i3][i2][n1-1] = u[i3][i2][1];
}
}
#pragma omp for
for(i3=isFirst;i3<n3-isLast;i3++)
{
for(i1=0;i1<n1;i1++)
{
u[i3][0][i1] = u[i3][n2-2][i1];
u[i3][n2-1][i1] = u[i3][1][i1];
}
}
}
//Exchange first and last xy-planes between processors
if(global_params->mpi_size == 1){
for(i2=0;i2<n2;i2++) {
for(i1=0;i1<n1;i1++) {
u[0][i2][i1] = u[n3-2][i2][i1];
u[n3-1][i2][i1] = u[1][i2][i1];
}
}
} else {
//use mpi to exchange this data
if(global_params->mpi_rank == 0 || global_params->mpi_rank == global_params->mpi_size - 1){
//Similar to getGhostCells - it might be reusable here (the offset differs, though; needs testing)
MPI_Request r_req = MPI_REQUEST_NULL, s_req = MPI_REQUEST_NULL;
MPI_Status status;
REAL * ghost_cells = (REAL*) malloc(sizeof(REAL)*n1*n2);
REAL * message = (REAL*) malloc(sizeof(REAL)*n1*n2);
if(global_params->mpi_rank == 0){
//get first plane
for(i2=0;i2<n2;i2++) {
for(i1=0;i1<n1;i1++) {
ghost_cells[(i2)*(n1) + i1] = u[1][i2][i1];
}
}
//send/receive front plane
//MPI_Send(ghost_cells, n1*n2, MPI_DOUBLE, global_params->mpi_size - 1, 3, MPI_COMM_WORLD);
//MPI_Recv(message,n1*n2,MPI_DOUBLE, global_params->mpi_size - 1, 3, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
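//Post the receive before the send: both calls are nonblocking, so this
//cannot deadlock, and a pre-posted receive lets MPI deliver the incoming
//plane directly into 'message'.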
MPI_Irecv(message, n1*n2, MPI_DOUBLE, global_params->mpi_size - 1, 3,MPI_COMM_WORLD, &r_req);
MPI_Isend(ghost_cells, n1*n2, MPI_DOUBLE, global_params->mpi_size - 1, 3, MPI_COMM_WORLD, &s_req);
MPI_Wait(&r_req, &status);
MPI_Wait(&s_req, &status);
for(i2=0;i2<n2;i2++) {
for(i1=0;i1<n1;i1++) {
u[0][i2][i1] = message[(i2)*(n1) + i1];
}
}
}
if(global_params->mpi_rank == global_params->mpi_size-1){
//get last plane
for(i2=0;i2<n2;i2++) {
for(i1=0;i1<n1;i1++) {
ghost_cells[(i2)*(n1) + i1] = u[n3-2][i2][i1];
}
}
//receive/send back plane
//MPI_Recv(message,n1*n2,MPI_DOUBLE, 0, 3, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
//MPI_Send(ghost_cells, n1*n2, MPI_DOUBLE, 0, 3, MPI_COMM_WORLD);
MPI_Irecv(message,n1*n2, MPI_DOUBLE, 0, 3,MPI_COMM_WORLD, &r_req);
MPI_Isend(ghost_cells, n1*n2, MPI_DOUBLE, 0, 3, MPI_COMM_WORLD, &s_req);
MPI_Wait(&r_req, &status);
MPI_Wait(&s_req, &status);
for(i2=0;i2<n2;i2++) {
for(i1=0;i1<n1;i1++) {
u[n3-1][i2][i1] = message[(i2)*(n1) + i1];
}
}
}
free(ghost_cells);
free(message);
}
}
}
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
/// PrevTokLocation - The location of the token we previously
/// consumed. This token is used for diagnostics where we expected to
/// see a token following another token (e.g., the ';' at the end of
/// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances.
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> CilkHintHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp; it is a workaround
/// to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
void operator++() {
++Depth;
++AddedLevels;
}
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
void setAddedDepth(unsigned D) {
Depth = Depth - AddedLevels + D;
AddedLevels = D;
}
unsigned getDepth() const { return Depth; }
unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};
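/// Editor's illustrative sketch (hypothetical use site, based on the API
/// above): the destructor removes exactly the levels added through the
/// RAII object.
/// \code
///   unsigned Depth = 0;
///   {
///     TemplateParameterDepthRAII DepthTracker(Depth);
///     ++DepthTracker;            // Depth == 1 while parsing the list
///     ++DepthTracker;            // Depth == 2
///   }
///   // Depth == 0 again here.
/// \endcode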
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
/// Flags used to rank candidate template names when there is more than one
/// '<' in a scope.
enum Priority : unsigned short {
/// A non-dependent name that is a potential typo for a template name.
PotentialTypo = 0x0,
/// A dependent name that might instantiate to a template-name.
DependentName = 0x2,
/// A space appears before the '<' token.
SpaceBeforeLess = 0x0,
/// No space before the '<' token
NoSpaceBeforeLess = 0x1,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
};
struct Loc {
Expr *TemplateName;
SourceLocation LessLoc;
AngleBracketTracker::Priority Priority;
unsigned short ParenCount, BracketCount, BraceCount;
bool isActive(Parser &P) const {
return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
P.BraceCount == BraceCount;
}
bool isActiveOrNested(Parser &P) const {
return isActive(P) || P.ParenCount > ParenCount ||
P.BracketCount > BracketCount || P.BraceCount > BraceCount;
}
};
SmallVector<Loc, 8> Locs;
/// Add an expression that might have been intended to be a template name.
/// In the case of ambiguity, we arbitrarily select the innermost such
/// expression, for example in 'foo < bar < baz', 'bar' is the current
/// candidate. No attempt is made to track that 'foo' is also a candidate
/// for the case where we see a second suspicious '>' token.
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
Priority Prio) {
if (!Locs.empty() && Locs.back().isActive(P)) {
if (Locs.back().Priority <= Prio) {
Locs.back().TemplateName = TemplateName;
Locs.back().LessLoc = LessLoc;
Locs.back().Priority = Prio;
}
} else {
Locs.push_back({TemplateName, LessLoc, Prio,
P.ParenCount, P.BracketCount, P.BraceCount});
}
}
/// Mark the current potential missing template location as having been
/// handled (this happens if we pass a "corresponding" '>' or '>>' token
/// or leave a bracket scope).
void clear(Parser &P) {
while (!Locs.empty() && Locs.back().isActiveOrNested(P))
Locs.pop_back();
}
/// Get the current enclosing expression that might have been intended to be
/// a template name.
Loc *getCurrent(Parser &P) {
if (!Locs.empty() && Locs.back().isActive(P))
return &Locs.back();
return nullptr;
}
};
AngleBracketTracker AngleBrackets;
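/// Editor's note (usage sketch based on the API above): on seeing
/// 'name < expr', the parser records a candidate with
/// AngleBrackets.add(*this, NameExpr, LessLoc, Prio); a balancing '>' or
/// leaving the bracket scope retires it via AngleBrackets.clear(*this),
/// and diagnostics consult AngleBrackets.getCurrent(*this).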
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
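/// Editor's usage sketch (hypothetical call site for the helpers above):
/// \code
///   SourceLocation CommaLoc;
///   if (TryConsumeToken(tok::comma, CommaLoc)) {
///     // CommaLoc is the location of the ',' we just consumed.
///   }
/// \endcode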
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a typo for '='.
/// For typos, a fix-it to '=' is emitted.
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
Token Next = Tok;
PP.EnterToken(Consumed, /*IsReinject*/true);
PP.Lex(Tok);
PP.EnterToken(Next, /*IsReinject*/true);
}
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount) {
AngleBrackets.clear(*this);
--ParenCount; // Don't let unbalanced )'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount) {
AngleBrackets.clear(*this);
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount) {
AngleBrackets.clear(*this);
--BraceCount; // Don't let unbalanced }'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched a specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
if (N == 0 || Tok.is(tok::eof)) return Tok;
return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(const Token &Tok) {
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
static void setTypeAnnotation(Token &Tok, ParsedType T) {
Tok.setAnnotationValue(T.getAsOpaquePtr());
}
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}
static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
Tok.setAnnotationValue(ND);
}
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}
static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
Tok.setAnnotationValue(ND);
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
bool MightBeCXXScopeToken() {
return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
(Tok.is(tok::annot_template_id) &&
NextToken().is(tok::coloncolon)) ||
Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super);
}
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
}
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC);
if (Tok.isAnnotation())
return false;
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
PreferredTypeBuilder PrevPreferredType;
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
bool isActive;
public:
explicit TentativeParsingAction(Parser& p) : P(p) {
PrevPreferredType = P.PreferredType;
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
void Commit() {
assert(isActive && "Parsing action was finished!");
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
void Revert() {
assert(isActive && "Parsing action was finished!");
P.PP.Backtrack();
P.PreferredType = PrevPreferredType;
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
~RevertingTentativeParsingAction() { Revert(); }
};
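/// Editor's usage sketch (hypothetical): a disambiguation probe that always
/// rewinds, however it exits:
/// \code
///   {
///     RevertingTentativeParsingAction TPA(*this);
///     ConsumeToken();            // look ahead destructively...
///   }                            // ...all tokens are replayed here
/// \endcode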
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
Decl *DC;
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
Parser *Self;
ParseScope(const ParseScope &) = delete;
void operator=(const ParseScope &) = delete;
public:
// ParseScope - Construct a new object to manage a scope in the
// parser Self where the new Scope is created with the flags
// ScopeFlags, but only when we aren't about to enter a compound statement.
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
bool BeforeCompoundStmt = false)
: Self(Self) {
if (EnteredScope && !BeforeCompoundStmt)
Self->EnterScope(ScopeFlags);
else {
if (BeforeCompoundStmt)
Self->incrementMSManglingNumber();
this->Self = nullptr;
}
}
// Exit - Exit the scope associated with this object now, rather
// than waiting until the object is destroyed.
void Exit() {
if (Self) {
Self->ExitScope();
Self = nullptr;
}
}
~ParseScope() {
Exit();
}
};
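/// Editor's usage sketch (hypothetical call site):
/// \code
///   {
///     ParseScope BodyScope(this, Scope::DeclScope);
///     // ... parse the construct ...
///   } // scope exits here, or earlier via BodyScope.Exit()
/// \endcode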
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
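/// Editor's usage sketch: the flags combine through the operator above,
/// e.g.
/// \code
///   SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
/// \endcode
/// stops before a matching ')' and also gives up at the first ';'.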
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no active
/// MisleadingIndentationChecker on an else, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
virtual void ParseLexedPragmas();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
void ParseLexedPragmas() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
IdentifierInfo *MacroII = nullptr;
SourceLocation AttrNameLoc;
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
class LateParsedPragma : public LateParsedDeclaration {
Parser *Self = nullptr;
AccessSpecifier AS = AS_none;
CachedTokens Toks;
public:
explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
: Self(P), AS(AS) {}
void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
const CachedTokens &toks() const { return Toks; }
AccessSpecifier getAccessSpecifier() const { return AS; }
void ParseLexedPragmas() override;
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
bool parseSoon() { return ParseSoon; }
private:
bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
Decl *D;
CachedTokens Toks;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
explicit LexedMethod(Parser* P, Decl *MD)
: Self(P), D(MD), TemplateScope(false) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), TemplateScope(false),
ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser* Self;
/// Method - The method declaration.
Decl *Method;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), TemplateScope(false),
IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class had an associated template
/// scope. When true, TagOrTemplate is a template declaration;
/// otherwise, it is a tag declaration.
bool TemplateScope : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
Parser &P;
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
if (!Popped)
P.PopParsingClass(State);
}
};
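/// Editor's usage sketch (hypothetical): parse a class definition under an
/// RAII frame that is popped even on early return:
/// \code
///   ParsingClassDefinition ParsingDef(*this, TagDecl, /*TopLevelClass=*/true,
///                                     /*IsInterface=*/false);
///   // ... parse members ...
///   ParsingDef.Pop();            // or let the destructor pop it
/// \endcode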
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// The kind of template we are parsing.
enum {
/// We are not parsing a template at all.
NonTemplate = 0,
/// We are parsing a template declaration.
Template,
/// We are parsing an explicit specialization.
ExplicitSpecialization,
/// We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// The location of the 'extern' keyword, if any, for an explicit
/// instantiation
SourceLocation ExternLoc;
/// The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
struct ParsedAttributesWithRange : ParsedAttributes {
ParsedAttributesWithRange(AttributeFactory &factory)
: ParsedAttributes(factory) {}
void clear() {
ParsedAttributes::clear();
Range = SourceRange();
}
SourceRange Range;
};
struct ParsedAttributesViewWithRange : ParsedAttributesView {
ParsedAttributesViewWithRange() : ParsedAttributesView() {}
void clearListOnly() {
ParsedAttributesView::clearListOnly();
Range = SourceRange();
}
SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
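// A sketch of the two forms the entry point above distinguishes, assuming
// Objective-C source:
//   NSArray<NSString *> *a;  // angle brackets hold type arguments
//   id<NSCopying> c;         // angle brackets hold protocol qualifiers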
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
struct ObjCImplParsingDataRAII {
Parser &P;
Decl *Dcl;
bool HasCFunction;
typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
LateParsedObjCMethodContainer LateParsedObjCMethods;
ObjCImplParsingDataRAII(Parser &parser, Decl *D)
: P(parser), Dcl(D), HasCFunction(false) {
P.CurParsedObjCImpl = this;
Finished = false;
}
~ObjCImplParsingDataRAII();
void finish(SourceRange AtEnd);
bool isFinished() const { return Finished; }
private:
bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for recognizing Objective-C context-sensitive keywords.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult
ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Parse an expression that does not include top-level commas (an assignment-expression).
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
/// Control what ParseCastExpression will parse.
enum CastParseKind {
AnyCastExpr = 0,
UnaryExprOnly,
PrimaryExprOnly
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
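/// (For example, the '[' in 'a[i]', the '(' in 'f(x)', '.', '->',
/// '++', or '--'.)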
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
if (auto *Info = AngleBrackets.getCurrent(*this))
return checkPotentialAngleBracketDelimiter(*Info, OpToken);
return false;
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false,
bool InUsingDeclaration = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
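// For illustration: in Objective-C++ a leading '[' is ambiguous between a
// lambda-introducer and a message send, e.g. (a sketch):
//   [x] (int i) { return i + x; }  // lambda-introducer: Success
//   [obj doSomething]              // message send: MessageSend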
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C++ Concepts
ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
ExprResult ParseInitializer() {
if (Tok.isNot(tok::l_brace))
return ParseAssignmentExpression();
return ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used).
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used).
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseCilkSpawnStatement();
StmtResult ParseCilkSyncStatement();
StmtResult ParseSpawnStatement();
StmtResult ParseSyncStatement();
StmtResult ParseForallStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseCilkForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// How this __if_exists or __if_not_exists block should behave.
IfExistsBehavior Behavior;
};
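// A sketch of the Microsoft extension being described:
//   __if_exists (SomeClass::someMember) {
//     // handled per IfExistsBehavior; used only if the name exists
//   }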
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out; there are other significant restrictions on specifiers that
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
return false;
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return true;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_type_specifier:
return true;
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
SourceLocation ColonLoc;
ExprResult RangeExpr;
bool ParsedForRangeDecl() { return ColonLoc.isValid(); }
};
struct ForRangeInfo : ForRangeInit {
StmtResult LoopVar;
};
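// Illustration (a sketch): while parsing 'auto x' in
//   for (auto x : range) { ... }
// the ':' reveals a for-range-declaration; ColonLoc and RangeExpr record
// what was found so parsing can continue as a for-range statement.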
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and it isn't part of an expression such as a
/// function-style cast. Return false if it's not a decl-specifier, or
/// we're not sure.
bool isKnownToBeDeclarationSpecifier() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationSpecifier() == TPResult::True;
return isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration and an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationStatement();
return isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration and an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
if (getLangOpts().OpenMP)
Actions.startOpenMPLoop();
if (getLangOpts().CPlusPlus)
return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
return isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdInParens, isAmbiguous);
isAmbiguous = false;
return isTypeSpecifierQualifier();
}
bool isTypeIdInParens() {
bool isAmbiguous;
return isTypeIdInParens(isAmbiguous);
}
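// Illustration of the ambiguity (a sketch): after the '(' in
// 'sizeof (int *)' the parenthesized tokens form a type-id, while in
// 'sizeof (x + 1)' they form an expression.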
/// Checks if the current tokens form a type-id or an expression.
/// It is similar to isTypeIdInParens but does not assume that the type-id
/// is in parentheses.
bool isTypeIdUnambiguously() {
bool IsAmbiguous;
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration and an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration and an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
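// Illustrative cases (a sketch):
//   if (x == 0)               // Expression
//   if (int x = f())          // ConditionDecl
//   if (int x = f(); x > 0)   // InitStmtDecl (C++17)
//   for (auto x : r)          // ForRangeDecl (via ParseCXXCondition)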
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
bool isAmbiguous;
return isCXXTypeId(Context, isAmbiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the tricky cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPResult::True if this token starts an expression,
/// \c TPResult::False if this token starts a type-specifier-seq, or
/// \c TPResult::Ambiguous if it cannot tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
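// The classic ambiguity this resolves (a sketch):
//   f<int>(0);    // '<' begins a template-argument-list
//   a < b > (c);  // '<' and '>' are comparison operators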
/// Determine whether an '(' after an 'explicit' keyword is part of a C++20
/// 'explicit(bool)' declaration, in earlier language modes where that is an
/// extension.
TPResult isExplicitBool();
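// Illustration of the construct (a sketch):
//   template <typename T>
//   struct S { explicit(sizeof(T) == 1) S(T); };  // conditionally explicit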
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
assert(Tok.is(tok::l_square));
if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
return false;
return DiagnoseProhibitedCXX11Attribute();
}
bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation) {
if (!standardAttributesAllowed())
return;
if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
Tok.isNot(tok::kw_alignas))
return;
DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clear();
}
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear in certain syntactic locations
// which the standard permits but we do not support yet, for example,
// attributes that appertain to decl-specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
ParsedAttributes attrs(AttrFactory);
SourceLocation endLoc;
ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute))
ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseCXX11Attributes(Declarator &D) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation endLoc;
ParseCXX11Attributes(attrs, &endLoc);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrsWithRange(AttrFactory);
ParseCXX11Attributes(attrsWithRange, endLoc);
attrs.takeAllFrom(attrsWithRange);
}
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (standardAttributesAllowed() &&
isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr) {
const auto &LO = getLangOpts();
if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses the opencl_unroll_hint attribute if the language is OpenCL v2.0
/// or higher.
/// \return false if an error occurs.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
if (getLangOpts().OpenCL)
return ParseOpenCLUnrollHintAttribute(Attrs);
return true;
}
/// Parses the opencl_unroll_hint attribute.
/// \return false if an error occurs.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
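// Example of the attribute in OpenCL source (a sketch):
//   __attribute__((opencl_unroll_hint(4)))
//   for (int i = 0; i < n; ++i) a[i] = b[i];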
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
bool EnteredScope;
bool CreatedScope;
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
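// Illustration (a sketch): while parsing the declarator in
//   void A::B::f(Ty x);
// EnterDeclaratorScope() enters the scope of A::B so that names used later
// in the declarator (e.g. the parameter type 'Ty') resolve inside A::B.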
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
DeclaratorContext DeclaratorContext,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
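// Why disambiguation is needed (a sketch, Objective-C++):
//   [[noreturn]] void f();   // '[[' begins an attribute-specifier
//   [[obj method] retain];   // '[[' begins a message send whose receiver
//                            // is itself a message send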
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc;
SourceLocation InlineLoc;
SourceLocation IdentLoc;
IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
struct UsingDeclarator {
SourceLocation TypenameLoc;
CXXScopeSpec SS;
UnqualifiedId Name;
SourceLocation EllipsisLoc;
void clear() {
TypenameLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
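// A sketch of the pieces recorded for
//   using typename Base<T>::type;
// TypenameLoc marks 'typename', SS holds 'Base<T>::', and Name is 'type'.
// EllipsisLoc is set for a pack form such as 'using Bases::f...;' (C++17).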
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool
ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parses OpenMP context selectors, appending each successfully parsed
/// selector to \p Data.
bool
parseOpenMPContextSelectors(SourceLocation Loc,
SmallVectorImpl<Sema::OMPCtxSelectorData> &Data);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses the initializer for the provided omp_priv declaration inside the
/// reduction initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Parses a simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses a declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses a clause of kind \a CKind for a directive of kind \a DKind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true if this is the first clause of kind \a CKind
/// in the current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses a simple expression in parentheses for single-expression clauses
/// of OpenMP constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *TailExpr = nullptr;
SourceLocation ColonLoc;
SourceLocation RLoc;
CXXScopeSpec ReductionOrMapperIdScopeSpec;
DeclarationNameInfo ReductionOrMapperId;
int ExtraModifier = -1; ///< Additional modifier for the linear, map, depend,
///< or lastprivate clauses.
SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers>
MapTypeModifiers;
SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers>
MapTypeModifiersLoc;
bool IsMapTypeImplicit = false;
SourceLocation DepLinMapLastLoc;
};
/// Parses a clause with a list of variables.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
ParsedType ObjectType,
SourceLocation *TemplateKWLoc,
UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
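// Illustrative examples of the grammar above (not exhaustive; identifiers
// such as my_mapper are placeholders):
//   map(tofrom: a)                   // no modifiers
//   map(always, close, tofrom: a)    // two map-type modifiers
//   map(mapper(my_mapper), to: b)    // mapper modifier (OpenMP 5.0)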
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
TPResult isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
bool isTypeConstraintAnnotation();
bool TryAnnotateTypeConstraint();
NamedDecl *
ParseConstrainedTemplateTypeParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true,
bool TypeConstraint = false);
void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
GB_unaryop__abs_int32_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int32_uint8
// op(A') function: GB_tran__abs_int32_uint8
// C type: int32_t
// A type: uint8_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_int32_uint8
(
int32_t *restrict Cx,
const uint8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
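// Worked example (illustrative): for Ax = { 0, 7, 250 } (uint8_t) and
// anz = 3, each entry is widened to int32_t before GB_IABS is applied,
// so Cx = { 0, 7, 250 }; for unsigned inputs abs-after-cast is the
// identity, but the hard-coded kernel still avoids the generic case.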
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_int32_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DMD5_fmt_plug.c | /*
* DMD5_fmt.c
*
* DIGEST-MD5 authentication module for Solar Designer's John the Ripper
* Uses Solar Designer's MD5 implementation.
*
* This software is Copyright 2006, regenrecht@o2.pl, and
* Copyright 2011, 2013 magnum, and it is hereby released to the general
* public under the following terms: Redistribution and use in source and
* binary forms, with or without modification, are permitted.
*
* Input format:
* $DIGEST-MD5$ username $ realm $ nonce $ digest_uri $ cnonce $ nc $ qop $ response [ $ authzid ]
*
* Just base64-decode the blob you see when sniffing, to get all data needed for above.
*
*/
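/*
 * For reference, the response this format cracks is computed as in
 * RFC 2831 (mirrored step by step in crypt_all() below):
 *
 *   HA1      = MD5( MD5(username:realm:password) :nonce:cnonce[:authzid] )
 *   response = MD5( HEX(HA1) :nonce:nc:cnonce:qop: HEX(MD5(A2)) )
 *
 * where A2 is "AUTHENTICATE:digest-uri" (with a 32-zero suffix for
 * auth-int and auth-conf) and HEX() is lowercase hex encoding.
 */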
#if FMT_EXTERNS_H
extern struct fmt_main fmt_DMD5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_DMD5);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "md5.h"
#include "common.h"
#include "formats.h"
#include "memdbg.h"
#define FORMAT_LABEL "dmd5"
#define FORMAT_NAME "DIGEST-MD5 C/R"
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define FORMAT_TAG "$DIGEST-MD5$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MD5_HEX_SIZE (2 * BINARY_SIZE)
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(cur_salt)
#define SALT_ALIGN 1
#define DSIZE (128 - sizeof(int))
#define CIPHERTEXT_LENGTH (DSIZE * 4)
#define PLAINTEXT_LENGTH 32
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static const char itoa16_shr_04[] =
"0000000000000000"
"1111111111111111"
"2222222222222222"
"3333333333333333"
"4444444444444444"
"5555555555555555"
"6666666666666666"
"7777777777777777"
"8888888888888888"
"9999999999999999"
"aaaaaaaaaaaaaaaa"
"bbbbbbbbbbbbbbbb"
"cccccccccccccccc"
"dddddddddddddddd"
"eeeeeeeeeeeeeeee"
"ffffffffffffffff";
static const char itoa16_and_0f[] =
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef"
"0123456789abcdef";
static struct {
unsigned char login_id[DSIZE]; // username:realm
unsigned int login_id_len;
unsigned char nonces[DSIZE]; // :nonce:cnonce[:authzid]
unsigned int nonces_len;
unsigned char prehash_KD[DSIZE]; // :nonce:nc:cnonce:qop:hex_A2_hash
unsigned int prehash_KD_len;
} cur_salt;
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE/4];
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static struct fmt_tests tests[] = {
{"$DIGEST-MD5$s3443$pjwstk$00$ldap/10.253.34.43$0734d94ad9abd5bd7fc5e7e77bcf49a8$00000001$auth-int$dd98347e6da3efd6c4ff2263a729ef77", "test"},
{NULL}
};
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
PLAINTEXT_LENGTH + 1);
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
BINARY_SIZE);
}
static void done(void)
{
MEM_FREE(crypt_key);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *data = ciphertext + FORMAT_TAG_LEN;
int extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
return 0;
if (strlen(ciphertext) > CIPHERTEXT_LENGTH)
return 0;
if (!(p = strchr(data, '$')) || (int)(p-data) >= 64) // username
return 0;
data = p + 1; // realm
if (!(p = strchr(data, '$')) || (int)(p-data) >= 64)
return 0;
data = p + 1; // nonce
if (!(p = strchr(data, '$')) || (int)(p-data) >= 64)
return 0;
data = p + 1; // digest_uri
if (!(p = strchr(data, '$')) || (int)(p-data) >= DSIZE)
return 0;
data = p + 1; // cnonce
if (!(p = strchr(data, '$')) || (int)(p-data) > MD5_HEX_SIZE)
return 0;
if (hexlenl(data, 0) != p-data)
return 0;
data = p + 1; // nc
if (!(p = strchr(data, '$')) || (int)(p-data) >= 9)
return 0;
data = p + 1; // qop
if (strncmp(data, "auth", 4) && strncmp(data, "auth-int", 8) &&
strncmp(data, "auth-conf", 9))
return 0;
if (!(p = strchr(data, '$')) || (int)(p-data) >= 9)
return 0;
data = p + 1; // authzid, optional
if ((p = strchr(data, '$'))) {
if ((int)(p-data) > MD5_HEX_SIZE || strlen(&p[1]) >= 8)
return 0;
} else if (strlen(data) > MD5_HEX_SIZE)
return 0;
if (hexlenl(data, &extra) != MD5_HEX_SIZE || extra)
return 0;
return 1;
}
static void *get_binary(char *ciphertext)
{
static ARCH_WORD_32 out[BINARY_SIZE/4];
char response[MD5_HEX_SIZE + 1];
unsigned int i;
char *p, *data = ciphertext + FORMAT_TAG_LEN;
p = strchr(data, '$'); data = p + 1;
p = strchr(data, '$'); data = p + 1;
p = strchr(data, '$'); data = p + 1;
p = strchr(data, '$'); data = p + 1;
p = strchr(data, '$'); data = p + 1;
p = strchr(data, '$'); data = p + 1;
p = strchr(data, '$'); data = p + 1;
p = strchr(data, '$');
if (p && (p - data + 1) < sizeof(response))
strnzcpy(response, data, p - data + 1);
else
strnzcpy(response, data, sizeof(response));
for (i = 0; i < BINARY_SIZE; ++i)
((unsigned char*)out)[i] =
(atoi16[ARCH_INDEX(response[i*2])] << 4)
+ atoi16[ARCH_INDEX(response[i*2+1])];
return (void*)out;
}
static void *get_salt(char *ciphertext)
{
char username[64];
char realm[64];
char nonce[64];
char digest_uri[DSIZE];
char cnonce[MD5_HEX_SIZE + 1];
char nc[9];
char qop[9];
char authzid[8];
unsigned char *ptr_src, *ptr_dst, v, i;
char *ccopy = strdup(ciphertext);
char *p, *data = ccopy + FORMAT_TAG_LEN;
MD5_CTX ctx;
char A2[DSIZE];
unsigned char hash[BINARY_SIZE];
unsigned char hex_hash[2*MD5_HEX_SIZE];
if ((p = strchr(data, '$'))) *p = 0;
strnzcpy(username, data, sizeof(username));
data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
strnzcpy(realm, data, sizeof(realm));
data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
strnzcpy(nonce, data, sizeof(nonce));
data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
strnzcpy(digest_uri, data, sizeof(digest_uri));
data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
strnzcpy(cnonce, data, sizeof(cnonce));
data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
strnzcpy(nc, data, sizeof(nc));
data = p + 1; if ((p = strchr(data, '$'))) *p = 0;
strnzcpy(qop, data, sizeof(qop));
data = p + 1;
if ((p = strchr(data, '$'))) {
*p = 0;
data = p + 1;
if (*data)
strnzcpy(authzid, data, sizeof(authzid));
else
*authzid = 0;
} else {
*authzid = 0;
}
if (!strcmp(qop, "auth"))
snprintf((char*)A2, sizeof(A2),
"AUTHENTICATE:%s", digest_uri);
else if (!strcmp(qop, "auth-int") || !strcmp(qop, "auth-conf"))
snprintf((char*)A2, sizeof(A2),
"AUTHENTICATE:%s:00000000000000000000000000000000",
digest_uri);
MD5_Init(&ctx);
MD5_Update(&ctx, A2, strlen((char*)A2));
MD5_Final(hash, &ctx);
ptr_src = hash;
ptr_dst = hex_hash;
for (i = 0; i < BINARY_SIZE; ++i) {
v = *ptr_src++;
*ptr_dst++ = itoa16_shr_04[ARCH_INDEX(v)];
*ptr_dst++ = itoa16_and_0f[ARCH_INDEX(v)];
}
*ptr_dst = 0;
snprintf((char*)cur_salt.prehash_KD, sizeof(cur_salt.prehash_KD),
":%s:%s:%s:%s:%s", nonce, nc, cnonce, qop, hex_hash);
cur_salt.prehash_KD_len = strlen((char*)cur_salt.prehash_KD);
if (authzid[0])
snprintf((char*)cur_salt.nonces, sizeof(cur_salt.nonces),
":%s:%s:%s", nonce, cnonce, authzid);
else
snprintf((char*)cur_salt.nonces, sizeof(cur_salt.nonces),
":%s:%s", nonce, cnonce);
cur_salt.nonces_len = strlen((char*)cur_salt.nonces);
snprintf((char*)cur_salt.login_id, sizeof(cur_salt.login_id),
"%s:%s:", username, realm);
cur_salt.login_id_len = strlen((char*)cur_salt.login_id);
MEM_FREE(ccopy);
return (void*)&cur_salt;
}
static void set_salt(void *salt)
{
memcpy(&cur_salt, salt, sizeof(cur_salt));
}
static void set_key(char *key, int index)
{
strnzcpyn(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}
static char *get_key(int index)
{
return saved_key[index];
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
unsigned char hash[16];
unsigned char hex_hash[MD5_HEX_SIZE];
unsigned char *ptr_src, *ptr_dst;
MD5_CTX ctx;
int i;
MD5_Init(&ctx);
// "username:realm"
MD5_Update(&ctx, cur_salt.login_id, cur_salt.login_id_len);
// "password"
MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
MD5_Final(hash, &ctx);
MD5_Init(&ctx);
// previous result
MD5_Update(&ctx, hash, BINARY_SIZE);
// ":nonce:cnonce[:authzid]"
MD5_Update(&ctx, cur_salt.nonces, cur_salt.nonces_len);
MD5_Final(hash, &ctx);
// hexify
ptr_src = hash;
ptr_dst = hex_hash;
for (i = 0; i < BINARY_SIZE; ++i) {
unsigned char v = *ptr_src++;
*ptr_dst++ = itoa16_shr_04[ARCH_INDEX(v)];
*ptr_dst++ = itoa16_and_0f[ARCH_INDEX(v)];
}
MD5_Init(&ctx);
// previous result, in hex
MD5_Update(&ctx, hex_hash, MD5_HEX_SIZE);
// ":nonce:nc:cnonce:qop:hex_A2_hash
MD5_Update(&ctx, cur_salt.prehash_KD, cur_salt.prehash_KD_len);
MD5_Final((unsigned char*)crypt_key[index], &ctx);
}
return count;
}
static int cmp_all(void *binary, int count)
{
#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
int index;
ARCH_WORD_32 b = ((ARCH_WORD_32*)binary)[0];
for (index = 0; index < count; index++)
if (crypt_key[index][0] == b)
return 1;
return 0;
#else
return ((ARCH_WORD_32*)binary)[0] == crypt_key[0][0];
#endif
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
struct fmt_main fmt_DMD5 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
tests
},
{
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
m_mesh_split.h | //
// Created by Harold on 2021/2/18.
//
#ifndef M_MATH_M_MESH_SPLIT_H
#define M_MATH_M_MESH_SPLIT_H
#include <open3d/Open3D.h>
#include <unordered_map>
#include <unordered_set>
#include <queue>
#include <stdexcept>
#include <omp.h>
namespace M_MATH {
struct TmpMesh {
std::vector<Eigen::Vector3d> vertices;
std::vector<Eigen::Vector3i> triangles;
std::unordered_map<size_t, size_t> vertices_idx; // global : local
};
// https://gdbooks.gitbooks.io/3dcollisions/content/Chapter2/static_aabb_plane.html
bool BoundPlaneIntersect(open3d::geometry::OrientedBoundingBox const& bound,
Eigen::Vector3d const& plane_center,
Eigen::Vector3d const& plane_normal) {
auto bound_center = bound.GetCenter(); // box center (the box is treated as an AABB here, per the link above)
auto bound_extents = bound.GetMaxBound() - bound_center; // positive extents
// Compute the projection interval radius of b onto L(t) = b.c + t * p.n
auto r = bound_extents.x() * std::abs(plane_normal.x()) + bound_extents.y() * std::abs(plane_normal.y()) + bound_extents.z() * std::abs(plane_normal.z());
// Compute distance of box center from plane
auto s = (bound_center - plane_center).dot(plane_normal);
// Intersection occurs when distance s falls within [-r,+r] interval
return std::abs(s) <= r;
}
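// Worked example (illustrative): a unit cube centered at the origin has
// extents (0.5, 0.5, 0.5); against the plane z = 0 with normal (0, 0, 1)
// this gives r = 0.5 and s = 0, so |s| <= r and an intersection is reported.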
bool LinePlaneIntersect(Eigen::Vector3d const& p0,
Eigen::Vector3d const& p1,
Eigen::Vector3d const& plane_center,
Eigen::Vector3d const& plane_normal,
Eigen::Vector3d& intersect_point,
double epsilon = 1e-6) { //std::numeric_limits<double>::epsilon()
auto line_dir = (p1 - p0).normalized();
auto d = plane_normal.dot(line_dir);
if (std::abs(d) < epsilon)
return false;
auto t = (plane_normal.dot(plane_center) - plane_normal.dot(p0)) / d;
intersect_point = p0 + line_dir * t;
return true;
}
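// Worked example (illustrative): p0 = (0, 0, -1), p1 = (0, 0, 1) against the
// plane z = 0 with normal (0, 0, 1): d = 1, t = (0 - (-1)) / 1 = 1, and the
// intersection point is p0 + line_dir * t = (0, 0, 0).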
/*
* | /| v1
* | / |
* |/ |
* i0 | |
* /| |
* / | |
* v0 /__|___| v2
* | i1
*/
bool TrianglePlaneIntersect(std::vector<Eigen::Vector3d> const& vertices,
std::vector<Eigen::Vector3i> const& triangles,
size_t triangle_idx,
Eigen::Vector3d const& plane_center,
Eigen::Vector3d const& plane_normal,
TmpMesh& l_tmp_mesh,
TmpMesh& r_tmp_mesh) {
auto triangle = triangles[triangle_idx];
auto is_triangle_vertices_in_mesh = [](Eigen::Vector3i triangle, TmpMesh const& mesh) {
return std::array<bool, 3>{mesh.vertices_idx.find(triangle.x()) != mesh.vertices_idx.end(),
mesh.vertices_idx.find(triangle.y()) != mesh.vertices_idx.end(),
mesh.vertices_idx.find(triangle.z()) != mesh.vertices_idx.end()};
};
// triangle vertices all in one side mesh
auto bls = is_triangle_vertices_in_mesh(triangle, l_tmp_mesh);
if (bls[0] == bls[1] && bls[1] == bls[2]) {
bls[0] ? l_tmp_mesh.triangles.emplace_back(l_tmp_mesh.vertices_idx[triangle.x()], l_tmp_mesh.vertices_idx[triangle.y()], l_tmp_mesh.vertices_idx[triangle.z()])
: r_tmp_mesh.triangles.emplace_back(r_tmp_mesh.vertices_idx[triangle.x()], r_tmp_mesh.vertices_idx[triangle.y()], r_tmp_mesh.vertices_idx[triangle.z()]);
return false;
}
// find lonely vertex
int v0 = (bls[0] != bls[1]) ? (bls[0] != bls[2] ? 0 : 1) : 2;
int v1 = (v0 + 1) % 3;
int v2 = (v0 + 2) % 3;
/*
// add all intersected triangles into l_mesh
if (bls[v0]) {
auto Nl = l_tmp_mesh.vertices.size();
l_tmp_mesh.vertices.push_back(vertices[triangle(v1)]);
l_tmp_mesh.vertices.push_back(vertices[triangle(v2)]);
l_tmp_mesh.triangles.push_back(
Eigen::Vector3i(l_tmp_mesh.vertices_idx[triangle(v0)], Nl, Nl + 1));
} else if (bls[v1] && bls[v2]) {
auto Nl = l_tmp_mesh.vertices.size();
l_tmp_mesh.vertices.push_back(vertices[triangle(v0)]);
l_tmp_mesh.triangles.push_back(Eigen::Vector3i(
l_tmp_mesh.vertices_idx[triangle(v1)],
l_tmp_mesh.vertices_idx[triangle(v2)], Nl));
}
*/
///*
// get intersection points
Eigen::Vector3d i0, i1;
LinePlaneIntersect(vertices[triangle(v0)], vertices[triangle(v1)], plane_center, plane_normal, i0);
LinePlaneIntersect(vertices[triangle(v0)], vertices[triangle(v2)], plane_center, plane_normal, i1);
auto Nl = l_tmp_mesh.vertices.size();
auto Nr = r_tmp_mesh.vertices.size();
l_tmp_mesh.vertices.push_back(i0);
l_tmp_mesh.vertices.push_back(i1);
r_tmp_mesh.vertices.push_back(i0);
r_tmp_mesh.vertices.push_back(i1);
// create new triangles: [v0, i0, i1], [i0, v1, v2], [v2, i1, i0]
bls[v0] ? l_tmp_mesh.triangles.emplace_back(l_tmp_mesh.vertices_idx[triangle(v0)], Nl, Nl + 1)
: r_tmp_mesh.triangles.emplace_back(r_tmp_mesh.vertices_idx[triangle(v0)], Nr, Nr + 1);
bls[v1] ? l_tmp_mesh.triangles.emplace_back(Nl, l_tmp_mesh.vertices_idx[triangle(v1)], l_tmp_mesh.vertices_idx[triangle(v2)])
: r_tmp_mesh.triangles.emplace_back(Nr, r_tmp_mesh.vertices_idx[triangle(v1)], r_tmp_mesh.vertices_idx[triangle(v2)]);
bls[v2] ? l_tmp_mesh.triangles.emplace_back(l_tmp_mesh.vertices_idx[triangle(v2)], Nl + 1, Nl)
: r_tmp_mesh.triangles.emplace_back(r_tmp_mesh.vertices_idx[triangle(v2)], Nr + 1, Nr);
//*/
return true;
}
/**
* @brief cut mesh into 2 parts by input plane
* @param mesh
* @param plane_center
* @param plane_normal
* @param l_mesh
* @param r_mesh
* @return successfully cut or not
*/
bool MeshCut(open3d::geometry::TriangleMesh const& mesh,
Eigen::Vector3d const& plane_center,
Eigen::Vector3d const& plane_normal,
open3d::geometry::TriangleMesh& l_mesh,
open3d::geometry::TriangleMesh& r_mesh) {
// 1. check whether bounding box intersect
if (!BoundPlaneIntersect(mesh.GetOrientedBoundingBox(), plane_center, plane_normal))
return false;
TmpMesh l_tmp_mesh, r_tmp_mesh;
// 2. separate vertices
auto& vertices = mesh.vertices_;
auto N = vertices.size();
l_tmp_mesh.vertices.reserve(N);
l_tmp_mesh.vertices_idx.reserve(N);
r_tmp_mesh.vertices.reserve(N);
r_tmp_mesh.vertices_idx.reserve(N);
for (size_t i = 0; i < N; i++) {
if ((vertices[i] - plane_center).dot(plane_normal) >= 0) {
l_tmp_mesh.vertices.push_back(vertices[i]);
l_tmp_mesh.vertices_idx.insert({ i, l_tmp_mesh.vertices.size() - 1 });
}
else {
r_tmp_mesh.vertices.push_back(vertices[i]);
r_tmp_mesh.vertices_idx.insert({ i, r_tmp_mesh.vertices.size() - 1 });
}
}
// if either side ends up with no vertices, the plane does not cut the mesh
if (l_tmp_mesh.vertices.empty() || r_tmp_mesh.vertices.empty())
return false;
// 3. separate triangles and cut triangles which intersect the plane
auto& triangles = mesh.triangles_;
auto M = triangles.size();
l_tmp_mesh.triangles.reserve(M);
r_tmp_mesh.triangles.reserve(M);
for (auto i = 0; i < M; i++)
TrianglePlaneIntersect(vertices, triangles, i, plane_center, plane_normal, l_tmp_mesh, r_tmp_mesh);
l_tmp_mesh.vertices.shrink_to_fit();
l_tmp_mesh.triangles.shrink_to_fit();
r_tmp_mesh.vertices.shrink_to_fit();
r_tmp_mesh.triangles.shrink_to_fit();
// 4. assign result to l_mesh, r_mesh
l_mesh = open3d::geometry::TriangleMesh(l_tmp_mesh.vertices, l_tmp_mesh.triangles);
r_mesh = open3d::geometry::TriangleMesh(r_tmp_mesh.vertices, r_tmp_mesh.triangles);
/*
// 5. remove duplicated
l_mesh.RemoveDuplicatedVertices();
l_mesh.RemoveDuplicatedTriangles();
r_mesh.RemoveDuplicatedVertices();
r_mesh.RemoveDuplicatedTriangles();
*/
return true;
}
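/*
 * Usage sketch (illustrative, assumes `mesh` is a valid TriangleMesh):
 *
 *   open3d::geometry::TriangleMesh l_mesh, r_mesh;
 *   if (M_MATH::MeshCut(mesh, mesh.GetCenter(), Eigen::Vector3d(0, 0, 1),
 *                       l_mesh, r_mesh)) {
 *       // l_mesh holds vertices with (v - center).dot(n) >= 0,
 *       // r_mesh the rest; triangles crossing the plane are re-cut.
 *   }
 */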
/**
* @brief cluster mesh triangles according to triangle connectivity
* @param mesh
* @return <triangle clusters, number of triangles in clusters>
*/
std::pair<std::vector<size_t>, std::vector<size_t>>
ClusterTriangles(open3d::geometry::TriangleMesh const& mesh) {
std::vector<size_t> triangle_clusters(mesh.triangles_.size(), -1); // -1 wraps to SIZE_MAX; used as the "unclustered" marker
std::vector<size_t> num_triangles;
// compute triangle adjacency
auto edges_to_triangles = mesh.GetEdgeToTrianglesMap(); // edge to triangle idxs: std::unordered_map<Eigen::Vector2i, std::vector<int>>
std::vector<std::unordered_set<int>> adjacency_list(mesh.triangles_.size()); // triangle idx to its adjacent triangles
#pragma omp parallel for schedule(static)
for (auto tidx = 0; tidx < mesh.triangles_.size(); ++tidx) {
const auto& triangle = mesh.triangles_[tidx];
// use .at(): operator[] could insert into the shared map, which would race
// across OpenMP threads (every edge of a mesh triangle is already present)
for (auto tri : edges_to_triangles.at(open3d::geometry::TriangleMesh::GetOrderedEdge(triangle(0), triangle(1))))
adjacency_list[tidx].insert(tri);
for (auto tri : edges_to_triangles.at(open3d::geometry::TriangleMesh::GetOrderedEdge(triangle(0), triangle(2))))
adjacency_list[tidx].insert(tri);
for (auto tri : edges_to_triangles.at(open3d::geometry::TriangleMesh::GetOrderedEdge(triangle(1), triangle(2))))
adjacency_list[tidx].insert(tri);
}
// cluster triangles
size_t cluster_idx = 0;
for (auto tidx = 0; tidx < mesh.triangles_.size(); ++tidx) {
// clustered
if (triangle_clusters[tidx] != -1) {
continue;
}
// un-clustered
std::queue<size_t> triangle_queue;
size_t cluster_n_triangles = 0;
triangle_queue.push(tidx);
triangle_clusters[tidx] = cluster_idx;
while (!triangle_queue.empty()) {
auto cluster_tidx = triangle_queue.front();
triangle_queue.pop();
cluster_n_triangles++;
for (auto tri : adjacency_list[cluster_tidx]) {
if (triangle_clusters[tri] == -1) {
triangle_queue.push(tri);
triangle_clusters[tri] = cluster_idx;
}
}
}
num_triangles.push_back(cluster_n_triangles);
cluster_idx++;
}
return std::make_pair(triangle_clusters, num_triangles);
}
/**
* @brief split mesh triangles into clusters
* @param mesh
* @param cluster_min_size
* @return cluster of triangle indices
*/
std::unordered_map<size_t, std::vector<size_t>>
MeshTrianglesSplit(open3d::geometry::TriangleMesh const& mesh,
size_t cluster_min_size = 10) {
auto triangle_clusters_nums = ClusterTriangles(mesh);
auto const& triangle_clusters = triangle_clusters_nums.first;
auto const& num_triangles = triangle_clusters_nums.second;
// cluster idx to triangle idxs
std::unordered_map<size_t, std::vector<size_t>> cluster_idx_triangles;
// a valid cluster has at least cluster_min_size triangles (default 10)
for (auto i = 0; i < num_triangles.size(); i++) {
if (num_triangles[i] >= cluster_min_size) {
std::vector<size_t> v;
v.reserve(num_triangles[i]);
cluster_idx_triangles.insert({ i, v });
}
}
for (auto i = 0; i < triangle_clusters.size(); i++) {
if (cluster_idx_triangles.find(triangle_clusters[i]) != cluster_idx_triangles.end()) {
cluster_idx_triangles[triangle_clusters[i]].emplace_back(i);
}
}
return cluster_idx_triangles;
}
/**
* @brief split mesh triangles into clusters
* @param mesh
* @param cluster_min_area
* @return cluster of triangle indices
*/
std::unordered_map<size_t, std::vector<size_t>>
MeshTrianglesSplit(open3d::geometry::TriangleMesh const& mesh,
double cluster_min_area) {
auto triangle_clusters_nums_areas = mesh.ClusterConnectedTriangles();
auto const& triangle_clusters = std::get<0>(triangle_clusters_nums_areas);
auto const& num_triangles = std::get<1>(triangle_clusters_nums_areas);
auto const& areas = std::get<2>(triangle_clusters_nums_areas);
// cluster idx to triangle idxs
std::unordered_map<size_t, std::vector<size_t>> cluster_idx_triangles;
// a valid cluster has an area of at least cluster_min_area
for (auto i = 0; i < num_triangles.size(); i++) {
if (areas[i] >= cluster_min_area) {
std::vector<size_t> v;
v.reserve(num_triangles[i]);
cluster_idx_triangles.insert({ i, v });
}
}
for (auto i = 0; i < triangle_clusters.size(); i++) {
if (cluster_idx_triangles.find(triangle_clusters[i]) != cluster_idx_triangles.end()) {
cluster_idx_triangles[triangle_clusters[i]].emplace_back(i);
}
}
return cluster_idx_triangles;
}
/**
* @brief split mesh into sub-mesh clusters
* @param mesh
* @param cluster_min_size
* @return cluster of meshes
*/
std::unordered_map<size_t, std::shared_ptr<open3d::geometry::TriangleMesh>>
MeshSplit(open3d::geometry::TriangleMesh const& mesh,
size_t cluster_min_size = 10) {
auto triangle_clusters_nums = ClusterTriangles(mesh);
auto const& triangle_clusters = triangle_clusters_nums.first;
auto const& num_triangles = triangle_clusters_nums.second;
// cluster idx to vertices idxs
std::unordered_map<size_t, std::unordered_set<size_t>> cluster_idx_vertices;
// a valid cluster has at least cluster_min_size triangles (default 10)
for (auto i = 0; i < num_triangles.size(); i++) {
if (num_triangles[i] >= cluster_min_size) {
std::unordered_set<size_t> v;
v.reserve(num_triangles[i] * 3);
cluster_idx_vertices.insert({ i, v });
}
}
for (auto i = 0; i < triangle_clusters.size(); i++) {
if (cluster_idx_vertices.find(triangle_clusters[i]) != cluster_idx_vertices.end()) {
cluster_idx_vertices[triangle_clusters[i]].insert(mesh.triangles_[i](0));
cluster_idx_vertices[triangle_clusters[i]].insert(mesh.triangles_[i](1));
cluster_idx_vertices[triangle_clusters[i]].insert(mesh.triangles_[i](2));
}
}
std::unordered_map<size_t, std::shared_ptr<open3d::geometry::TriangleMesh>> res;
std::vector<size_t> vertices;
for (auto const& cluster : cluster_idx_vertices) {
vertices.reserve(cluster.second.size());
for (auto const& vertex_idx : cluster.second)
vertices.push_back(vertex_idx);
res[cluster.first] = mesh.SelectByIndex(vertices, false); // triangles already unique
vertices.clear();
}
return res;
}
/**
* @brief split mesh into sub-mesh clusters
* @param mesh
* @param cluster_min_area
* @return cluster of meshes
*/
std::unordered_map<size_t, std::shared_ptr<open3d::geometry::TriangleMesh>>
MeshSplit(open3d::geometry::TriangleMesh const& mesh,
double cluster_min_area) {
auto triangle_clusters_nums_areas = mesh.ClusterConnectedTriangles();
auto const& triangle_clusters = std::get<0>(triangle_clusters_nums_areas);
auto const& num_triangles = std::get<1>(triangle_clusters_nums_areas);
auto const& areas = std::get<2>(triangle_clusters_nums_areas);
// cluster idx to vertices idxs
std::unordered_map<size_t, std::unordered_set<size_t>> cluster_idx_vertices;
// a valid cluster has an area of at least cluster_min_area
for (auto i = 0; i < num_triangles.size(); i++) {
if (areas[i] >= cluster_min_area) {
std::unordered_set<size_t> v;
v.reserve(num_triangles[i] * 3);
cluster_idx_vertices.insert({ i, v });
}
}
for (auto i = 0; i < triangle_clusters.size(); i++) {
if (cluster_idx_vertices.find(triangle_clusters[i]) != cluster_idx_vertices.end()) {
cluster_idx_vertices[triangle_clusters[i]].insert(mesh.triangles_[i](0));
cluster_idx_vertices[triangle_clusters[i]].insert(mesh.triangles_[i](1));
cluster_idx_vertices[triangle_clusters[i]].insert(mesh.triangles_[i](2));
}
}
std::unordered_map<size_t, std::shared_ptr<open3d::geometry::TriangleMesh>> res;
std::vector<size_t> vertices;
for (auto const& cluster : cluster_idx_vertices) {
vertices.reserve(cluster.second.size());
for (auto const& vertex_idx : cluster.second)
vertices.push_back(vertex_idx);
res[cluster.first] = mesh.SelectByIndex(vertices, false); // triangles already unique
vertices.clear();
}
return res;
}
/**
* @brief split mesh into sub-mesh clusters
* @param mesh
* @param cluster_min_size
* @return cluster of meshes
*/
std::vector<std::shared_ptr<open3d::geometry::TriangleMesh>>
MeshSplit2(open3d::geometry::TriangleMesh const& mesh,
size_t cluster_min_size = 10) {
auto triangle_clusters_nums = ClusterTriangles(mesh);
auto const& triangle_clusters = triangle_clusters_nums.first;
auto const& num_triangles = triangle_clusters_nums.second;
// cluster idx to vertices idxs
std::unordered_map<size_t, std::unordered_set<size_t>> cluster_idx_vertices;
cluster_idx_vertices.reserve(num_triangles.size());
// a valid cluster has at least `cluster_min_size` triangles
for (auto i = 0; i < num_triangles.size(); i++) {
if (num_triangles[i] >= cluster_min_size) {
std::unordered_set<size_t> v;
v.reserve(num_triangles[i] * 3);
cluster_idx_vertices.insert({ i, v });
}
}
for (auto i = 0; i < triangle_clusters.size(); i++) {
if (cluster_idx_vertices.find(triangle_clusters[i]) != cluster_idx_vertices.end()) {
cluster_idx_vertices[triangle_clusters[i]].insert(mesh.triangles_[i](0));
cluster_idx_vertices[triangle_clusters[i]].insert(mesh.triangles_[i](1));
cluster_idx_vertices[triangle_clusters[i]].insert(mesh.triangles_[i](2));
}
}
std::vector<std::shared_ptr<open3d::geometry::TriangleMesh>> res;
res.reserve(cluster_idx_vertices.size());
std::vector<size_t> vertices;
for (auto const& cluster : cluster_idx_vertices) {
vertices.reserve(cluster.second.size());
for (auto const& vertex_idx : cluster.second)
vertices.push_back(vertex_idx);
res.push_back(mesh.SelectByIndex(vertices, false)); // triangles already unique
vertices.clear();
}
return res;
}
/**
* @brief split mesh into sub-mesh clusters
* @param mesh
* @param cluster_min_area
* @return cluster of meshes
*/
std::vector<std::shared_ptr<open3d::geometry::TriangleMesh>>
MeshSplit2(open3d::geometry::TriangleMesh const& mesh,
double cluster_min_area) {
auto triangle_clusters_nums_areas = mesh.ClusterConnectedTriangles();
auto const& triangle_clusters = std::get<0>(triangle_clusters_nums_areas);
auto const& num_triangles = std::get<1>(triangle_clusters_nums_areas);
auto const& areas = std::get<2>(triangle_clusters_nums_areas);
// cluster idx to vertex indices
std::unordered_map<size_t, std::unordered_set<size_t>> cluster_idx_vertices;
cluster_idx_vertices.reserve(num_triangles.size());
// a valid cluster has an area of at least cluster_min_area
for (auto i = 0; i < num_triangles.size(); i++) {
if (areas[i] >= cluster_min_area) {
std::unordered_set<size_t> v;
v.reserve(num_triangles[i] * 3);
cluster_idx_vertices.insert({ i, v });
}
}
for (auto i = 0; i < triangle_clusters.size(); i++) {
if (cluster_idx_vertices.find(triangle_clusters[i]) != cluster_idx_vertices.end()) {
cluster_idx_vertices[triangle_clusters[i]].insert(mesh.triangles_[i](0));
cluster_idx_vertices[triangle_clusters[i]].insert(mesh.triangles_[i](1));
cluster_idx_vertices[triangle_clusters[i]].insert(mesh.triangles_[i](2));
}
}
std::vector<std::shared_ptr<open3d::geometry::TriangleMesh>> res;
res.reserve(cluster_idx_vertices.size());
std::vector<size_t> vertices;
for (auto const& cluster : cluster_idx_vertices) {
vertices.reserve(cluster.second.size());
for (auto const& vertex_idx : cluster.second)
vertices.push_back(vertex_idx);
res.push_back(mesh.SelectByIndex(vertices, false)); // triangles already unique
vertices.clear();
}
return res;
}
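/*
 * Usage sketch (illustrative): keep connected components with at least
 * 100 triangles. The explicit size_t cast selects this count-based
 * overload; a plain integer literal would be ambiguous against the
 * area-based (double) overload.
 *
 *   auto parts = M_MATH::MeshSplit2(mesh, static_cast<size_t>(100));
 *   // each element is a std::shared_ptr<open3d::geometry::TriangleMesh>
 */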
/**
* @brief crop mesh with input bounding box
* @param mesh
* @param bbox
* @param get_outside if true, return mesh outside the bbox, otherwise, return mesh inside the bbox
* @return cropped mesh
*/
std::shared_ptr<open3d::geometry::TriangleMesh>
MeshCrop(open3d::geometry::TriangleMesh const& mesh,
open3d::geometry::OrientedBoundingBox const& bbox,
bool get_outside = true) {
if (bbox.IsEmpty())
throw std::invalid_argument("MeshCrop: bounding box is empty");
if (!get_outside)
return mesh.Crop(bbox);
auto const& points = mesh.vertices_;
std::vector<size_t> indices;
indices.reserve(points.size());
Eigen::Vector3d dx(1, 0, 0);
Eigen::Vector3d dy(0, 1, 0);
Eigen::Vector3d dz(0, 0, 1);
for (size_t idx = 0; idx < points.size(); idx++) {
Eigen::Vector3d d = points[idx] - bbox.center_;
if (std::abs(d.dot(dx)) > bbox.extent_(0) / 2 ||
std::abs(d.dot(dy)) > bbox.extent_(1) / 2 ||
std::abs(d.dot(dz)) > bbox.extent_(2) / 2) {
indices.push_back(idx);
}
}
return mesh.SelectByIndex(indices);
}
/**
* @brief crop mesh with input bounding box
* @param mesh
* @param bbox
* @param get_outside if true, return mesh outside the bbox, otherwise, return mesh inside the bbox
* @return cropped mesh
*/
std::shared_ptr<open3d::geometry::TriangleMesh>
MeshCrop(open3d::geometry::TriangleMesh const& mesh,
open3d::geometry::AxisAlignedBoundingBox const& bbox,
bool get_outside = true) {
if (bbox.IsEmpty())
throw std::invalid_argument("MeshCrop: bounding box is empty");
if (!get_outside)
return mesh.Crop(bbox);
auto const& points = mesh.vertices_;
std::vector<size_t> indices;
indices.reserve(points.size());
for (size_t idx = 0; idx < points.size(); idx++) {
const auto& point = points[idx];
if (point(0) < bbox.min_bound_(0) || point(0) > bbox.max_bound_(0) ||
point(1) < bbox.min_bound_(1) || point(1) > bbox.max_bound_(1) ||
point(2) < bbox.min_bound_(2) || point(2) > bbox.max_bound_(2)) {
indices.push_back(idx);
}
}
return mesh.SelectByIndex(indices);
}
}
#endif //M_MATH_M_MESH_SPLIT_H
|
VolumetricFractionalMaxPooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/VolumetricFractionalMaxPooling.c"
#else
static int64_t* THNN_(VolumetricFractionalMaxPooling_generateIntervals)(
scalar_t sample,
int64_t inputSize,
int64_t outputSize,
int poolSize) {
scalar_t alpha = (scalar_t) (inputSize - poolSize) / (scalar_t) (outputSize - 1);
int64_t* sequence = (int64_t*) THAlloc(sizeof(int64_t) * outputSize);
int64_t i;
for (i = 0; i < outputSize - 1; ++i) {
sequence[i] =
(int64_t) ((i + sample) * alpha) - (int64_t) (sample * alpha);
}
sequence[outputSize - 1] = inputSize - poolSize;
return sequence;
}
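/* Worked example (illustrative): inputSize = 10, outputSize = 5,
poolSize = 2 gives alpha = (10 - 2) / (5 - 1) = 2; with sample = 0.5 the
sequence is {0, 2, 4, 6}, the last start is pinned at
inputSize - poolSize = 8, and the five windows of width 2 tile [0, 10).
Note the formula assumes outputSize > 1 (alpha divides by outputSize - 1). */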
static void THNN_(VolumetricFractionalMaxPooling_updateOutput_frame)(
scalar_t* input,
scalar_t* output,
THIndex_t* indices,
scalar_t* randomSamples,
int64_t numPlanes,
int64_t inputT, int64_t inputW, int64_t inputH,
int64_t outputT, int64_t outputW, int64_t outputH,
int poolSizeT, int poolSizeW, int poolSizeH) {
int64_t plane;
#pragma omp parallel for private(plane)
for (plane = 0; plane < numPlanes; ++plane) {
/* each plane contains 3 random samples, one for T, one for W, and one for H */
scalar_t* randomSamplesForPlane = randomSamples + plane * 3;
/* Generate interval sequence */
int64_t* sequenceT =
THNN_(VolumetricFractionalMaxPooling_generateIntervals)(
randomSamplesForPlane[0], inputT, outputT, poolSizeT);
int64_t* sequenceW =
THNN_(VolumetricFractionalMaxPooling_generateIntervals)(
randomSamplesForPlane[1], inputW, outputW, poolSizeW);
int64_t* sequenceH =
THNN_(VolumetricFractionalMaxPooling_generateIntervals)(
randomSamplesForPlane[2], inputH, outputH, poolSizeH);
/* loop over output */
int64_t h, w, t;
scalar_t* inputForPlane = input + plane * inputT * inputW * inputH;
scalar_t* outputForPlane = output + plane * outputT * outputW * outputH;
THIndex_t* indicesForPlane = indices + plane * outputT * outputW * outputH;
for (h = 0; h < outputH; ++h) {
int64_t inputHStart = sequenceH[h];
for (w = 0; w < outputW; ++w) {
int64_t inputWStart = sequenceW[w];
for (t = 0; t < outputT; ++t) {
int64_t inputTStart = sequenceT[t];
scalar_t maxVal = -THInf;
int64_t maxIndex = -1;
int64_t h2, w2, t2;
for (h2 = inputHStart; h2 < inputHStart + poolSizeH; ++h2) {
for (w2 = inputWStart; w2 < inputWStart + poolSizeW; ++w2) {
for (t2 = inputTStart; t2 < inputTStart + poolSizeT; ++t2) {
THAssert(h2 >= 0 && h2 < inputH);
THAssert(w2 >= 0 && w2 < inputW);
THAssert(t2 >= 0 && t2 < inputT);
int64_t planeIndex = h2 * inputW * inputT + w2 * inputT + t2;
scalar_t val = inputForPlane[planeIndex];
if (val > maxVal) {
maxVal = val;
maxIndex = planeIndex;
}
}
}
}
THAssert(maxVal != -THInf);
THAssert(maxIndex != -1);
outputForPlane[h * outputW * outputT + w * outputT + t] = maxVal;
/* +1 to lua index */
indicesForPlane[h * outputW * outputT + w * outputT + t] = maxIndex + TH_INDEX_BASE;
}
}
}
THFree(sequenceT);
THFree(sequenceW);
THFree(sequenceH);
}
}
void THNN_(VolumetricFractionalMaxPooling_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
int outputT, int outputW, int outputH,
int poolSizeT, int poolSizeW, int poolSizeH,
THIndexTensor *indices,
THTensor *randomSamples) {
int64_t numBatch = 1;
int planeDim = 0;
int heightDim = 1;
int widthDim = 2;
int timeDim = 3;
int64_t numInputDims = THTensor_(nDimensionLegacyNoScalars)(input);
THNN_ARGCHECK(!input->is_empty() && (numInputDims == 4 || numInputDims == 5), 2, input,
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s");
if (numInputDims == 5) {
numBatch = THTensor_(size)(input, 0);
planeDim++;
heightDim++;
widthDim++;
timeDim++;
}
/* sizes */
int64_t numPlanes = THTensor_(size)(input, planeDim);
int64_t inputH = THTensor_(size)(input, heightDim);
int64_t inputW = THTensor_(size)(input, widthDim);
int64_t inputT = THTensor_(size)(input, timeDim);
THArgCheck(outputH + poolSizeH - 1 < inputH, 9,
"poolSizeH (%d) too large relative to input height (%d)",
poolSizeH, inputH);
THArgCheck(outputW + poolSizeW - 1 < inputW, 8,
"poolSizeW (%d) too large relative to input width (%d)",
poolSizeW, inputW);
THArgCheck(outputT + poolSizeT - 1 < inputT, 7,
"poolSizeT (%d) too large relative to input time (%d)",
poolSizeT, inputT);
/* get contiguous input */
input = THTensor_(newContiguous)(input);
if (numInputDims == 4) {
/* resize output */
THTensor_(resize4d)(output, numPlanes, outputH, outputW, outputT);
/* indices will contain the locations for each output point */
THIndexTensor_(resize4d)(indices, numPlanes, outputH, outputW, outputT);
THNN_(VolumetricFractionalMaxPooling_updateOutput_frame)(
input->data<scalar_t>(),
output->data<scalar_t>(),
THIndexTensor_(data)(indices),
randomSamples->data<scalar_t>(),
numPlanes, inputT, inputW, inputH,
outputT, outputW, outputH, poolSizeT, poolSizeW, poolSizeH);
} else {
THTensor_(resize5d)(output, numBatch, numPlanes, outputH, outputW, outputT);
/* indices will contain the locations for each output point */
THIndexTensor_(resize5d)(indices, numBatch, numPlanes, outputH, outputW, outputT);
int64_t batch;
#pragma omp parallel for private(batch)
for (batch = 0; batch < numBatch; ++batch) {
THNN_(VolumetricFractionalMaxPooling_updateOutput_frame)(
input->data<scalar_t>() + batch * numPlanes * inputH * inputW * inputT,
output->data<scalar_t>() + batch * numPlanes * outputH * outputW * outputT,
THIndexTensor_(data)(indices) + batch * numPlanes * outputH * outputW * outputT,
randomSamples->data<scalar_t>() + batch * numPlanes * 3,
numPlanes, inputT, inputW, inputH,
outputT, outputW, outputH, poolSizeT, poolSizeW, poolSizeH);
}
}
/* cleanup */
c10::raw::intrusive_ptr::decref(input);
}
static void THNN_(VolumetricFractionalMaxPooling_updateGradInput_frame)(
scalar_t* gradInput,
scalar_t* gradOutput,
THIndex_t* indices,
int64_t numPlanes,
int64_t inputT, int64_t inputW, int64_t inputH,
int64_t outputT, int64_t outputW, int64_t outputH) {
int64_t plane;
#pragma omp parallel for private(plane)
for (plane = 0; plane < numPlanes; plane++) {
scalar_t* gradInputForPlane = gradInput + plane * inputT * inputW * inputH;
scalar_t* gradOutputForPlane = gradOutput + plane * outputT * outputW * outputH;
THIndex_t* indicesForPlane = indices + plane * outputT * outputW * outputH;
int64_t h, w, t;
for (h = 0; h < outputH; ++h) {
for (w = 0; w < outputW; ++w) {
for (t = 0; t < outputT; ++t) {
int64_t outputIndex = h * outputW * outputT + w * outputT + t;
int64_t index = indicesForPlane[outputIndex] - TH_INDEX_BASE;
THAssert(index >= 0 && index < inputT * inputW * inputH);
gradInputForPlane[index] += gradOutputForPlane[outputIndex];
}
}
}
}
}
void THNN_(VolumetricFractionalMaxPooling_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
int outputT, int outputW, int outputH,
int poolSizeT, int poolSizeW, int poolSizeH,
THIndexTensor *indices) {
int64_t numBatch = 1;
int planeDim = 0;
int heightDim = 1;
int widthDim = 2;
int timeDim = 3;
int64_t numInputDims = THTensor_(nDimensionLegacyNoScalars)(input);
if (numInputDims == 5) {
numBatch = THTensor_(size)(input, 0);
planeDim = 1;
heightDim++;
widthDim++;
timeDim++;
}
/* sizes */
int64_t numPlanes = THTensor_(size)(input, planeDim);
int64_t inputH = THTensor_(size)(input, heightDim);
int64_t inputW = THTensor_(size)(input, widthDim);
int64_t inputT = THTensor_(size)(input, timeDim);
THArgCheck(outputT == THTensor_(size)(gradOutput, timeDim), 3,
"gradOutput time unexpected");
THArgCheck(outputW == THTensor_(size)(gradOutput, widthDim), 3,
"gradOutput width unexpected");
THArgCheck(outputH == THTensor_(size)(gradOutput, heightDim), 3,
"gradOutput height unexpected");
/* get contiguous gradOutput */
gradOutput = THTensor_(newContiguous)(gradOutput);
/* resize */
THTensor_(resizeAs)(gradInput, input);
THTensor_(zero)(gradInput);
/* backprop */
if (numInputDims == 4) {
THNN_(VolumetricFractionalMaxPooling_updateGradInput_frame)(
gradInput->data<scalar_t>(),
gradOutput->data<scalar_t>(),
THIndexTensor_(data)(indices),
numPlanes, inputT, inputW, inputH, outputT, outputW, outputH);
} else {
int64_t batch;
#pragma omp parallel for private(batch)
for (batch = 0; batch < numBatch; ++batch) {
THNN_(VolumetricFractionalMaxPooling_updateGradInput_frame)(
gradInput->data<scalar_t>() + batch * numPlanes * inputH * inputW * inputT,
gradOutput->data<scalar_t>() + batch * numPlanes * outputH * outputW * outputT,
THIndexTensor_(data)(indices) + batch * numPlanes * outputH * outputW * outputT,
numPlanes, inputT, inputW, inputH, outputT, outputW, outputH);
}
}
/* cleanup */
c10::raw::intrusive_ptr::decref(gradOutput);
}
#endif
|
nanort.h | //
// NanoRT, single header only modern ray tracing kernel.
//
/*
The MIT License (MIT)
Copyright (c) 2015 Light Transport Entertainment, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef __NANORT_H__
#define __NANORT_H__
#include <vector>
#include <queue>
#include <cmath>
#include <limits>
#include <cstdlib>
#include <cstring>
#include <string>
namespace nanort {
// Parallelized BVH build is not yet fully tested,
// so turn it off if you encounter problems when building the BVH.
#define NANORT_ENABLE_PARALLEL_BUILD (0)
// Small vector class useful for multi-threaded environment.
//
// stack_container.h
//
// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//#include "base/basictypes.h"
// This allocator can be used with STL containers to provide a stack buffer
// from which to allocate memory and overflows onto the heap. This stack buffer
// would be allocated on the stack and allows us to avoid heap operations in
// some situations.
//
// STL likes to make copies of allocators, so the allocator itself can't hold
// the data. Instead, we make the creator responsible for creating a
// StackAllocator::Source which contains the data. Copying the allocator
// merely copies the pointer to this shared source, so all allocators created
// based on our allocator will share the same stack buffer.
//
// This stack buffer implementation is very simple. The first allocation that
// fits in the stack buffer will use the stack buffer. Any subsequent
// allocations will not use the stack buffer, even if there is unused room.
// This makes it appropriate for array-like containers, but the caller should
// be sure to reserve() in the container up to the stack buffer size. Otherwise
// the container will allocate a small array which will "use up" the stack
// buffer.
template <typename T, size_t stack_capacity>
class StackAllocator : public std::allocator<T> {
public:
typedef typename std::allocator<T>::pointer pointer;
typedef typename std::allocator<T>::size_type size_type;
// Backing store for the allocator. The container owner is responsible for
// maintaining this for as long as any containers using this allocator are
// live.
struct Source {
Source() : used_stack_buffer_(false) {}
// Casts the buffer in its right type.
T* stack_buffer() { return reinterpret_cast<T*>(stack_buffer_); }
const T* stack_buffer() const {
return reinterpret_cast<const T*>(stack_buffer_);
}
//
// IMPORTANT: Take care to ensure that stack_buffer_ is aligned
// since it is used to mimic an array of T.
// Be careful while declaring any unaligned types (like bool)
// before stack_buffer_.
//
// The buffer itself. It is not of type T because we don't want the
// constructors and destructors to be automatically called. Define a POD
// buffer of the right size instead.
char stack_buffer_[sizeof(T[stack_capacity])];
// Set when the stack buffer is used for an allocation. We do not track
// how much of the buffer is used, only that somebody is using it.
bool used_stack_buffer_;
};
// Used by containers when they want to refer to an allocator of type U.
template <typename U> struct rebind {
typedef StackAllocator<U, stack_capacity> other;
};
// For the straight up copy c-tor, we can share storage.
StackAllocator(const StackAllocator<T, stack_capacity>& rhs)
: source_(rhs.source_) {}
// ISO C++ requires the following constructor to be defined,
// and std::vector in VC++2008SP1 Release fails with an error
// in the class _Container_base_aux_alloc_real (from <xutility>)
// if the constructor does not exist.
// For this constructor, we cannot share storage; there's
// no guarantee that the Source buffer of Ts is large enough
// for Us.
// TODO: If we were fancy pants, perhaps we could share storage
// iff sizeof(T) == sizeof(U).
template <typename U, size_t other_capacity>
StackAllocator(const StackAllocator<U, other_capacity>& other)
: source_(NULL) {}
explicit StackAllocator(Source* source) : source_(source) {}
// Actually do the allocation. Use the stack buffer if nobody has used it yet
// and the size requested fits. Otherwise, fall through to the standard
// allocator.
pointer allocate(size_type n, void* hint = 0) {
if (source_ != NULL && !source_->used_stack_buffer_ &&
n <= stack_capacity) {
source_->used_stack_buffer_ = true;
return source_->stack_buffer();
}
else {
return std::allocator<T>::allocate(n, hint);
}
}
// Free: when trying to free the stack buffer, just mark it as free. For
// non-stack-buffer pointers, just fall though to the standard allocator.
void deallocate(pointer p, size_type n) {
if (source_ != NULL && p == source_->stack_buffer())
source_->used_stack_buffer_ = false;
else
std::allocator<T>::deallocate(p, n);
}
private:
Source* source_;
};
// A wrapper around STL containers that maintains a stack-sized buffer that the
// initial capacity of the vector is based on. Growing the container beyond the
// stack capacity will transparently overflow onto the heap. The container must
// support reserve().
//
// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
// type. This object is really intended to be used only internally. You'll want
// to use the wrappers below for different types.
template <typename TContainerType, int stack_capacity> class StackContainer {
public:
typedef TContainerType ContainerType;
typedef typename ContainerType::value_type ContainedType;
typedef StackAllocator<ContainedType, stack_capacity> Allocator;
// Allocator must be constructed before the container!
StackContainer() : allocator_(&stack_data_), container_(allocator_) {
// Make the container use the stack allocation by reserving our buffer size
// before doing anything else.
container_.reserve(stack_capacity);
}
// Getters for the actual container.
//
// Danger: any copies of this made using the copy constructor must have
// shorter lifetimes than the source. The copy will share the same allocator
// and therefore the same stack buffer as the original. Use std::copy to
// copy into a "real" container for longer-lived objects.
ContainerType& container() { return container_; }
const ContainerType& container() const { return container_; }
// Support operator-> to get to the container. This allows nicer syntax like:
// StackContainer<...> foo;
// std::sort(foo->begin(), foo->end());
ContainerType* operator->() { return &container_; }
const ContainerType* operator->() const { return &container_; }
#ifdef UNIT_TEST
// Retrieves the stack source so that unit tests can verify that the
// buffer is being used properly.
const typename Allocator::Source& stack_data() const { return stack_data_; }
#endif
protected:
typename Allocator::Source stack_data_;
Allocator allocator_;
ContainerType container_;
// DISALLOW_EVIL_CONSTRUCTORS(StackContainer);
StackContainer(const StackContainer&);
void operator=(const StackContainer&);
};
// StackString
template <size_t stack_capacity>
class StackString
: public StackContainer<
std::basic_string<char, std::char_traits<char>,
StackAllocator<char, stack_capacity> >,
stack_capacity> {
public:
StackString()
: StackContainer<std::basic_string<char, std::char_traits<char>,
StackAllocator<char, stack_capacity> >,
stack_capacity>() {}
private:
// DISALLOW_EVIL_CONSTRUCTORS(StackString);
StackString(const StackString&);
void operator=(const StackString&);
};
// StackWString
template <size_t stack_capacity>
class StackWString
: public StackContainer<
std::basic_string<wchar_t, std::char_traits<wchar_t>,
StackAllocator<wchar_t, stack_capacity> >,
stack_capacity> {
public:
StackWString()
: StackContainer<
std::basic_string<wchar_t, std::char_traits<wchar_t>,
StackAllocator<wchar_t, stack_capacity> >,
stack_capacity>() {}
private:
// DISALLOW_EVIL_CONSTRUCTORS(StackWString);
StackWString(const StackWString&);
void operator=(const StackWString&);
};
// StackVector
//
// Example:
// StackVector<int, 16> foo;
// foo->push_back(22); // we have overloaded operator->
// foo[0] = 10; // as well as operator[]
template <typename T, size_t stack_capacity>
class StackVector
: public StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
stack_capacity> {
public:
StackVector()
: StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
stack_capacity>() {}
// We need to put this in STL containers sometimes, which requires a copy
// constructor. We can't call the regular copy constructor because that will
// take the stack buffer from the original. Here, we create an empty object
// and make a stack buffer of its own.
StackVector(const StackVector<T, stack_capacity>& other)
: StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
stack_capacity>() {
this->container().assign(other->begin(), other->end());
}
StackVector<T, stack_capacity>&
operator=(const StackVector<T, stack_capacity>& other) {
this->container().assign(other->begin(), other->end());
return *this;
}
// Vectors are commonly indexed, which isn't very convenient even with
// operator-> (using "->at()" does exception stuff we don't want).
T& operator[](size_t i) { return this->container().operator[](i); }
const T& operator[](size_t i) const {
return this->container().operator[](i);
}
};
namespace {
struct float3 {
float3() {}
float3(float xx, float yy, float zz) {
x = xx;
y = yy;
z = zz;
}
float3(const float* p) {
x = p[0];
y = p[1];
z = p[2];
}
float3 operator*(float f) const { return float3(x * f, y * f, z * f); }
float3 operator-(const float3& f2) const {
return float3(x - f2.x, y - f2.y, z - f2.z);
}
float3 operator*(const float3& f2) const {
return float3(x * f2.x, y * f2.y, z * f2.z);
}
float3 operator+(const float3& f2) const {
return float3(x + f2.x, y + f2.y, z + f2.z);
}
float3& operator+=(const float3& f2) {
x += f2.x;
y += f2.y;
z += f2.z;
return (*this);
}
float3 operator/(const float3& f2) const {
return float3(x / f2.x, y / f2.y, z / f2.z);
}
float operator[](int i) const { return (&x)[i]; }
float& operator[](int i) { return (&x)[i]; }
float3 neg() { return float3(-x, -y, -z); }
float length() { return sqrtf(x * x + y * y + z * z); }
void normalize() {
float len = length();
if (fabs(len) > 1.0e-6f) {
float inv_len = 1.0f / len;
x *= inv_len;
y *= inv_len;
z *= inv_len;
}
}
float x, y, z;
// float pad; // for alignment
};
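// Note (added commentary): operator[] above indexes x/y/z through (&x)[i],
// which relies on the three members being laid out contiguously; keep any
// padding member (like the commented-out `pad`) after z.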
inline float3 operator*(float f, const float3& v) {
return float3(v.x * f, v.y * f, v.z * f);
}
inline float3 vcross(float3 a, float3 b) {
float3 c;
c[0] = a[1] * b[2] - a[2] * b[1];
c[1] = a[2] * b[0] - a[0] * b[2];
c[2] = a[0] * b[1] - a[1] * b[0];
return c;
}
inline float vdot(float3 a, float3 b) {
return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}
} // namespace
typedef struct {
float t; // hit distance along the ray
float u; // barycentric u coordinate of the hit
float v; // barycentric v coordinate of the hit
unsigned int faceID; // index of the intersected face
} Intersection;
typedef struct {
float org[3]; // must set
float dir[3]; // must set
float invDir[3]; // filled internally
int dirSign[3]; // filled internally
} Ray;
class BVHNode {
public:
BVHNode() {};
~BVHNode() {};
float bmin[3];
float bmax[3];
int flag; // 1 = leaf node, 0 = branch node
int axis;
// leaf
// data[0] = npoints
// data[1] = index
//
// branch
// data[0] = child[0]
// data[1] = child[1]
unsigned int data[2];
};
namespace {
class IsectComparator {
public:
bool operator()(const Intersection& a, const Intersection& b) const {
return a.t < b.t;
}
};
// Stores furthest intersection at top
typedef std::priority_queue<Intersection, std::vector<Intersection>,
IsectComparator> IsectVector;
template <typename T> class Matrix {
public:
void Print(T m[4][4]) {
for (int i = 0; i < 4; i++) {
printf("m[%d] = %f, %f, %f, %f\n", i, m[i][0], m[i][1], m[i][2], m[i][3]);
}
}
void Identity(T m[4][4]) {
m[0][0] = 1.0;
m[0][1] = 0.0;
m[0][2] = 0.0;
m[0][3] = 0.0;
m[1][0] = 0.0;
m[1][1] = 1.0;
m[1][2] = 0.0;
m[1][3] = 0.0;
m[2][0] = 0.0;
m[2][1] = 0.0;
m[2][2] = 1.0;
m[2][3] = 0.0;
m[3][0] = 0.0;
m[3][1] = 0.0;
m[3][2] = 0.0;
m[3][3] = 1.0;
}
void Inverse(T m[4][4]) {
/*
* codes from intel web
* cramer's rule version
*/
int i, j;
T tmp[12]; /* tmp array for pairs */
T tsrc[16]; /* array of transpose source matrix */
T det; /* determinant */
/* transpose matrix */
for (i = 0; i < 4; i++) {
tsrc[i] = m[i][0];
tsrc[i + 4] = m[i][1];
tsrc[i + 8] = m[i][2];
tsrc[i + 12] = m[i][3];
}
/* calculate pair for first 8 elements(cofactors) */
tmp[0] = tsrc[10] * tsrc[15];
tmp[1] = tsrc[11] * tsrc[14];
tmp[2] = tsrc[9] * tsrc[15];
tmp[3] = tsrc[11] * tsrc[13];
tmp[4] = tsrc[9] * tsrc[14];
tmp[5] = tsrc[10] * tsrc[13];
tmp[6] = tsrc[8] * tsrc[15];
tmp[7] = tsrc[11] * tsrc[12];
tmp[8] = tsrc[8] * tsrc[14];
tmp[9] = tsrc[10] * tsrc[12];
tmp[10] = tsrc[8] * tsrc[13];
tmp[11] = tsrc[9] * tsrc[12];
/* calculate first 8 elements(cofactors) */
m[0][0] = tmp[0] * tsrc[5] + tmp[3] * tsrc[6] + tmp[4] * tsrc[7];
m[0][0] -= tmp[1] * tsrc[5] + tmp[2] * tsrc[6] + tmp[5] * tsrc[7];
m[0][1] = tmp[1] * tsrc[4] + tmp[6] * tsrc[6] + tmp[9] * tsrc[7];
m[0][1] -= tmp[0] * tsrc[4] + tmp[7] * tsrc[6] + tmp[8] * tsrc[7];
m[0][2] = tmp[2] * tsrc[4] + tmp[7] * tsrc[5] + tmp[10] * tsrc[7];
m[0][2] -= tmp[3] * tsrc[4] + tmp[6] * tsrc[5] + tmp[11] * tsrc[7];
m[0][3] = tmp[5] * tsrc[4] + tmp[8] * tsrc[5] + tmp[11] * tsrc[6];
m[0][3] -= tmp[4] * tsrc[4] + tmp[9] * tsrc[5] + tmp[10] * tsrc[6];
m[1][0] = tmp[1] * tsrc[1] + tmp[2] * tsrc[2] + tmp[5] * tsrc[3];
m[1][0] -= tmp[0] * tsrc[1] + tmp[3] * tsrc[2] + tmp[4] * tsrc[3];
m[1][1] = tmp[0] * tsrc[0] + tmp[7] * tsrc[2] + tmp[8] * tsrc[3];
m[1][1] -= tmp[1] * tsrc[0] + tmp[6] * tsrc[2] + tmp[9] * tsrc[3];
m[1][2] = tmp[3] * tsrc[0] + tmp[6] * tsrc[1] + tmp[11] * tsrc[3];
m[1][2] -= tmp[2] * tsrc[0] + tmp[7] * tsrc[1] + tmp[10] * tsrc[3];
m[1][3] = tmp[4] * tsrc[0] + tmp[9] * tsrc[1] + tmp[10] * tsrc[2];
m[1][3] -= tmp[5] * tsrc[0] + tmp[8] * tsrc[1] + tmp[11] * tsrc[2];
/* calculate pairs for second 8 elements(cofactors) */
tmp[0] = tsrc[2] * tsrc[7];
tmp[1] = tsrc[3] * tsrc[6];
tmp[2] = tsrc[1] * tsrc[7];
tmp[3] = tsrc[3] * tsrc[5];
tmp[4] = tsrc[1] * tsrc[6];
tmp[5] = tsrc[2] * tsrc[5];
tmp[6] = tsrc[0] * tsrc[7];
tmp[7] = tsrc[3] * tsrc[4];
tmp[8] = tsrc[0] * tsrc[6];
tmp[9] = tsrc[2] * tsrc[4];
tmp[10] = tsrc[0] * tsrc[5];
tmp[11] = tsrc[1] * tsrc[4];
/* calculate second 8 elements(cofactors) */
m[2][0] = tmp[0] * tsrc[13] + tmp[3] * tsrc[14] + tmp[4] * tsrc[15];
m[2][0] -= tmp[1] * tsrc[13] + tmp[2] * tsrc[14] + tmp[5] * tsrc[15];
m[2][1] = tmp[1] * tsrc[12] + tmp[6] * tsrc[14] + tmp[9] * tsrc[15];
m[2][1] -= tmp[0] * tsrc[12] + tmp[7] * tsrc[14] + tmp[8] * tsrc[15];
m[2][2] = tmp[2] * tsrc[12] + tmp[7] * tsrc[13] + tmp[10] * tsrc[15];
m[2][2] -= tmp[3] * tsrc[12] + tmp[6] * tsrc[13] + tmp[11] * tsrc[15];
m[2][3] = tmp[5] * tsrc[12] + tmp[8] * tsrc[13] + tmp[11] * tsrc[14];
m[2][3] -= tmp[4] * tsrc[12] + tmp[9] * tsrc[13] + tmp[10] * tsrc[14];
m[3][0] = tmp[2] * tsrc[10] + tmp[5] * tsrc[11] + tmp[1] * tsrc[9];
m[3][0] -= tmp[4] * tsrc[11] + tmp[0] * tsrc[9] + tmp[3] * tsrc[10];
m[3][1] = tmp[8] * tsrc[11] + tmp[0] * tsrc[8] + tmp[7] * tsrc[10];
m[3][1] -= tmp[6] * tsrc[10] + tmp[9] * tsrc[11] + tmp[1] * tsrc[8];
m[3][2] = tmp[6] * tsrc[9] + tmp[11] * tsrc[11] + tmp[3] * tsrc[8];
m[3][2] -= tmp[10] * tsrc[11] + tmp[2] * tsrc[8] + tmp[7] * tsrc[9];
m[3][3] = tmp[10] * tsrc[10] + tmp[4] * tsrc[8] + tmp[9] * tsrc[9];
m[3][3] -= tmp[8] * tsrc[9] + tmp[11] * tsrc[10] + tmp[5] * tsrc[8];
/* calculate determinant */
det = tsrc[0] * m[0][0] + tsrc[1] * m[0][1] + tsrc[2] * m[0][2] +
tsrc[3] * m[0][3];
/* calculate matrix inverse */
det = 1.0 / det;
for (j = 0; j < 4; j++) {
for (i = 0; i < 4; i++) {
m[j][i] *= det;
}
}
}
void Transpose(T m[4][4]) {
T t[4][4];
// Transpose
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
t[j][i] = m[i][j];
}
}
// Copy
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
m[j][i] = t[j][i];
}
}
}
void Mult(T dst[4][4], const T m0[4][4], const T m1[4][4]) {
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 4; ++j) {
dst[i][j] = 0;
for (int k = 0; k < 4; ++k) {
dst[i][j] += m0[k][j] * m1[i][k];
}
}
}
}
void MultV(T dst[3], const T m[4][4], const T v[3]) {
T tmp[3];
tmp[0] = m[0][0] * v[0] + m[1][0] * v[1] + m[2][0] * v[2] + m[3][0];
tmp[1] = m[0][1] * v[0] + m[1][1] * v[1] + m[2][1] * v[2] + m[3][1];
tmp[2] = m[0][2] * v[0] + m[1][2] * v[1] + m[2][2] * v[2] + m[3][2];
dst[0] = tmp[0];
dst[1] = tmp[1];
dst[2] = tmp[2];
}
void MultV(float3& dst, const T m[4][4], const float3& v) {
T tmp[3];
tmp[0] = m[0][0] * v[0] + m[1][0] * v[1] + m[2][0] * v[2] + m[3][0];
tmp[1] = m[0][1] * v[0] + m[1][1] * v[1] + m[2][1] * v[2] + m[3][1];
tmp[2] = m[0][2] * v[0] + m[1][2] * v[1] + m[2][2] * v[2] + m[3][2];
dst[0] = tmp[0];
dst[1] = tmp[1];
dst[2] = tmp[2];
}
};
} // namespace
///< BVH build option.
struct BVHBuildOptions {
float costTaabb; // relative SAH cost of an AABB test (triangle test = 1 - costTaabb)
int minLeafPrimitives; // create a leaf when fewer primitives than this remain
int maxTreeDepth;
int binSize; // number of SAH bins per axis
int shallowDepth; // depth of the serial shallow tree used for parallel build
size_t minPrimitivesForParallelBuild;
// Cache bounding box computation.
// Requires more memory, but the BVH build can be faster.
bool cacheBBox;
// Set default value: Taabb = 0.2
BVHBuildOptions()
: costTaabb(0.2f), minLeafPrimitives(4), maxTreeDepth(256), binSize(64),
shallowDepth(3), minPrimitivesForParallelBuild(1024 * 128),
cacheBBox(false) {}
};
///< BVH build statistics.
class BVHBuildStatistics {
public:
int maxTreeDepth;
int numLeafNodes;
int numBranchNodes;
float epsScale;
double buildSecs;
// Set default values.
BVHBuildStatistics()
: maxTreeDepth(0), numLeafNodes(0), numBranchNodes(0), epsScale(1.0f),
buildSecs(0.0) {}
};
///< BVH trace option.
class BVHTraceOptions {
public:
// Hit only faces whose IDs lie within faceIdsRange.
// This feature is useful for mimicking something like glDrawArrays().
unsigned int faceIdsRange[2];
BVHTraceOptions() {
faceIdsRange[0] = 0;
faceIdsRange[1] = 0x7FFFFFFF; // Up to 2G face IDs.
}
};
class BBox {
public:
float bmin[3];
float bmax[3];
BBox() {
bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<float>::max();
bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<float>::max();
}
};
class BVHAccel {
public:
BVHAccel() : epsScale_(1.0f) {};
~BVHAccel() {};
///< Build BVH for input mesh.
bool Build(const float* vertices, const unsigned int* faces,
const unsigned int numFaces, const BVHBuildOptions& options);
///< Get statistics of built BVH tree. Valid after Build()
BVHBuildStatistics GetStatistics() const { return stats_; }
///< Dump the built BVH to a file.
bool Dump(const char* filename);
///< Load a BVH binary.
bool Load(const char* filename);
///< Traverse the BVH along the ray and find the closest hit point, if any.
bool Traverse(Intersection& isect, const float* vertices,
const unsigned int* faces, const Ray& ray, const BVHTraceOptions& options);
///< Multi-hit ray traversal.
///< Returns `maxIntersections` frontmost intersections
bool MultiHitTraverse(StackVector<Intersection, 128>& isects,
int maxIntersections, const float* vertices,
const unsigned int* faces, Ray& ray);
const std::vector<BVHNode>& GetNodes() const { return nodes_; }
const std::vector<unsigned int>& GetIndices() const { return indices_; }
void BoundingBox(float bmin[3], float bmax[3]) const {
if (nodes_.empty()) {
bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<float>::max();
bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<float>::max();
}
else {
bmin[0] = nodes_[0].bmin[0];
bmin[1] = nodes_[0].bmin[1];
bmin[2] = nodes_[0].bmin[2];
bmax[0] = nodes_[0].bmax[0];
bmax[1] = nodes_[0].bmax[1];
bmax[2] = nodes_[0].bmax[2];
}
}
private:
#if NANORT_ENABLE_PARALLEL_BUILD
typedef struct {
unsigned int leftIdx;
unsigned int rightIdx;
unsigned int offset;
} ShallowNodeInfo;
// Used only during BVH construction
std::vector<ShallowNodeInfo> shallowNodeInfos_;
///< Builds shallow BVH tree recursively.
unsigned int BuildShallowTree(std::vector<BVHNode>& outNodes,
const float* vertices,
const unsigned int* faces, unsigned int leftIdx,
unsigned int rightIdx, int depth,
int maxShallowDepth, float epsScale);
#endif
///< Builds BVH tree recursively.
size_t BuildTree(BVHBuildStatistics& outStat, std::vector<BVHNode>& outNodes,
const float* vertices, const unsigned int* faces,
unsigned int leftIdx, unsigned int rightIdx, int depth,
float epsScale);
BVHBuildOptions options_;
std::vector<BVHNode> nodes_;
std::vector<unsigned int> indices_; // max 4G triangles.
BVHBuildStatistics stats_;
float epsScale_;
std::vector<BBox> bboxes_;
};
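// Usage sketch (added commentary; `vertices`, `faces`, and `numFaces` are
// caller-supplied, with `vertices` holding xyz triples and `faces` holding
// vertex-index triples):
//
//   nanort::BVHAccel accel;
//   nanort::BVHBuildOptions buildOptions; // defaults: costTaabb = 0.2, ...
//   accel.Build(vertices, faces, numFaces, buildOptions);
//
//   nanort::Ray ray;
//   ray.org[0] = 0.0f; ray.org[1] = 0.0f; ray.org[2] = 10.0f;
//   ray.dir[0] = 0.0f; ray.dir[1] = 0.0f; ray.dir[2] = -1.0f;
//
//   nanort::Intersection isect;
//   nanort::BVHTraceOptions traceOptions; // accepts all face IDs by default
//   if (accel.Traverse(isect, vertices, faces, ray, traceOptions)) {
//     // isect.t / isect.u / isect.v / isect.faceID describe the nearest hit.
//   }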
#if 0
class BVHBox
{
}
class Scene
{
std::vector<BVHBox> nodes_;
};
#endif
} // namespace nanort
#ifdef NANORT_IMPLEMENTATION
#include <limits>
#include <cassert>
#include <algorithm>
#include <functional>
//
// SAH functions
//
namespace nanort {
struct BinBuffer {
BinBuffer(int size) {
binSize = size;
bin.resize(2 * 3 * size);
clear();
}
void clear() { memset(&bin[0], 0, sizeof(size_t) * 2 * 3 * binSize); }
std::vector<size_t> bin; // (min, max) * xyz * binsize
int binSize;
};
inline float CalculateSurfaceArea(const float3& min, const float3& max) {
float3 box = max - min;
return 2.0f * (box[0] * box[1] + box[1] * box[2] + box[2] * box[0]);
}
inline void GetBoundingBoxOfTriangle(float3& bmin, float3& bmax,
const float* vertices,
const unsigned int* faces,
unsigned int index) {
unsigned int f0 = faces[3 * index + 0];
unsigned int f1 = faces[3 * index + 1];
unsigned int f2 = faces[3 * index + 2];
float3 p[3];
p[0] = float3(&vertices[3 * f0]);
p[1] = float3(&vertices[3 * f1]);
p[2] = float3(&vertices[3 * f2]);
bmin = p[0];
bmax = p[0];
for (int i = 1; i < 3; i++) {
bmin[0] = std::min(bmin[0], p[i][0]);
bmin[1] = std::min(bmin[1], p[i][1]);
bmin[2] = std::min(bmin[2], p[i][2]);
bmax[0] = std::max(bmax[0], p[i][0]);
bmax[1] = std::max(bmax[1], p[i][1]);
bmax[2] = std::max(bmax[2], p[i][2]);
}
}
void ContributeBinBuffer(BinBuffer* bins, // [out]
const float3& sceneMin, const float3& sceneMax,
const float* vertices, const unsigned int* faces,
unsigned int* indices, unsigned int leftIdx,
unsigned int rightIdx, float epsScale) {
const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;
float binSize = (float)bins->binSize;
// Calculate extent
float3 sceneSize, sceneInvSize;
sceneSize = sceneMax - sceneMin;
for (int i = 0; i < 3; ++i) {
assert(sceneSize[i] >= 0.0);
if (sceneSize[i] > kEPS) {
sceneInvSize[i] = binSize / sceneSize[i];
}
else {
sceneInvSize[i] = 0.0;
}
}
// Clear bin data
std::fill(bins->bin.begin(), bins->bin.end(), 0);
// memset(&bins->bin[0], 0, sizeof(2 * 3 * bins->binSize));
size_t idxBMin[3];
size_t idxBMax[3];
for (size_t i = leftIdx; i < rightIdx; i++) {
//
// Quantize the position into [0, BIN_SIZE)
//
// q[i] = (int)(p[i] - scene_bmin) / scene_size
//
float3 bmin;
float3 bmax;
GetBoundingBoxOfTriangle(bmin, bmax, vertices, faces, indices[i]);
float3 quantizedBMin = (bmin - sceneMin) * sceneInvSize;
float3 quantizedBMax = (bmax - sceneMin) * sceneInvSize;
// idx is now in [0, BIN_SIZE)
for (int j = 0; j < 3; ++j) {
int q0 = (int)quantizedBMin[j];
if (q0 < 0)
q0 = 0;
int q1 = (int)quantizedBMax[j];
if (q1 < 0)
q1 = 0;
idxBMin[j] = (unsigned int)q0;
idxBMax[j] = (unsigned int)q1;
if (idxBMin[j] >= binSize)
idxBMin[j] = (size_t)binSize - 1;
if (idxBMax[j] >= binSize)
idxBMax[j] = (size_t)binSize - 1;
assert(idxBMin[j] < binSize);
assert(idxBMax[j] < binSize);
// Increment bin counter
bins->bin[0 * (bins->binSize * 3) + j * bins->binSize + idxBMin[j]] += 1;
bins->bin[1 * (bins->binSize * 3) + j * bins->binSize + idxBMax[j]] += 1;
}
}
}
inline float SAH(size_t ns1, float leftArea, size_t ns2, float rightArea,
float invS, float Taabb, float Ttri) {
// const float Taabb = 0.2f;
// const float Ttri = 0.8f;
float T;
T = 2.0f * Taabb + (leftArea * invS) * (float)(ns1)*Ttri +
(rightArea * invS) * (float)(ns2)*Ttri;
return T;
}
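// Added commentary on the cost model: this is the standard binned SAH
// estimate. Descending into the two children costs 2 * Taabb, and
// intersecting the ns1/ns2 triangles costs Ttri each, weighted by
// leftArea * invS and rightArea * invS -- the geometric probability that a
// ray hitting the parent box also hits that child.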
bool FindCutFromBinBuffer(float* cutPos, // [out] xyz
int& minCostAxis, // [out]
const BinBuffer* bins, const float3& bmin,
const float3& bmax, size_t numTriangles,
float costTaabb, // should be in [0.0, 1.0]
float epsScale) {
const float eps = std::numeric_limits<float>::epsilon() * epsScale;
size_t left, right;
float3 bsize, bstep;
float3 bminLeft, bmaxLeft;
float3 bminRight, bmaxRight;
float saLeft, saRight, saTotal;
float pos;
float minCost[3];
float costTtri = 1.0f - costTaabb;
minCostAxis = 0;
bsize = bmax - bmin;
bstep = bsize * (1.0f / bins->binSize);
saTotal = CalculateSurfaceArea(bmin, bmax);
float invSaTotal = 0.0f;
if (saTotal > eps) {
invSaTotal = 1.0f / saTotal;
}
for (int j = 0; j < 3; ++j) {
//
// Compute SAH cost for right side of each cell of the bbox.
// Exclude both extreme side of the bbox.
//
// i: 0 1 2 3
// +----+----+----+----+----+
// | | | | | |
// +----+----+----+----+----+
//
float minCostPos = bmin[j] + 0.5f * bstep[j];
minCost[j] = std::numeric_limits<float>::max();
left = 0;
right = numTriangles;
bminLeft = bminRight = bmin;
bmaxLeft = bmaxRight = bmax;
for (int i = 0; i < bins->binSize - 1; ++i) {
left += bins->bin[0 * (3 * bins->binSize) + j * bins->binSize + i];
right -= bins->bin[1 * (3 * bins->binSize) + j * bins->binSize + i];
assert(left <= numTriangles);
assert(right <= numTriangles);
//
// Split candidate: bmin + (i + 0.5) * (bsize / BIN_SIZE).
// The 0.5 offset places the candidate position inside cell i.
//
pos = bmin[j] + (i + 0.5f) * bstep[j];
bmaxLeft[j] = pos;
bminRight[j] = pos;
saLeft = CalculateSurfaceArea(bminLeft, bmaxLeft);
saRight = CalculateSurfaceArea(bminRight, bmaxRight);
float cost =
SAH(left, saLeft, right, saRight, invSaTotal, costTaabb, costTtri);
if (cost < minCost[j]) {
//
// Update the min cost
//
minCost[j] = cost;
minCostPos = pos;
// minCostAxis = j;
}
}
cutPos[j] = minCostPos;
}
// cutAxis = minCostAxis;
// cutPos = minCostPos;
// Find min cost axis
float cost = minCost[0];
minCostAxis = 0;
if (cost > minCost[1]) {
minCostAxis = 1;
cost = minCost[1];
}
if (cost > minCost[2]) {
minCostAxis = 2;
cost = minCost[2];
}
return true;
}
class SAHPred : public std::unary_function<unsigned int, bool> {
public:
SAHPred(int axis, float pos, const float* vertices, const unsigned int* faces)
: axis_(axis), pos_(pos), vertices_(vertices), faces_(faces) {}
bool operator()(unsigned int i) const {
int axis = axis_;
float pos = pos_;
unsigned int i0 = faces_[3 * i + 0];
unsigned int i1 = faces_[3 * i + 1];
unsigned int i2 = faces_[3 * i + 2];
float3 p0(&vertices_[3 * i0]);
float3 p1(&vertices_[3 * i1]);
float3 p2(&vertices_[3 * i2]);
float center = p0[axis] + p1[axis] + p2[axis];
return (center < pos * 3.0f);
}
private:
int axis_;
float pos_;
const float* vertices_;
const unsigned int* faces_;
};
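// Note (added commentary): SAHPred compares the *sum* of the three vertex
// coordinates against pos * 3.0f, which classifies the triangle by its
// centroid without paying for a division by three per triangle.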
#ifdef _OPENMP
void ComputeBoundingBoxOMP(float3& bmin, float3& bmax, const float* vertices,
const unsigned int* faces, unsigned int* indices,
unsigned int leftIndex, unsigned int rightIndex,
float epsScale) {
const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;
long long i = leftIndex;
long long idx = indices[i];
long long n = rightIndex - leftIndex;
bmin[0] = vertices[3 * faces[3 * idx + 0] + 0] - kEPS;
bmin[1] = vertices[3 * faces[3 * idx + 0] + 1] - kEPS;
bmin[2] = vertices[3 * faces[3 * idx + 0] + 2] - kEPS;
bmax[0] = vertices[3 * faces[3 * idx + 0] + 0] + kEPS;
bmax[1] = vertices[3 * faces[3 * idx + 0] + 1] + kEPS;
bmax[2] = vertices[3 * faces[3 * idx + 0] + 2] + kEPS;
float local_bmin[3] = { bmin[0], bmin[1], bmin[2] };
float local_bmax[3] = { bmax[0], bmax[1], bmax[2] };
#pragma omp parallel firstprivate(local_bmin, local_bmax) if (n > (1024 * 128))
{
#pragma omp for
for (i = leftIndex; i < rightIndex; i++) { // for each faces
size_t idx = indices[i];
for (int j = 0; j < 3; j++) { // for each face vertex
size_t fid = faces[3 * idx + j];
for (int k = 0; k < 3; k++) { // xyz
float minval = vertices[3 * fid + k] - kEPS;
float maxval = vertices[3 * fid + k] + kEPS;
if (local_bmin[k] > minval)
local_bmin[k] = minval;
if (local_bmax[k] < maxval)
local_bmax[k] = maxval;
}
}
}
#pragma omp critical
{
for (int k = 0; k < 3; k++) {
if (local_bmin[k] < bmin[k]) {
bmin[k] = local_bmin[k];
}
if (local_bmax[k] > bmax[k]) {
bmax[k] = local_bmax[k];
}
}
}
}
}
#endif
void ComputeBoundingBox(float3& bmin, float3& bmax, const float* vertices,
const unsigned int* faces, unsigned int* indices,
unsigned int leftIndex, unsigned int rightIndex,
float epsScale) {
const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;
long long i = leftIndex;
long long idx = indices[i];
bmin[0] = vertices[3 * faces[3 * idx + 0] + 0] - kEPS;
bmin[1] = vertices[3 * faces[3 * idx + 0] + 1] - kEPS;
bmin[2] = vertices[3 * faces[3 * idx + 0] + 2] - kEPS;
bmax[0] = vertices[3 * faces[3 * idx + 0] + 0] + kEPS;
bmax[1] = vertices[3 * faces[3 * idx + 0] + 1] + kEPS;
bmax[2] = vertices[3 * faces[3 * idx + 0] + 2] + kEPS;
float local_bmin[3] = { bmin[0], bmin[1], bmin[2] };
float local_bmax[3] = { bmax[0], bmax[1], bmax[2] };
{
for (i = leftIndex; i < rightIndex; i++) { // for each faces
size_t idx = indices[i];
for (int j = 0; j < 3; j++) { // for each face vertex
size_t fid = faces[3 * idx + j];
for (int k = 0; k < 3; k++) { // xyz
float minval = vertices[3 * fid + k] - kEPS;
float maxval = vertices[3 * fid + k] + kEPS;
if (local_bmin[k] > minval)
local_bmin[k] = minval;
if (local_bmax[k] < maxval)
local_bmax[k] = maxval;
}
}
}
for (int k = 0; k < 3; k++) {
bmin[k] = local_bmin[k];
bmax[k] = local_bmax[k];
}
}
}
void GetBoundingBox(float3& bmin, float3& bmax, std::vector<BBox>& bboxes,
unsigned int* indices, unsigned int leftIndex,
unsigned int rightIndex, float epsScale) {
const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;
long long i = leftIndex;
long long idx = indices[i];
bmin[0] = bboxes[idx].bmin[0] - kEPS;
bmin[1] = bboxes[idx].bmin[1] - kEPS;
bmin[2] = bboxes[idx].bmin[2] - kEPS;
bmax[0] = bboxes[idx].bmax[0] + kEPS;
bmax[1] = bboxes[idx].bmax[1] + kEPS;
bmax[2] = bboxes[idx].bmax[2] + kEPS;
float local_bmin[3] = { bmin[0], bmin[1], bmin[2] };
float local_bmax[3] = { bmax[0], bmax[1], bmax[2] };
{
for (i = leftIndex; i < rightIndex; i++) { // for each faces
size_t idx = indices[i];
for (int k = 0; k < 3; k++) { // xyz
float minval = bboxes[idx].bmin[k] - kEPS;
float maxval = bboxes[idx].bmax[k] + kEPS;
if (local_bmin[k] > minval)
local_bmin[k] = minval;
if (local_bmax[k] < maxval)
local_bmax[k] = maxval;
}
}
for (int k = 0; k < 3; k++) {
bmin[k] = local_bmin[k];
bmax[k] = local_bmax[k];
}
}
}
//
// --
//
#if NANORT_ENABLE_PARALLEL_BUILD
unsigned int BVHAccel::BuildShallowTree(std::vector<BVHNode>& outNodes,
const float* vertices,
const unsigned int* faces,
unsigned int leftIdx,
unsigned int rightIdx, int depth,
int maxShallowDepth, float epsScale) {
assert(leftIdx <= rightIdx);
unsigned int offset = outNodes.size();
if (stats_.maxTreeDepth < depth) {
stats_.maxTreeDepth = depth;
}
float3 bmin, bmax;
ComputeBoundingBox(bmin, bmax, vertices, faces, &indices_.at(0), leftIdx,
rightIdx, epsScale);
long long n = rightIdx - leftIdx;
if ((n < options_.minLeafPrimitives) || (depth >= options_.maxTreeDepth)) {
// Create leaf node.
BVHNode leaf;
leaf.bmin[0] = bmin[0];
leaf.bmin[1] = bmin[1];
leaf.bmin[2] = bmin[2];
leaf.bmax[0] = bmax[0];
leaf.bmax[1] = bmax[1];
leaf.bmax[2] = bmax[2];
assert(leftIdx < std::numeric_limits<unsigned int>::max());
leaf.flag = 1; // leaf
leaf.data[0] = (unsigned int)n;
leaf.data[1] = (unsigned int)leftIdx;
outNodes.push_back(leaf); // atomic update
stats_.numLeafNodes++;
return offset;
}
//
// Create branch node.
//
if (depth >= maxShallowDepth) {
// Delay to build tree
ShallowNodeInfo info;
info.leftIdx = leftIdx;
info.rightIdx = rightIdx;
info.offset = offset;
shallowNodeInfos_.push_back(info);
// Add dummy node.
BVHNode node;
node.axis = -1;
node.flag = -1;
outNodes.push_back(node);
return offset;
}
else {
//
// Compute SAH and find best split axis and position
//
int minCutAxis = 0;
float cutPos[3] = { 0.0, 0.0, 0.0 };
BinBuffer bins(options_.binSize);
ContributeBinBuffer(&bins, bmin, bmax, vertices, faces, &indices_.at(0),
leftIdx, rightIdx, epsScale);
FindCutFromBinBuffer(cutPos, minCutAxis, &bins, bmin, bmax, n,
options_.costTaabb, epsScale);
// Try all 3 axes until a good cut position is available.
unsigned int midIdx;
int cutAxis = minCutAxis;
for (int axisTry = 0; axisTry < 1; axisTry++) {
unsigned int* begin = &indices_[leftIdx];
unsigned int* end = &indices_[rightIdx - 1] + 1; // mimics end() iterator.
unsigned int* mid = 0;
// try minCutAxis first.
cutAxis = (minCutAxis + axisTry) % 3;
//
// Split at (cutAxis, cutPos)
// indices_ will be modified.
//
mid = std::partition(begin, end,
SAHPred(cutAxis, cutPos[cutAxis], vertices, faces));
midIdx = leftIdx + (mid - begin);
if ((midIdx == leftIdx) || (midIdx == rightIdx)) {
// Can't split well.
// Switch to the object median (which may create an unoptimized tree,
// but is stable).
midIdx = leftIdx + (n >> 1);
// Try another axis if there's axis to try.
}
else {
// Found good cut. exit loop.
break;
}
}
BVHNode node;
node.axis = cutAxis;
node.flag = 0; // 0 = branch
outNodes.push_back(node);
unsigned int leftChildIndex = 0;
unsigned int rightChildIndex = 0;
leftChildIndex =
BuildShallowTree(outNodes, vertices, faces, leftIdx, midIdx, depth + 1,
maxShallowDepth, epsScale);
rightChildIndex =
BuildShallowTree(outNodes, vertices, faces, midIdx, rightIdx, depth + 1,
maxShallowDepth, epsScale);
if ((leftChildIndex != (unsigned int)(-1)) &&
(rightChildIndex != (unsigned int)(-1))) {
outNodes[offset].data[0] = leftChildIndex;
outNodes[offset].data[1] = rightChildIndex;
outNodes[offset].bmin[0] = bmin[0];
outNodes[offset].bmin[1] = bmin[1];
outNodes[offset].bmin[2] = bmin[2];
outNodes[offset].bmax[0] = bmax[0];
outNodes[offset].bmax[1] = bmax[1];
outNodes[offset].bmax[2] = bmax[2];
}
else {
if ((leftChildIndex == (unsigned int)(-1)) &&
(rightChildIndex != (unsigned int)(-1))) {
fprintf(stderr, "??? : %u, %u\n", leftChildIndex, rightChildIndex);
exit(-1);
}
else if ((leftChildIndex != (unsigned int)(-1)) &&
(rightChildIndex == (unsigned int)(-1))) {
fprintf(stderr, "??? : %u, %u\n", leftChildIndex, rightChildIndex);
exit(-1);
}
}
}
stats_.numBranchNodes++;
return offset;
}
#endif
size_t BVHAccel::BuildTree(BVHBuildStatistics& outStat,
std::vector<BVHNode>& outNodes,
const float* vertices, const unsigned int* faces,
unsigned int leftIdx, unsigned int rightIdx,
int depth, float epsScale) {
assert(leftIdx <= rightIdx);
size_t offset = outNodes.size();
if (outStat.maxTreeDepth < depth) {
outStat.maxTreeDepth = depth;
}
float3 bmin, bmax;
if (!bboxes_.empty()) {
GetBoundingBox(bmin, bmax, bboxes_, &indices_.at(0), leftIdx, rightIdx,
epsScale);
}
else {
ComputeBoundingBox(bmin, bmax, vertices, faces, &indices_.at(0), leftIdx,
rightIdx, epsScale);
}
long long n = rightIdx - leftIdx;
if ((n < options_.minLeafPrimitives) || (depth >= options_.maxTreeDepth)) {
// Create leaf node.
BVHNode leaf;
leaf.bmin[0] = bmin[0];
leaf.bmin[1] = bmin[1];
leaf.bmin[2] = bmin[2];
leaf.bmax[0] = bmax[0];
leaf.bmax[1] = bmax[1];
leaf.bmax[2] = bmax[2];
assert(leftIdx < std::numeric_limits<unsigned int>::max());
leaf.flag = 1; // leaf
leaf.data[0] = (unsigned int)n;
leaf.data[1] = (unsigned int)leftIdx;
outNodes.push_back(leaf); // atomic update
outStat.numLeafNodes++;
return offset;
}
//
// Create branch node.
//
//
// Compute SAH and find best split axis and position
//
int minCutAxis = 0;
float cutPos[3] = { 0.0, 0.0, 0.0 };
BinBuffer bins(options_.binSize);
ContributeBinBuffer(&bins, bmin, bmax, vertices, faces, &indices_.at(0),
leftIdx, rightIdx, epsScale);
FindCutFromBinBuffer(cutPos, minCutAxis, &bins, bmin, bmax, n,
options_.costTaabb, epsScale);
// Try all 3 axes until a good cut position is available.
unsigned int midIdx;
int cutAxis = minCutAxis;
for (int axisTry = 0; axisTry < 1; axisTry++) {
unsigned int* begin = &indices_[leftIdx];
unsigned int* end = &indices_[rightIdx - 1] + 1; // mimics end() iterator.
unsigned int* mid = 0;
// try minCutAxis first.
cutAxis = (minCutAxis + axisTry) % 3;
//
// Split at (cutAxis, cutPos)
// indices_ will be modified.
//
mid = std::partition(begin, end,
SAHPred(cutAxis, cutPos[cutAxis], vertices, faces));
midIdx = leftIdx + (unsigned int)(mid - begin);
if ((midIdx == leftIdx) || (midIdx == rightIdx)) {
// Can't split well.
// Switch to the object median (which may create an unoptimized tree,
// but is stable).
midIdx = leftIdx + (unsigned int)(n >> 1);
// Try another axis if there's axis to try.
}
else {
// Found good cut. exit loop.
break;
}
}
BVHNode node;
node.axis = cutAxis;
node.flag = 0; // 0 = branch
outNodes.push_back(node); // atomic update
unsigned int leftChildIndex = 0;
unsigned int rightChildIndex = 0;
leftChildIndex = (unsigned int)BuildTree(outStat, outNodes, vertices, faces, leftIdx,
midIdx, depth + 1, epsScale);
rightChildIndex = (unsigned int)BuildTree(outStat, outNodes, vertices, faces, midIdx,
rightIdx, depth + 1, epsScale);
{
outNodes[offset].data[0] = leftChildIndex;
outNodes[offset].data[1] = rightChildIndex;
outNodes[offset].bmin[0] = bmin[0];
outNodes[offset].bmin[1] = bmin[1];
outNodes[offset].bmin[2] = bmin[2];
outNodes[offset].bmax[0] = bmax[0];
outNodes[offset].bmax[1] = bmax[1];
outNodes[offset].bmax[2] = bmax[2];
}
outStat.numBranchNodes++;
return offset;
}
bool BVHAccel::Build(const float* vertices, const unsigned int* faces,
unsigned int numFaces, const BVHBuildOptions& options) {
options_ = options;
stats_ = BVHBuildStatistics();
assert(options_.binSize > 1);
size_t n = numFaces;
//
// 1. Create triangle indices(this will be permutated in BuildTree)
//
indices_.resize(n);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (long long i = 0; i < (long long)n; i++) {
indices_[i] = (unsigned int)i;
}
//
// 2. Compute bounding box to find scene scale.
//
float epsScale = 1.0f;
float3 bmin, bmax;
if (options.cacheBBox) {
bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<float>::max();
bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<float>::max();
bboxes_.resize(n);
for (size_t i = 0; i < n; i++) { // for each faces
size_t idx = indices_[i];
BBox bbox;
for (int j = 0; j < 3; j++) { // for each face vertex
size_t fid = faces[3 * idx + j];
for (int k = 0; k < 3; k++) { // xyz
float minval = vertices[3 * fid + k];
float maxval = vertices[3 * fid + k];
if (bbox.bmin[k] > minval) {
bbox.bmin[k] = minval;
}
if (bbox.bmax[k] < maxval) {
bbox.bmax[k] = maxval;
}
}
}
bboxes_[idx] = bbox;
for (int k = 0; k < 3; k++) { // xyz
if (bmin[k] > bbox.bmin[k]) {
bmin[k] = bbox.bmin[k];
}
if (bmax[k] < bbox.bmax[k]) {
bmax[k] = bbox.bmax[k];
}
}
}
}
else {
#ifdef _OPENMP
ComputeBoundingBoxOMP(bmin, bmax, vertices, faces, &indices_.at(0), 0, n,
epsScale);
#else
ComputeBoundingBox(bmin, bmax, vertices, faces, &indices_.at(0), 0, (unsigned int)n,
epsScale);
#endif
}
// Find max
float3 bsize = bmax - bmin;
epsScale = std::abs(bsize[0]);
if (epsScale < std::abs(bsize[1])) {
epsScale = std::abs(bsize[1]);
}
if (epsScale < std::abs(bsize[2])) {
epsScale = std::abs(bsize[2]);
}
//
// 3. Build tree
//
#ifdef _OPENMP
#if NANORT_ENABLE_PARALLEL_BUILD
// Do parallel build for sufficiently large datasets.
if (n > options.minPrimitivesForParallelBuild) {
BuildShallowTree(nodes_, vertices, faces, 0, n, /* root depth */ 0,
options.shallowDepth, epsScale); // [0, n)
assert(shallowNodeInfos_.size() > 0);
// Build deeper tree in parallel
std::vector<std::vector<BVHNode> > local_nodes(shallowNodeInfos_.size());
std::vector<BVHBuildStatistics> local_stats(shallowNodeInfos_.size());
#pragma omp parallel for
for (int i = 0; i < (int)shallowNodeInfos_.size(); i++) {
unsigned int leftIdx = shallowNodeInfos_[i].leftIdx;
unsigned int rightIdx = shallowNodeInfos_[i].rightIdx;
BuildTree(local_stats[i], local_nodes[i], vertices, faces, leftIdx,
rightIdx, options.shallowDepth, epsScale);
}
// Join local nodes
for (int i = 0; i < (int)local_nodes.size(); i++) {
assert(!local_nodes[i].empty());
size_t offset = nodes_.size();
// Add offset to child index(for branch node).
for (size_t j = 0; j < local_nodes[i].size(); j++) {
if (local_nodes[i][j].flag == 0) { // branch
local_nodes[i][j].data[0] += offset - 1;
local_nodes[i][j].data[1] += offset - 1;
}
}
// replace
nodes_[shallowNodeInfos_[i].offset] = local_nodes[i][0];
// Skip root element of the local node.
nodes_.insert(nodes_.end(), local_nodes[i].begin() + 1,
local_nodes[i].end());
}
// Join statistics
for (int i = 0; i < (int)local_nodes.size(); i++) {
stats_.maxTreeDepth =
std::max(stats_.maxTreeDepth, local_stats[i].maxTreeDepth);
stats_.numLeafNodes += local_stats[i].numLeafNodes;
stats_.numBranchNodes += local_stats[i].numBranchNodes;
}
}
else {
BuildTree(stats_, nodes_, vertices, faces, 0, n, /* root depth */ 0,
epsScale); // [0, n)
}
#else // !NANORT_ENABLE_PARALLEL_BUILD
{
BuildTree(stats_, nodes_, vertices, faces, 0, n, /* root depth */ 0,
epsScale); // [0, n)
}
#endif
#else // !_OPENMP
{
BuildTree(stats_, nodes_, vertices, faces, 0, (unsigned int)n, /* root depth */ 0,
epsScale); // [0, n)
}
#endif
stats_.epsScale = epsScale;
epsScale_ = epsScale;
return true;
}
bool BVHAccel::Dump(const char* filename) {
FILE* fp = fopen(filename, "wb");
if (!fp) {
fprintf(stderr, "[BVHAccel] Cannot write a file: %s\n", filename);
return false;
}
unsigned long long numNodes = nodes_.size();
assert(nodes_.size() > 0);
unsigned long long numIndices = indices_.size();
size_t r = 0;
r = fwrite(&numNodes, sizeof(unsigned long long), 1, fp);
assert(r == 1);
r = fwrite(&nodes_.at(0), sizeof(BVHNode), numNodes, fp);
assert(r == numNodes);
r = fwrite(&numIndices, sizeof(unsigned long long), 1, fp);
assert(r == 1);
r = fwrite(&indices_.at(0), sizeof(unsigned int), numIndices, fp);
assert(r == numIndices);
fclose(fp);
return true;
}
bool BVHAccel::Load(const char* filename) {
FILE* fp = fopen(filename, "rb");
if (!fp) {
fprintf(stderr, "Cannot open file: %s\n", filename);
return false;
}
unsigned long long numNodes;
unsigned long long numIndices;
size_t r = 0;
r = fread(&numNodes, sizeof(unsigned long long), 1, fp);
assert(r == 1);
assert(numNodes > 0);
nodes_.resize(numNodes);
r = fread(&nodes_.at(0), sizeof(BVHNode), numNodes, fp);
assert(r == numNodes);
r = fread(&numIndices, sizeof(unsigned long long), 1, fp);
assert(r == 1);
indices_.resize(numIndices);
r = fread(&indices_.at(0), sizeof(unsigned int), numIndices, fp);
assert(r == numIndices);
fclose(fp);
return true;
}
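// Caveat (added commentary): Dump()/Load() write and read raw structs, so
// the files are only portable between builds with the same endianness and
// the same BVHNode layout/padding.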
namespace {
const int kMaxStackDepth = 512;
inline bool IntersectRayAABB(float& tminOut, // [out]
float& tmaxOut, // [out]
float maxT, float bmin[3], float bmax[3],
float3 rayOrg, float3 rayInvDir,
int rayDirSign[3]) {
float tmin, tmax;
const float min_x = rayDirSign[0] ? bmax[0] : bmin[0];
const float min_y = rayDirSign[1] ? bmax[1] : bmin[1];
const float min_z = rayDirSign[2] ? bmax[2] : bmin[2];
const float max_x = rayDirSign[0] ? bmin[0] : bmax[0];
const float max_y = rayDirSign[1] ? bmin[1] : bmax[1];
const float max_z = rayDirSign[2] ? bmin[2] : bmax[2];
// X
const float tmin_x = (min_x - rayOrg[0]) * rayInvDir[0];
const float tmax_x = (max_x - rayOrg[0]) * rayInvDir[0];
// Y
const float tmin_y = (min_y - rayOrg[1]) * rayInvDir[1];
const float tmax_y = (max_y - rayOrg[1]) * rayInvDir[1];
tmin = (tmin_x > tmin_y) ? tmin_x : tmin_y;
tmax = (tmax_x < tmax_y) ? tmax_x : tmax_y;
// Z
const float tmin_z = (min_z - rayOrg[2]) * rayInvDir[2];
const float tmax_z = (max_z - rayOrg[2]) * rayInvDir[2];
tmin = (tmin > tmin_z) ? tmin : tmin_z;
tmax = (tmax < tmax_z) ? tmax : tmax_z;
//
// A hit includes the (tmin == tmax) edge case (the ray hits a flat, 2D box).
//
if ((tmax > 0.0) && (tmin <= tmax) && (tmin <= maxT)) {
tminOut = tmin;
tmaxOut = tmax;
return true;
}
return false; // no hit
}
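// Added commentary: the routine above is the branchless "slab" test. Per
// axis, rayDirSign selects which box plane produces tmin versus tmax, so no
// min/max swap is needed after the multiplications; precomputing 1/dir as
// rayInvDir turns the plane intersections into multiplies.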
inline bool TriangleIsect(float& tInOut, float& uOut, float& vOut,
const float3& v0, const float3& v1, const float3& v2,
const float3& rayOrg, const float3& rayDir,
float epsScale) {
const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;
float3 p0(v0[0], v0[1], v0[2]);
float3 p1(v1[0], v1[1], v1[2]);
float3 p2(v2[0], v2[1], v2[2]);
float3 e1, e2;
float3 p, s, q;
e1 = p1 - p0;
e2 = p2 - p0;
p = vcross(rayDir, e2);
float invDet;
float det = vdot(e1, p);
if (std::abs(det) < kEPS) { // no-cull
return false;
}
invDet = 1.0f / det;
s = rayOrg - p0;
q = vcross(s, e1);
float u = vdot(s, p) * invDet;
float v = vdot(q, rayDir) * invDet;
float t = vdot(e2, q) * invDet;
if (u < 0.0f || u > 1.0f)
return false;
if (v <= 0.0f || u + v > 1.0f)
return false;
if (t < 0.0f || t > tInOut)
return false;
tInOut = t;
uOut = u;
vOut = v;
return true;
}
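// Added commentary: TriangleIsect is the non-culling Moller-Trumbore test.
// u and v are the barycentric coordinates of the hit point, and tInOut is
// only committed when the new t both lies on the ray (t >= 0) and improves
// on the current closest hit.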
inline bool TestLeafNode(Intersection& isect, // [inout]
const BVHNode& node,
const std::vector<unsigned int>& indices,
const float* vertices, const unsigned int* faces,
const Ray& ray, float epsScale, const BVHTraceOptions& traceOptions) {
bool hit = false;
unsigned int numTriangles = node.data[0];
unsigned int offset = node.data[1];
float t = isect.t; // current hit distance
float3 rayOrg;
rayOrg[0] = ray.org[0];
rayOrg[1] = ray.org[1];
rayOrg[2] = ray.org[2];
float3 rayDir;
rayDir[0] = ray.dir[0];
rayDir[1] = ray.dir[1];
rayDir[2] = ray.dir[2];
for (unsigned int i = 0; i < numTriangles; i++) {
unsigned int faceIdx = indices[i + offset];
if ((faceIdx < traceOptions.faceIdsRange[0]) || (faceIdx >= traceOptions.faceIdsRange[1])) {
continue;
}
int f0 = faces[3 * faceIdx + 0];
int f1 = faces[3 * faceIdx + 1];
int f2 = faces[3 * faceIdx + 2];
float3 v0, v1, v2;
v0[0] = vertices[3 * f0 + 0];
v0[1] = vertices[3 * f0 + 1];
v0[2] = vertices[3 * f0 + 2];
v1[0] = vertices[3 * f1 + 0];
v1[1] = vertices[3 * f1 + 1];
v1[2] = vertices[3 * f1 + 2];
v2[0] = vertices[3 * f2 + 0];
v2[1] = vertices[3 * f2 + 1];
v2[2] = vertices[3 * f2 + 2];
float u, v;
if (TriangleIsect(t, u, v, v0, v1, v2, rayOrg, rayDir, epsScale)) {
// Update isect state
isect.t = t;
isect.u = u;
isect.v = v;
isect.faceID = faceIdx;
hit = true;
}
}
return hit;
}
inline bool MultiHitTestLeafNode(IsectVector& isects, // [inout]
int maxIntersections, const BVHNode& node,
const std::vector<unsigned int>& indices,
const float* vertices,
const unsigned int* faces, const Ray& ray,
float epsScale) {
bool hit = false;
unsigned int numTriangles = node.data[0];
unsigned int offset = node.data[1];
float t = std::numeric_limits<float>::max();
if (isects.size() >= (size_t)maxIntersections) {
t = isects.top().t; // current furthest hit distance
}
float3 rayOrg;
rayOrg[0] = ray.org[0];
rayOrg[1] = ray.org[1];
rayOrg[2] = ray.org[2];
float3 rayDir;
rayDir[0] = ray.dir[0];
rayDir[1] = ray.dir[1];
rayDir[2] = ray.dir[2];
for (unsigned int i = 0; i < numTriangles; i++) {
int faceIdx = indices[i + offset];
int f0 = faces[3 * faceIdx + 0];
int f1 = faces[3 * faceIdx + 1];
int f2 = faces[3 * faceIdx + 2];
float3 v0, v1, v2;
v0[0] = vertices[3 * f0 + 0];
v0[1] = vertices[3 * f0 + 1];
v0[2] = vertices[3 * f0 + 2];
v1[0] = vertices[3 * f1 + 0];
v1[1] = vertices[3 * f1 + 1];
v1[2] = vertices[3 * f1 + 2];
v2[0] = vertices[3 * f2 + 0];
v2[1] = vertices[3 * f2 + 1];
v2[2] = vertices[3 * f2 + 2];
float u, v;
if (TriangleIsect(t, u, v, v0, v1, v2, rayOrg, rayDir, epsScale)) {
// Update isect state
if (isects.size() < (size_t)maxIntersections) {
Intersection isect;
isect.t = t;
isect.u = u;
isect.v = v;
isect.faceID = faceIdx;
isects.push(isect);
// Queue not full yet: reset the furthest distance to far so later hits are still accepted.
t = std::numeric_limits<float>::max();
hit = true;
}
else {
if (t < isects.top().t) {
// delete furthest intersection and add new intersection.
isects.pop();
Intersection isect;
isect.t = t;
isect.u = u;
isect.v = v;
isect.faceID = faceIdx;
isects.push(isect);
// Update furthest hit distance
t = isects.top().t;
hit = true;
}
}
}
}
return hit;
}
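// Added commentary: MultiHitTestLeafNode maintains at most maxIntersections
// hits in a max-heap keyed on t (IsectVector keeps the furthest hit on top).
// Once the heap is full, a new hit is admitted only if it is nearer than the
// current furthest one, so the heap always holds the frontmost hits seen.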
} // namespace
bool BVHAccel::Traverse(Intersection& isect, const float* vertices,
const unsigned int* faces, const Ray& ray, const BVHTraceOptions& options) {
float hitT = std::numeric_limits<float>::max(); // far = no hit.
int nodeStackIndex = 0;
int nodeStack[kMaxStackDepth];
nodeStack[0] = 0;
// Init isect info as no hit
isect.t = hitT;
isect.u = 0.0;
isect.v = 0.0;
isect.faceID = -1;
int dirSign[3];
dirSign[0] = ray.dir[0] < 0.0 ? 1 : 0;
dirSign[1] = ray.dir[1] < 0.0 ? 1 : 0;
dirSign[2] = ray.dir[2] < 0.0 ? 1 : 0;
// @fixme { Check edge case; i.e., 1/0 }
float3 rayInvDir;
rayInvDir[0] = 1.0f / ray.dir[0];
rayInvDir[1] = 1.0f / ray.dir[1];
rayInvDir[2] = 1.0f / ray.dir[2];
float3 rayOrg;
rayOrg[0] = ray.org[0];
rayOrg[1] = ray.org[1];
rayOrg[2] = ray.org[2];
float minT, maxT;
while (nodeStackIndex >= 0) {
int index = nodeStack[nodeStackIndex];
BVHNode& node = nodes_[index];
nodeStackIndex--;
bool hit = IntersectRayAABB(minT, maxT, hitT, node.bmin, node.bmax, rayOrg,
rayInvDir, dirSign);
if (node.flag == 0) { // branch node
if (hit) {
int orderNear = dirSign[node.axis];
int orderFar = 1 - orderNear;
// Traverse near first.
nodeStack[++nodeStackIndex] = node.data[orderFar];
nodeStack[++nodeStackIndex] = node.data[orderNear];
}
}
else { // leaf node
if (hit) {
if (TestLeafNode(isect, node, indices_, vertices, faces, ray,
epsScale_, options)) {
hitT = isect.t;
}
}
}
}
assert(nodeStackIndex < kMaxStackDepth);
if (isect.t < std::numeric_limits<float>::max()) {
return true;
}
return false;
}
bool BVHAccel::MultiHitTraverse(StackVector<Intersection, 128>& isects,
int maxIntersections, const float* vertices,
const unsigned int* faces, Ray& ray) {
float hitT = std::numeric_limits<float>::max(); // far = no hit.
int nodeStackIndex = 0;
int nodeStack[kMaxStackDepth];
nodeStack[0] = 0;
IsectVector isectPQ;
isects->clear();
int dirSign[3];
dirSign[0] = ray.dir[0] < 0.0 ? 1 : 0;
dirSign[1] = ray.dir[1] < 0.0 ? 1 : 0;
dirSign[2] = ray.dir[2] < 0.0 ? 1 : 0;
// @fixme { Check edge case; i.e., 1/0 }
float3 rayInvDir;
rayInvDir[0] = 1.0f / ray.dir[0];
rayInvDir[1] = 1.0f / ray.dir[1];
rayInvDir[2] = 1.0f / ray.dir[2];
float3 rayOrg;
rayOrg[0] = ray.org[0];
rayOrg[1] = ray.org[1];
rayOrg[2] = ray.org[2];
float minT, maxT;
while (nodeStackIndex >= 0) {
int index = nodeStack[nodeStackIndex];
BVHNode& node = nodes_[index];
nodeStackIndex--;
bool hit = IntersectRayAABB(minT, maxT, hitT, node.bmin, node.bmax, rayOrg,
rayInvDir, dirSign);
if (node.flag == 0) { // branch node
if (hit) {
int orderNear = dirSign[node.axis];
int orderFar = 1 - orderNear;
// Traverse near first.
nodeStack[++nodeStackIndex] = node.data[orderFar];
nodeStack[++nodeStackIndex] = node.data[orderNear];
}
}
else { // leaf node
if (hit) {
if (MultiHitTestLeafNode(isectPQ, maxIntersections, node, indices_,
vertices, faces, ray, epsScale_)) {
// Only update `hitT` when queue is full.
if (isectPQ.size() >= (size_t)maxIntersections) {
hitT = isectPQ.top().t;
}
}
}
}
}
assert(nodeStackIndex < kMaxStackDepth);
if (!isectPQ.empty()) {
// Store intersections in reverse order (frontmost first).
size_t n = isectPQ.size();
isects->resize(n);
for (size_t i = 0; i < n; i++) {
const Intersection& isect = isectPQ.top();
isects[n - i - 1] = isect;
isectPQ.pop();
}
return true;
}
return false;
}
} // namespace nanort
#endif
#endif // __NANORT_H__
|
halos.c | #include "../comms.h"
#include "../mesh.h"
#include "../umesh.h"
// Enforce reflective boundary conditions on the problem state
void handle_boundary_2d(const int nx, const int ny, Mesh* mesh, double* arr,
const int invert, const int pack) {
START_PROFILING(&comms_profile);
const int pad = mesh->pad;
int* neighbours = mesh->neighbours;
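// Added commentary: arr is stored row-major with a halo of `pad` cells on
// every side. East/west exchanges therefore pack `pad` columns for each of
// the (ny - 2*pad) interior rows, and north/south exchanges pack `pad` rows
// for each of the (nx - 2*pad) interior columns.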
#ifdef MPI
int nmessages = 0;
if (pack) {
// Pack east and west
if (neighbours[EAST] != EDGE) {
#pragma omp parallel for collapse(2)
for (int ii = pad; ii < ny - pad; ++ii) {
for (int dd = 0; dd < pad; ++dd) {
mesh->east_buffer_out[(ii - pad) * pad + dd] =
arr[(ii * nx) + (nx - 2 * pad + dd)];
}
}
non_block_send(mesh->east_buffer_out, (ny - 2 * pad) * pad,
neighbours[EAST], 2, nmessages++);
non_block_recv(mesh->east_buffer_in, (ny - 2 * pad) * pad,
neighbours[EAST], 3, nmessages++);
}
if (neighbours[WEST] != EDGE) {
#pragma omp parallel for collapse(2)
for (int ii = pad; ii < ny - pad; ++ii) {
for (int dd = 0; dd < pad; ++dd) {
mesh->west_buffer_out[(ii - pad) * pad + dd] =
arr[(ii * nx) + (pad + dd)];
}
}
non_block_send(mesh->west_buffer_out, (ny - 2 * pad) * pad,
neighbours[WEST], 3, nmessages++);
non_block_recv(mesh->west_buffer_in, (ny - 2 * pad) * pad,
neighbours[WEST], 2, nmessages++);
}
// Pack north and south
if (neighbours[NORTH] != EDGE) {
#pragma omp parallel for collapse(2)
for (int dd = 0; dd < pad; ++dd) {
for (int jj = pad; jj < nx - pad; ++jj) {
mesh->north_buffer_out[dd * (nx - 2 * pad) + (jj - pad)] =
arr[(ny - 2 * pad + dd) * nx + jj];
}
}
non_block_send(mesh->north_buffer_out, (nx - 2 * pad) * pad,
neighbours[NORTH], 1, nmessages++);
non_block_recv(mesh->north_buffer_in, (nx - 2 * pad) * pad,
neighbours[NORTH], 0, nmessages++);
}
if (neighbours[SOUTH] != EDGE) {
#pragma omp parallel for collapse(2)
for (int dd = 0; dd < pad; ++dd) {
for (int jj = pad; jj < nx - pad; ++jj) {
mesh->south_buffer_out[dd * (nx - 2 * pad) + (jj - pad)] =
arr[(pad + dd) * nx + jj];
}
}
non_block_send(mesh->south_buffer_out, (nx - 2 * pad) * pad,
neighbours[SOUTH], 0, nmessages++);
non_block_recv(mesh->south_buffer_in, (nx - 2 * pad) * pad,
neighbours[SOUTH], 1, nmessages++);
}
wait_on_messages(nmessages);
// Unpack east and west
if (neighbours[WEST] != EDGE) {
#pragma omp parallel for collapse(2)
for (int ii = pad; ii < ny - pad; ++ii) {
for (int dd = 0; dd < pad; ++dd) {
arr[ii * nx + dd] = mesh->west_buffer_in[(ii - pad) * pad + dd];
}
}
}
if (neighbours[EAST] != EDGE) {
#pragma omp parallel for collapse(2)
for (int ii = pad; ii < ny - pad; ++ii) {
for (int dd = 0; dd < pad; ++dd) {
arr[ii * nx + (nx - pad + dd)] =
mesh->east_buffer_in[(ii - pad) * pad + dd];
}
}
}
// Unpack north and south
if (neighbours[NORTH] != EDGE) {
#pragma omp parallel for collapse(2)
for (int dd = 0; dd < pad; ++dd) {
for (int jj = pad; jj < nx - pad; ++jj) {
arr[(ny - pad + dd) * nx + jj] =
mesh->north_buffer_in[dd * (nx - 2 * pad) + (jj - pad)];
}
}
}
if (neighbours[SOUTH] != EDGE) {
#pragma omp parallel for collapse(2)
for (int dd = 0; dd < pad; ++dd) {
for (int jj = pad; jj < nx - pad; ++jj) {
arr[dd * nx + jj] =
mesh->south_buffer_in[dd * (nx - 2 * pad) + (jj - pad)];
}
}
}
}
#endif
// Perform the boundary reflections, potentially with the data updated from
// neighbours
double x_inversion_coeff = (invert == INVERT_X) ? -1.0 : 1.0;
double y_inversion_coeff = (invert == INVERT_Y) ? -1.0 : 1.0;
// Reflect at the north
if (neighbours[NORTH] == EDGE) {
#pragma omp parallel for collapse(2)
for (int dd = 0; dd < pad; ++dd) {
for (int jj = pad; jj < nx - pad; ++jj) {
arr[(ny - pad + dd) * nx + jj] =
y_inversion_coeff * arr[(ny - 1 - pad - dd) * nx + jj];
}
}
}
// Reflect at the south
if (neighbours[SOUTH] == EDGE) {
#pragma omp parallel for collapse(2)
for (int dd = 0; dd < pad; ++dd) {
for (int jj = pad; jj < nx - pad; ++jj) {
arr[(pad - 1 - dd) * nx + jj] =
y_inversion_coeff * arr[(pad + dd) * nx + jj];
}
}
}
// Reflect at the east
if (neighbours[EAST] == EDGE) {
#pragma omp parallel for collapse(2)
for (int ii = pad; ii < ny - pad; ++ii) {
for (int dd = 0; dd < pad; ++dd) {
arr[ii * nx + (nx - pad + dd)] =
x_inversion_coeff * arr[ii * nx + (nx - 1 - pad - dd)];
}
}
}
if (neighbours[WEST] == EDGE) {
// Reflect at the west
#pragma omp parallel for collapse(2)
for (int ii = pad; ii < ny - pad; ++ii) {
for (int dd = 0; dd < pad; ++dd) {
arr[ii * nx + (pad - 1 - dd)] =
x_inversion_coeff * arr[ii * nx + (pad + dd)];
}
}
}
STOP_PROFILING(&comms_profile, __func__);
}
// Enforce reflective boundary conditions on the problem state
void handle_boundary_3d(const int nx, const int ny, const int nz, Mesh* mesh,
double* arr, const int invert, const int pack) {
#if 0
START_PROFILING(&comms_profile);
int* neighbours = mesh->neighbours;
#ifdef MPI
int nmessages = 0;
if(pack) {
// Pack east and west
if(neighbours[EAST] != EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int jj = 0; jj < ny; ++jj) {
for(int dd = 0; dd < pad; ++dd) {
mesh->east_buffer_out[(ii*ny*pad)+(jj*pad)+(dd)] =
arr[(ii*nx*ny)+(jj*nx)+(nx-2*pad+dd)];
}
}
}
non_block_send(
mesh->east_buffer_out, nz*ny*pad, neighbours[EAST], 2, nmessages++);
non_block_recv(
mesh->east_buffer_in, nz*ny*pad, neighbours[EAST], 3, nmessages++);
}
if(neighbours[WEST] != EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int jj = 0; jj < ny; ++jj) {
for(int dd = 0; dd < pad; ++dd) {
mesh->west_buffer_out[(ii*ny*pad)+(jj*pad)+(dd)] =
arr[(ii*nx*ny)+(jj*nx)+(pad+dd)];
}
}
}
non_block_send(
mesh->west_buffer_out, nz*ny*pad, neighbours[WEST], 3, nmessages++);
non_block_recv(
mesh->west_buffer_in, nz*ny*pad, neighbours[WEST], 2, nmessages++);
}
// Pack north and south
if(neighbours[NORTH] != EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int dd = 0; dd < pad; ++dd) {
for(int kk = 0; kk < nx; ++kk) {
mesh->north_buffer_out[(ii*pad*nx)+(dd*nx)+(kk)] =
arr[(ii*nx*ny)+((ny-2*pad+dd)*nx)+(kk)];
}
}
}
non_block_send(
mesh->north_buffer_out, nz*nx*pad, neighbours[NORTH], 1, nmessages++);
non_block_recv(
mesh->north_buffer_in, nz*nx*pad, neighbours[NORTH], 0, nmessages++);
}
if(neighbours[SOUTH] != EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int dd = 0; dd < pad; ++dd) {
for(int kk = 0; kk < nx; ++kk) {
mesh->south_buffer_out[(ii*pad*nx)+(dd*nx)+(kk)] =
arr[(ii*nx*ny)+((pad+dd)*nx)+(kk)];
}
}
}
non_block_send(
mesh->south_buffer_out, nz*nx*pad, neighbours[SOUTH], 0, nmessages++);
non_block_recv(
mesh->south_buffer_in, nz*nx*pad, neighbours[SOUTH], 1, nmessages++);
}
// Pack front and back
if(neighbours[FRONT] != EDGE) {
#pragma omp parallel for collapse(2)
for(int dd = 0; dd < pad; ++dd) {
for(int jj = 0; jj < ny; ++jj) {
for(int kk = 0; kk < nx; ++kk) {
mesh->front_buffer_out[(dd*nx*ny)+(jj*nx)+(kk)] =
arr[((pad+dd)*nx*ny)+(jj*nx)+(kk)];
}
}
}
non_block_send(
mesh->front_buffer_out, nx*ny*pad, neighbours[FRONT], 4, nmessages++);
non_block_recv(
mesh->front_buffer_in, nx*ny*pad, neighbours[FRONT], 5, nmessages++);
}
if(neighbours[BACK] != EDGE) {
#pragma omp parallel for collapse(2)
for(int dd = 0; dd < pad; ++dd) {
for(int jj = 0; jj < ny; ++jj) {
for(int kk = 0; kk < nx; ++kk) {
mesh->back_buffer_out[(dd*nx*ny)+(jj*nx)+(kk)] =
arr[((nz-2*pad+dd)*nx*ny)+(jj*nx)+(kk)];
}
}
}
non_block_send(
mesh->back_buffer_out, nx*ny*pad, neighbours[BACK], 5, nmessages++);
non_block_recv(
mesh->back_buffer_in, nx*ny*pad, neighbours[BACK], 4, nmessages++);
}
wait_on_messages(nmessages);
// Unpack east and west
if(neighbours[EAST] != EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int jj = 0; jj < ny; ++jj) {
for(int dd = 0; dd < pad; ++dd) {
arr[(ii*nx*ny)+(jj*nx)+(nx-pad+dd)] =
mesh->east_buffer_in[(ii*ny*pad)+(jj*pad)+(dd)];
}
}
}
}
if(neighbours[WEST] != EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int jj = 0; jj < ny; ++jj) {
for(int dd = 0; dd < pad; ++dd) {
arr[(ii*nx*ny)+(jj*nx)+dd] =
mesh->west_buffer_in[(ii*ny*pad)+(jj*pad)+(dd)];
}
}
}
}
// Unpack north and south
if(neighbours[NORTH] != EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int dd = 0; dd < pad; ++dd) {
for(int kk = 0; kk < nx; ++kk) {
arr[(ii*nx*ny)+((ny-pad+dd)*nx)+(kk)] =
mesh->north_buffer_in[(ii*pad*nx)+(dd*nx)+(kk)];
}
}
}
}
if(neighbours[SOUTH] != EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int dd = 0; dd < pad; ++dd) {
for(int kk = 0; kk < nx; ++kk) {
arr[(ii*nx*ny)+(dd*nx)+(kk)] =
mesh->south_buffer_in[(ii*pad*nx)+(dd*nx)+(kk)];
}
}
}
}
// Unpack front and back
if(neighbours[FRONT] != EDGE) {
#pragma omp parallel for collapse(2)
for(int dd = 0; dd < pad; ++dd) {
for(int jj = 0; jj < ny; ++jj) {
for(int kk = 0; kk < nx; ++kk) {
arr[(dd*nx*ny)+(jj*nx)+(kk)] =
mesh->front_buffer_in[(dd*nx*ny)+(jj*nx)+(kk)];
}
}
}
}
if(neighbours[BACK] != EDGE) {
#pragma omp parallel for collapse(2)
for(int dd = 0; dd < pad; ++dd) {
for(int jj = 0; jj < ny; ++jj) {
for(int kk = 0; kk < nx; ++kk) {
arr[((nz-pad+dd)*nx*ny)+(jj*nx)+(kk)] =
mesh->back_buffer_in[(dd*nx*ny)+(jj*nx)+(kk)];
}
}
}
}
}
#endif
// Perform the boundary reflections, potentially with the data updated from neighbours
double x_inversion_coeff = (invert == INVERT_X) ? -1.0 : 1.0;
double y_inversion_coeff = (invert == INVERT_Y) ? -1.0 : 1.0;
double z_inversion_coeff = (invert == INVERT_Z) ? -1.0 : 1.0;
// Reflect at the east
if(neighbours[EAST] == EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int jj = 0; jj < ny; ++jj) {
for(int dd = 0; dd < pad; ++dd) {
arr[(ii*nx*ny)+(jj*nx)+(nx-pad+dd)] =
x_inversion_coeff*arr[(ii*nx*ny)+(jj*nx)+(nx-1-pad-dd)];
}
}
}
}
// Reflect at the west
if(neighbours[WEST] == EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int jj = 0; jj < ny; ++jj) {
for(int dd = 0; dd < pad; ++dd) {
arr[(ii*nx*ny)+(jj*nx)+(pad-1-dd)] =
x_inversion_coeff*arr[(ii*nx*ny)+(jj*nx)+(pad+dd)];
}
}
}
}
// Reflect at north
if(neighbours[NORTH] == EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int dd = 0; dd < pad; ++dd) {
for(int kk = 0; kk < nx; ++kk) {
arr[(ii*nx*ny)+((ny-pad+dd)*nx)+(kk)] =
y_inversion_coeff*arr[(ii*nx*ny)+((ny-1-pad-dd)*nx)+(kk)];
}
}
}
}
// Reflect at the south
if(neighbours[SOUTH] == EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int dd = 0; dd < pad; ++dd) {
for(int kk = 0; kk < nx; ++kk) {
arr[(ii*nx*ny)+((pad-1-dd)*nx)+(kk)] =
y_inversion_coeff*arr[(ii*nx*ny)+((pad+dd)*nx)+(kk)];
}
}
}
}
// Reflect at the front
if(neighbours[FRONT] == EDGE) {
#pragma omp parallel for collapse(2)
for(int dd = 0; dd < pad; ++dd) {
for(int jj = 0; jj < ny; ++jj) {
for(int kk = 0; kk < nx; ++kk) {
arr[((pad-1-dd)*nx*ny)+(jj*nx)+(kk)] =
z_inversion_coeff*arr[((pad+dd)*nx*ny)+(jj*nx)+(kk)];
}
}
}
}
// Reflect at the back
if(neighbours[BACK] == EDGE) {
#pragma omp parallel for collapse(2)
for(int dd = 0; dd < pad; ++dd) {
for(int jj = 0; jj < ny; ++jj) {
for(int kk = 0; kk < nx; ++kk) {
arr[((nz-pad+dd)*nx*ny)+(jj*nx)+(kk)] =
z_inversion_coeff*arr[((nz-1-pad-dd)*nx*ny)+(jj*nx)+(kk)];
}
}
}
}
STOP_PROFILING(&comms_profile, __func__);
#endif // if 0
}
// Reflect the node centered velocities on the boundary
void handle_unstructured_reflect(const int nnodes, const int* boundary_index,
const int* boundary_type,
const double* boundary_normal_x,
const double* boundary_normal_y,
double* velocity_x, double* velocity_y) {
#pragma omp parallel for
for (int nn = 0; nn < nnodes; ++nn) {
const int index = boundary_index[(nn)];
if (index == IS_INTERIOR) {
continue;
}
if (boundary_type[(index)] == IS_BOUNDARY) {
// Project the velocity onto the face direction
const double boundary_parallel_x = boundary_normal_y[(index)];
const double boundary_parallel_y = -boundary_normal_x[(index)];
const double vel_dot_parallel = (velocity_x[(nn)] * boundary_parallel_x +
velocity_y[(nn)] * boundary_parallel_y);
velocity_x[(nn)] = boundary_parallel_x * vel_dot_parallel;
velocity_y[(nn)] = boundary_parallel_y * vel_dot_parallel;
} else if (boundary_type[(index)] == IS_CORNER) {
velocity_x[(nn)] = 0.0;
velocity_y[(nn)] = 0.0;
}
}
}
// Reflect the node centered velocities on the boundary
void handle_unstructured_reflect_3d(const int nnodes, const int* boundary_index,
const int* boundary_type,
const double* boundary_normal_x,
const double* boundary_normal_y,
const double* boundary_normal_z,
double* velocity_x, double* velocity_y,
double* velocity_z) {
#pragma omp parallel for
for (int nn = 0; nn < nnodes; ++nn) {
const int index = boundary_index[(nn)];
if (index == IS_INTERIOR) {
continue;
}
if (boundary_type[(index)] == IS_EDGE) {
// The normal here isn't actually a normal but a projection vector
const double ab = (velocity_x[(nn)] * boundary_normal_x[(index)] +
velocity_y[(nn)] * boundary_normal_y[(index)] +
velocity_z[(nn)] * boundary_normal_z[(index)]);
// Project the vector onto the edge line
velocity_x[(nn)] = ab * boundary_normal_x[(index)];
velocity_y[(nn)] = ab * boundary_normal_y[(index)];
velocity_z[(nn)] = ab * boundary_normal_z[(index)];
} else if (boundary_type[(index)] == IS_BOUNDARY) {
// Perform an orthogonal projection, assuming normal vector is normalised
const double un = (velocity_x[(nn)] * boundary_normal_x[(index)] +
velocity_y[(nn)] * boundary_normal_y[(index)] +
velocity_z[(nn)] * boundary_normal_z[(index)]);
velocity_x[(nn)] -= un * boundary_normal_x[(index)];
velocity_y[(nn)] -= un * boundary_normal_y[(index)];
velocity_z[(nn)] -= un * boundary_normal_z[(index)];
} else if (boundary_type[(index)] == IS_CORNER) {
velocity_x[(nn)] = 0.0;
velocity_y[(nn)] = 0.0;
velocity_z[(nn)] = 0.0;
}
}
}
|
pr24455-1.c | /* { dg-do compile } */
/* { dg-require-effective-target tls } */
extern int i;
#pragma omp threadprivate (i)
int i;
|
exercicio02.c | #include <stdio.h>
#include <stdlib.h>
#include "omp.h"
static long num_steps = 100000000;
float* cria_vet(int tamanho);
int main() {
double pi, sum = 0.0, tempo_inicial, tempo_final, step;
int i;
step = 1.0 / (double)num_steps;
int nthreads = 4;
omp_set_num_threads(nthreads);
float *vet = cria_vet(nthreads);
tempo_inicial = omp_get_wtime();
#pragma omp parallel
{
int id = omp_get_thread_num();
double local_sum = 0.0, x;
int i;
printf("threads %d\n", id);
for (i = id; i < num_steps; i = i + nthreads) {
x = (i + 0.5) * step;
local_sum = local_sum + 4.0 / (1.0 + x * x);
}
vet[id] = local_sum;
}
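// Added commentary: each thread accumulates into its private local_sum and
// writes vet[id] exactly once, which avoids a data race on the shared sum.
// Adjacent vet[] entries can still share a cache line (false sharing);
// padding each slot to a cache line, or an OpenMP reduction, would avoid it.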
for(i = 0; i < nthreads; i++) {
sum = sum + vet[i];
}
pi = step * sum;
tempo_final = omp_get_wtime();
printf("\n Pi = %lf", pi);
printf("\n Tempo gasto: %lf", tempo_final - tempo_inicial);
free(vet);
return 0;
}
float* cria_vet(int tamanho) {
int i;
float *vet = (float *)calloc(tamanho, sizeof(float));
for(i=0; i < tamanho; i++) {
vet[i] = 0.0;
}
return vet;
}
|
libimagequant.c | /*
** © 2009-2018 by Kornel Lesiński.
** © 1989, 1991 by Jef Poskanzer.
** © 1997, 2000, 2002 by Greg Roelofs; based on an idea by Stefan Schneider.
**
** See COPYRIGHT file for license.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <limits.h>
#if !(defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199900L) && !(defined(_MSC_VER) && _MSC_VER >= 1800)
#error "This program requires C99, e.g. -std=c99 switch in GCC or it requires MSVC 18.0 or higher."
#error "Ignore torrent of syntax errors that may follow. It's only because compiler is set to use too old C version."
#endif
#ifdef _OPENMP
#include <omp.h>
#define LIQ_TEMP_ROW_WIDTH(img_width) (((img_width) | 15) + 1) /* keep alignment & leave space between rows to avoid cache line contention */
#else
#define LIQ_TEMP_ROW_WIDTH(img_width) (img_width)
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif
#include "libimagequant.h"
#include "pam.h"
#include "mediancut.h"
#include "nearest.h"
#include "blur.h"
#include "kmeans.h"
#define LIQ_HIGH_MEMORY_LIMIT (1<<26) /* avoid allocating buffers larger than 64MB */
// each structure has a pointer as a unique identifier that allows type checking at run time
static const char liq_attr_magic[] = "liq_attr";
static const char liq_image_magic[] = "liq_image";
static const char liq_result_magic[] = "liq_result";
static const char liq_histogram_magic[] = "liq_histogram";
static const char liq_remapping_result_magic[] = "liq_remapping_result";
static const char liq_freed_magic[] = "free";
#define CHECK_STRUCT_TYPE(attr, kind) liq_crash_if_invalid_handle_pointer_given((const liq_attr*)attr, kind ## _magic)
#define CHECK_USER_POINTER(ptr) liq_crash_if_invalid_pointer_given(ptr)
struct liq_attr {
const char *magic_header;
void* (*malloc)(size_t);
void (*free)(void*);
double target_mse, max_mse, kmeans_iteration_limit;
float min_opaque_val;
unsigned int max_colors, max_histogram_entries;
unsigned int min_posterization_output /* user setting */, min_posterization_input /* speed setting */;
unsigned int kmeans_iterations, feedback_loop_trials;
bool last_index_transparent, use_contrast_maps;
unsigned char use_dither_map;
unsigned char speed;
unsigned char progress_stage1, progress_stage2, progress_stage3;
liq_progress_callback_function *progress_callback;
void *progress_callback_user_info;
liq_log_callback_function *log_callback;
void *log_callback_user_info;
liq_log_flush_callback_function *log_flush_callback;
void *log_flush_callback_user_info;
};
struct liq_image {
const char *magic_header;
void* (*malloc)(size_t);
void (*free)(void*);
f_pixel *f_pixels;
rgba_pixel **rows;
double gamma;
unsigned int width, height;
unsigned char *importance_map, *edges, *dither_map;
rgba_pixel *pixels, *temp_row;
f_pixel *temp_f_row;
liq_image_get_rgba_row_callback *row_callback;
void *row_callback_user_info;
liq_image *background;
float min_opaque_val;
f_pixel fixed_colors[256];
unsigned short fixed_colors_count;
bool free_pixels, free_rows, free_rows_internal;
};
typedef struct liq_remapping_result {
const char *magic_header;
void* (*malloc)(size_t);
void (*free)(void*);
unsigned char *pixels;
colormap *palette;
liq_progress_callback_function *progress_callback;
void *progress_callback_user_info;
liq_palette int_palette;
double gamma, palette_error;
float dither_level;
unsigned char use_dither_map;
unsigned char progress_stage1;
} liq_remapping_result;
struct liq_result {
const char *magic_header;
void* (*malloc)(size_t);
void (*free)(void*);
liq_remapping_result *remapping;
colormap *palette;
liq_progress_callback_function *progress_callback;
void *progress_callback_user_info;
liq_palette int_palette;
float dither_level;
double gamma, palette_error;
int min_posterization_output;
unsigned char use_dither_map;
};
struct liq_histogram {
const char *magic_header;
void* (*malloc)(size_t);
void (*free)(void*);
struct acolorhash_table *acht;
double gamma;
f_pixel fixed_colors[256];
unsigned short fixed_colors_count;
unsigned short ignorebits;
bool had_image_added;
};
static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels) LIQ_NONNULL;
static void contrast_maps(liq_image *image) LIQ_NONNULL;
static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output) LIQ_NONNULL;
static const rgba_pixel *liq_image_get_row_rgba(liq_image *input_image, unsigned int row) LIQ_NONNULL;
static bool liq_image_get_row_f_init(liq_image *img) LIQ_NONNULL;
static const f_pixel *liq_image_get_row_f(liq_image *input_image, unsigned int row) LIQ_NONNULL;
static void liq_remapping_result_destroy(liq_remapping_result *result) LIQ_NONNULL;
static liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **) LIQ_NONNULL;
static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output) LIQ_NONNULL;
LIQ_NONNULL static void liq_verbose_printf(const liq_attr *context, const char *fmt, ...)
{
if (context->log_callback) {
va_list va;
va_start(va, fmt);
int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0
va_end(va);
LIQ_ARRAY(char, buf, required_space);
va_start(va, fmt);
vsnprintf(buf, required_space, fmt, va);
va_end(va);
context->log_callback(context, buf, context->log_callback_user_info);
}
}
LIQ_NONNULL inline static void verbose_print(const liq_attr *attr, const char *msg)
{
if (attr->log_callback) {
attr->log_callback(attr, msg, attr->log_callback_user_info);
}
}
LIQ_NONNULL static void liq_verbose_printf_flush(liq_attr *attr)
{
if (attr->log_flush_callback) {
attr->log_flush_callback(attr, attr->log_flush_callback_user_info);
}
}
LIQ_NONNULL static bool liq_progress(const liq_attr *attr, const float percent)
{
return attr->progress_callback && !attr->progress_callback(percent, attr->progress_callback_user_info);
}
LIQ_NONNULL static bool liq_remap_progress(const liq_remapping_result *quant, const float percent)
{
return quant->progress_callback && !quant->progress_callback(percent, quant->progress_callback_user_info);
}
#if USE_SSE
inline static bool is_sse_available()
{
#if (defined(__x86_64__) || defined(__amd64) || defined(_WIN64))
return true;
#elif _MSC_VER
int info[4];
__cpuid(info, 1);
/* bool is implemented as a built-in type of size 1 in MSVC */
    return info[3] & (1<<26) ? true : false; /* edx bit 26 = SSE2 (which implies SSE) */
#else
int a,b,c,d;
cpuid(1, a, b, c, d);
return d & (1<<25); // edx bit 25 is set when SSE is present
#endif
}
#endif
/* make it clear in backtrace when user-supplied handle points to invalid memory */
NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header);
LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header)
{
if (!user_supplied_pointer) {
return false;
}
if (user_supplied_pointer->magic_header == liq_freed_magic) {
fprintf(stderr, "%s used after being freed", expected_magic_header);
        // This is not normal error handling; it is a programmer error that should crash the program.
        // The program cannot safely continue if memory has been used after it's been freed.
        // abort() is nasty, but a security vulnerability may be worse.
abort();
}
return user_supplied_pointer->magic_header == expected_magic_header;
}
NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer);
LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer)
{
if (!pointer) {
return false;
}
    // Force a read from the given (potentially invalid) memory location to check early whether it crashes the program.
    // It doesn't matter what value is read; the code below only exists to stop the compiler complaining about an unused read.
char test_access = *((volatile char *)pointer);
return test_access || true;
}
LIQ_NONNULL static void liq_log_error(const liq_attr *attr, const char *msg)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
liq_verbose_printf(attr, " error: %s", msg);
}
static double quality_to_mse(long quality)
{
if (quality == 0) {
return MAX_DIFF;
}
if (quality == 100) {
return 0;
}
// curve fudged to be roughly similar to quality of libjpeg
// except lowest 10 for really low number of colors
const double extra_low_quality_fudge = MAX(0,0.016/(0.001+quality) - 0.001);
return extra_low_quality_fudge + 2.5/pow(210.0 + quality, 1.2) * (100.1-quality)/100.0;
}
static unsigned int mse_to_quality(double mse)
{
for(int i=100; i > 0; i--) {
if (mse <= quality_to_mse(i) + 0.000001) { // + epsilon for floating point errors
return i;
}
}
return 0;
}
/** internally MSE is a sum of all channels with pixels 0..1 range,
but other software gives per-RGB-channel MSE for 0..255 range */
static double mse_to_standard_mse(double mse) {
return mse * 65536.0/6.0;
}
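/* A minimal round-trip sketch (illustrative, not part of the library):
   mse_to_quality scans downwards and quality_to_mse is monotonically
   decreasing, so converting quality -> mse -> quality should be lossless
   over the whole 1..100 range. The guard macro below is hypothetical. */
#ifdef LIQ_QUALITY_ROUNDTRIP_CHECK
#include <assert.h>
static void liq_quality_roundtrip_check(void)
{
    for (int q = 1; q <= 100; q++) {
        assert(mse_to_quality(quality_to_mse(q)) == (unsigned int)q);
    }
}
#endif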
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_quality(liq_attr* attr, int minimum, int target)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (target < 0 || target > 100 || target < minimum || minimum < 0) return LIQ_VALUE_OUT_OF_RANGE;
attr->target_mse = quality_to_mse(target);
attr->max_mse = quality_to_mse(minimum);
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_min_quality(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return mse_to_quality(attr->max_mse);
}
LIQ_EXPORT LIQ_NONNULL int liq_get_max_quality(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return mse_to_quality(attr->target_mse);
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_max_colors(liq_attr* attr, int colors)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (colors < 2 || colors > 256) return LIQ_VALUE_OUT_OF_RANGE;
attr->max_colors = colors;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_max_colors(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return attr->max_colors;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_posterization(liq_attr *attr, int bits)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (bits < 0 || bits > 4) return LIQ_VALUE_OUT_OF_RANGE;
attr->min_posterization_output = bits;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_min_posterization(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return attr->min_posterization_output;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_speed(liq_attr* attr, int speed)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (speed < 1 || speed > 10) return LIQ_VALUE_OUT_OF_RANGE;
unsigned int iterations = MAX(8-speed, 0);
iterations += iterations * iterations/2;
attr->kmeans_iterations = iterations;
attr->kmeans_iteration_limit = 1.0/(double)(1<<(23-speed));
attr->feedback_loop_trials = MAX(56-9*speed, 0);
attr->max_histogram_entries = (1<<17) + (1<<18)*(10-speed);
attr->min_posterization_input = (speed >= 8) ? 1 : 0;
    attr->use_dither_map = (speed <= (omp_get_max_threads() > 1 ? 7 : 5)); // a parallelized dither map might speed up Floyd-Steinberg remapping
if (attr->use_dither_map && speed < 3) {
attr->use_dither_map = 2; // always
}
attr->use_contrast_maps = (speed <= 7) || attr->use_dither_map;
attr->speed = speed;
attr->progress_stage1 = attr->use_contrast_maps ? 20 : 8;
if (attr->feedback_loop_trials < 2) {
attr->progress_stage1 += 30;
}
attr->progress_stage3 = 50 / (1+speed);
attr->progress_stage2 = 100 - attr->progress_stage1 - attr->progress_stage3;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_speed(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return attr->speed;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_output_gamma(liq_result* res, double gamma)
{
if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER;
if (gamma <= 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE;
if (res->remapping) {
liq_remapping_result_destroy(res->remapping);
res->remapping = NULL;
}
res->gamma = gamma;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_opacity(liq_attr* attr, int min)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (min < 0 || min > 255) return LIQ_VALUE_OUT_OF_RANGE;
attr->min_opaque_val = (double)min/255.0;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_min_opacity(const liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
return MIN(255.f, 256.f * attr->min_opaque_val);
}
LIQ_EXPORT LIQ_NONNULL void liq_set_last_index_transparent(liq_attr* attr, int is_last)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
attr->last_index_transparent = !!is_last;
}
LIQ_EXPORT void liq_attr_set_progress_callback(liq_attr *attr, liq_progress_callback_function *callback, void *user_info)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
attr->progress_callback = callback;
attr->progress_callback_user_info = user_info;
}
LIQ_EXPORT void liq_result_set_progress_callback(liq_result *result, liq_progress_callback_function *callback, void *user_info)
{
if (!CHECK_STRUCT_TYPE(result, liq_result)) return;
result->progress_callback = callback;
result->progress_callback_user_info = user_info;
}
LIQ_EXPORT void liq_set_log_callback(liq_attr *attr, liq_log_callback_function *callback, void* user_info)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
liq_verbose_printf_flush(attr);
attr->log_callback = callback;
attr->log_callback_user_info = user_info;
}
LIQ_EXPORT void liq_set_log_flush_callback(liq_attr *attr, liq_log_flush_callback_function *callback, void* user_info)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
attr->log_flush_callback = callback;
attr->log_flush_callback_user_info = user_info;
}
LIQ_EXPORT liq_attr* liq_attr_create()
{
return liq_attr_create_with_allocator(NULL, NULL);
}
LIQ_EXPORT LIQ_NONNULL void liq_attr_destroy(liq_attr *attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
return;
}
liq_verbose_printf_flush(attr);
attr->magic_header = liq_freed_magic;
attr->free(attr);
}
LIQ_EXPORT LIQ_NONNULL liq_attr* liq_attr_copy(const liq_attr *orig)
{
if (!CHECK_STRUCT_TYPE(orig, liq_attr)) {
return NULL;
}
liq_attr *attr = orig->malloc(sizeof(liq_attr));
if (!attr) return NULL;
*attr = *orig;
return attr;
}
static void *liq_aligned_malloc(size_t size)
{
unsigned char *ptr = malloc(size + 16);
if (!ptr) {
return NULL;
}
uintptr_t offset = 16 - ((uintptr_t)ptr & 15); // also reserves 1 byte for ptr[-1]
ptr += offset;
assert(0 == (((uintptr_t)ptr) & 15));
ptr[-1] = offset ^ 0x59; // store how much pointer was shifted to get the original for free()
return ptr;
}
LIQ_NONNULL static void liq_aligned_free(void *inptr)
{
unsigned char *ptr = inptr;
size_t offset = ptr[-1] ^ 0x59;
assert(offset > 0 && offset <= 16);
free(ptr - offset);
}
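/* Note on the pair above: liq_aligned_malloc over-allocates by 16 bytes and
   rounds the result up to the next 16-byte boundary. Because the shift is
   always at least 1 byte, ptr[-1] is valid storage; the shift is stashed
   there, XORed with 0x59 as a cheap corruption check, so liq_aligned_free
   can recover the pointer originally returned by malloc(). */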
LIQ_EXPORT liq_attr* liq_attr_create_with_allocator(void* (*custom_malloc)(size_t), void (*custom_free)(void*))
{
#if USE_SSE
if (!is_sse_available()) {
return NULL;
}
#endif
if (!custom_malloc && !custom_free) {
custom_malloc = liq_aligned_malloc;
custom_free = liq_aligned_free;
} else if (!custom_malloc != !custom_free) {
return NULL; // either specify both or none
}
liq_attr *attr = custom_malloc(sizeof(liq_attr));
if (!attr) return NULL;
*attr = (liq_attr) {
.magic_header = liq_attr_magic,
.malloc = custom_malloc,
.free = custom_free,
.max_colors = 256,
        .min_opaque_val = 1, // whether to preserve opaque colors for IE (1.0 = no; does not affect alpha)
        .last_index_transparent = false, // put the transparent color at the last index; a workaround for Blu-ray subtitles
.target_mse = 0,
.max_mse = MAX_DIFF,
};
liq_set_speed(attr, 4);
return attr;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_add_fixed_color(liq_image *img, liq_color color)
{
if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
if (img->fixed_colors_count > 255) return LIQ_UNSUPPORTED;
float gamma_lut[256];
to_f_set_gamma(gamma_lut, img->gamma);
img->fixed_colors[img->fixed_colors_count++] = rgba_to_f(gamma_lut, (rgba_pixel){
.r = color.r,
.g = color.g,
.b = color.b,
.a = color.a,
});
return LIQ_OK;
}
LIQ_NONNULL static liq_error liq_histogram_add_fixed_color_f(liq_histogram *hist, f_pixel color)
{
if (hist->fixed_colors_count > 255) return LIQ_UNSUPPORTED;
hist->fixed_colors[hist->fixed_colors_count++] = color;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_fixed_color(liq_histogram *hist, liq_color color, double gamma)
{
if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) return LIQ_INVALID_POINTER;
float gamma_lut[256];
to_f_set_gamma(gamma_lut, gamma ? gamma : 0.45455);
const f_pixel px = rgba_to_f(gamma_lut, (rgba_pixel){
.r = color.r,
.g = color.g,
.b = color.b,
.a = color.a,
});
return liq_histogram_add_fixed_color_f(hist, px);
}
LIQ_NONNULL static bool liq_image_use_low_memory(liq_image *img)
{
img->temp_f_row = img->malloc(sizeof(img->f_pixels[0]) * LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_max_threads());
return img->temp_f_row != NULL;
}
LIQ_NONNULL static bool liq_image_should_use_low_memory(liq_image *img, const bool low_memory_hint)
{
return img->width * img->height > (low_memory_hint ? LIQ_HIGH_MEMORY_LIMIT/8 : LIQ_HIGH_MEMORY_LIMIT) / sizeof(f_pixel); // Watch out for integer overflow
}
static liq_image *liq_image_create_internal(const liq_attr *attr, rgba_pixel* rows[], liq_image_get_rgba_row_callback *row_callback, void *row_callback_user_info, int width, int height, double gamma)
{
if (gamma < 0 || gamma > 1.0) {
liq_log_error(attr, "gamma must be >= 0 and <= 1 (try 1/gamma instead)");
return NULL;
}
if (!rows && !row_callback) {
liq_log_error(attr, "missing row data");
return NULL;
}
liq_image *img = attr->malloc(sizeof(liq_image));
if (!img) return NULL;
*img = (liq_image){
.magic_header = liq_image_magic,
.malloc = attr->malloc,
.free = attr->free,
.width = width, .height = height,
.gamma = gamma ? gamma : 0.45455,
.rows = rows,
.row_callback = row_callback,
.row_callback_user_info = row_callback_user_info,
.min_opaque_val = attr->min_opaque_val,
};
if (!rows || attr->min_opaque_val < 1.f) {
img->temp_row = attr->malloc(sizeof(img->temp_row[0]) * LIQ_TEMP_ROW_WIDTH(width) * omp_get_max_threads());
if (!img->temp_row) return NULL;
}
    // if the image is huge, or converted pixels are unlikely to be reused, don't cache the converted pixels
if (liq_image_should_use_low_memory(img, !img->temp_row && !attr->use_contrast_maps && !attr->use_dither_map)) {
verbose_print(attr, " conserving memory");
if (!liq_image_use_low_memory(img)) return NULL;
}
if (img->min_opaque_val < 1.f) {
verbose_print(attr, " Working around IE6 bug by making image less transparent...");
}
return img;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_memory_ownership(liq_image *img, int ownership_flags)
{
if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
if (!img->rows || !ownership_flags || (ownership_flags & ~(LIQ_OWN_ROWS|LIQ_OWN_PIXELS))) {
return LIQ_VALUE_OUT_OF_RANGE;
}
if (ownership_flags & LIQ_OWN_ROWS) {
if (img->free_rows_internal) return LIQ_VALUE_OUT_OF_RANGE;
img->free_rows = true;
}
if (ownership_flags & LIQ_OWN_PIXELS) {
img->free_pixels = true;
if (!img->pixels) {
// for simplicity of this API there's no explicit bitmap argument,
// so the row with the lowest address is assumed to be at the start of the bitmap
img->pixels = img->rows[0];
for(unsigned int i=1; i < img->height; i++) {
img->pixels = MIN(img->pixels, img->rows[i]);
}
}
}
return LIQ_OK;
}
LIQ_NONNULL static void liq_image_free_maps(liq_image *input_image);
LIQ_NONNULL static void liq_image_free_importance_map(liq_image *input_image);
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_importance_map(liq_image *img, unsigned char importance_map[], size_t buffer_size, enum liq_ownership ownership) {
if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
if (!CHECK_USER_POINTER(importance_map)) return LIQ_INVALID_POINTER;
const size_t required_size = img->width * img->height;
if (buffer_size < required_size) {
return LIQ_BUFFER_TOO_SMALL;
}
if (ownership == LIQ_COPY_PIXELS) {
unsigned char *tmp = img->malloc(required_size);
if (!tmp) {
return LIQ_OUT_OF_MEMORY;
}
memcpy(tmp, importance_map, required_size);
importance_map = tmp;
} else if (ownership != LIQ_OWN_PIXELS) {
return LIQ_UNSUPPORTED;
}
liq_image_free_importance_map(img);
img->importance_map = importance_map;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_background(liq_image *img, liq_image *background)
{
if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
if (!CHECK_STRUCT_TYPE(background, liq_image)) return LIQ_INVALID_POINTER;
if (background->background) {
return LIQ_UNSUPPORTED;
}
if (img->width != background->width || img->height != background->height) {
return LIQ_BUFFER_TOO_SMALL;
}
if (img->background) {
liq_image_destroy(img->background);
}
img->background = background;
liq_image_free_maps(img); // Force them to be re-analyzed with the background
return LIQ_OK;
}
LIQ_NONNULL static bool check_image_size(const liq_attr *attr, const int width, const int height)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
return false;
}
if (width <= 0 || height <= 0) {
liq_log_error(attr, "width and height must be > 0");
return false;
}
if (width > INT_MAX/sizeof(rgba_pixel)/height || width > INT_MAX/16/sizeof(f_pixel) || height > INT_MAX/sizeof(size_t)) {
liq_log_error(attr, "image too large");
return false;
}
return true;
}
LIQ_EXPORT liq_image *liq_image_create_custom(const liq_attr *attr, liq_image_get_rgba_row_callback *row_callback, void* user_info, int width, int height, double gamma)
{
if (!check_image_size(attr, width, height)) {
return NULL;
}
return liq_image_create_internal(attr, NULL, row_callback, user_info, width, height, gamma);
}
LIQ_EXPORT liq_image *liq_image_create_rgba_rows(const liq_attr *attr, void *const rows[], int width, int height, double gamma)
{
if (!check_image_size(attr, width, height)) {
return NULL;
}
for(int i=0; i < height; i++) {
if (!CHECK_USER_POINTER(rows+i) || !CHECK_USER_POINTER(rows[i])) {
liq_log_error(attr, "invalid row pointers");
return NULL;
}
}
return liq_image_create_internal(attr, (rgba_pixel**)rows, NULL, NULL, width, height, gamma);
}
LIQ_EXPORT LIQ_NONNULL liq_image *liq_image_create_rgba(const liq_attr *attr, const void* bitmap, int width, int height, double gamma)
{
if (!check_image_size(attr, width, height)) {
return NULL;
}
if (!CHECK_USER_POINTER(bitmap)) {
liq_log_error(attr, "invalid bitmap pointer");
return NULL;
}
rgba_pixel *const pixels = (rgba_pixel *const)bitmap;
rgba_pixel **rows = attr->malloc(sizeof(rows[0])*height);
if (!rows) return NULL;
for(int i=0; i < height; i++) {
rows[i] = pixels + width * i;
}
liq_image *image = liq_image_create_internal(attr, rows, NULL, NULL, width, height, gamma);
if (!image) {
attr->free(rows);
return NULL;
}
image->free_rows = true;
image->free_rows_internal = true;
return image;
}
NEVER_INLINE LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info);
LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info)
{
assert(callback);
assert(temp_row);
callback(temp_row, row, width, user_info);
}
LIQ_NONNULL inline static bool liq_image_has_rgba_pixels(const liq_image *img)
{
if (!CHECK_STRUCT_TYPE(img, liq_image)) {
return false;
}
return img->rows || (img->temp_row && img->row_callback);
}
LIQ_NONNULL inline static bool liq_image_can_use_rgba_rows(const liq_image *img)
{
assert(liq_image_has_rgba_pixels(img));
const bool iebug = img->min_opaque_val < 1.f;
return (img->rows && !iebug);
}
LIQ_NONNULL static const rgba_pixel *liq_image_get_row_rgba(liq_image *img, unsigned int row)
{
if (liq_image_can_use_rgba_rows(img)) {
return img->rows[row];
}
assert(img->temp_row);
rgba_pixel *temp_row = img->temp_row + LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_thread_num();
if (img->rows) {
memcpy(temp_row, img->rows[row], img->width * sizeof(temp_row[0]));
} else {
liq_executing_user_callback(img->row_callback, (liq_color*)temp_row, row, img->width, img->row_callback_user_info);
}
if (img->min_opaque_val < 1.f) modify_alpha(img, temp_row);
return temp_row;
}
LIQ_NONNULL static void convert_row_to_f(liq_image *img, f_pixel *row_f_pixels, const unsigned int row, const float gamma_lut[])
{
assert(row_f_pixels);
assert(!USE_SSE || 0 == ((uintptr_t)row_f_pixels & 15));
const rgba_pixel *const row_pixels = liq_image_get_row_rgba(img, row);
for(unsigned int col=0; col < img->width; col++) {
row_f_pixels[col] = rgba_to_f(gamma_lut, row_pixels[col]);
}
}
LIQ_NONNULL static bool liq_image_get_row_f_init(liq_image *img)
{
assert(omp_get_thread_num() == 0);
if (img->f_pixels) {
return true;
}
if (!liq_image_should_use_low_memory(img, false)) {
img->f_pixels = img->malloc(sizeof(img->f_pixels[0]) * img->width * img->height);
}
if (!img->f_pixels) {
return liq_image_use_low_memory(img);
}
if (!liq_image_has_rgba_pixels(img)) {
return false;
}
float gamma_lut[256];
to_f_set_gamma(gamma_lut, img->gamma);
for(unsigned int i=0; i < img->height; i++) {
convert_row_to_f(img, &img->f_pixels[i*img->width], i, gamma_lut);
}
return true;
}
LIQ_NONNULL static const f_pixel *liq_image_get_row_f(liq_image *img, unsigned int row)
{
if (!img->f_pixels) {
assert(img->temp_f_row); // init should have done that
float gamma_lut[256];
to_f_set_gamma(gamma_lut, img->gamma);
f_pixel *row_for_thread = img->temp_f_row + LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_thread_num();
convert_row_to_f(img, row_for_thread, row, gamma_lut);
return row_for_thread;
}
return img->f_pixels + img->width * row;
}
LIQ_EXPORT LIQ_NONNULL int liq_image_get_width(const liq_image *input_image)
{
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1;
return input_image->width;
}
LIQ_EXPORT LIQ_NONNULL int liq_image_get_height(const liq_image *input_image)
{
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1;
return input_image->height;
}
typedef void free_func(void*);
LIQ_NONNULL static free_func *get_default_free_func(liq_image *img)
{
    // When the default allocator is used, user-supplied pointers must be freed with plain free()
if (img->free_rows_internal || img->free != liq_aligned_free) {
return img->free;
}
return free;
}
LIQ_NONNULL static void liq_image_free_rgba_source(liq_image *input_image)
{
if (input_image->free_pixels && input_image->pixels) {
get_default_free_func(input_image)(input_image->pixels);
input_image->pixels = NULL;
}
if (input_image->free_rows && input_image->rows) {
get_default_free_func(input_image)(input_image->rows);
input_image->rows = NULL;
}
}
LIQ_NONNULL static void liq_image_free_importance_map(liq_image *input_image) {
if (input_image->importance_map) {
input_image->free(input_image->importance_map);
input_image->importance_map = NULL;
}
}
LIQ_NONNULL static void liq_image_free_maps(liq_image *input_image) {
liq_image_free_importance_map(input_image);
if (input_image->edges) {
input_image->free(input_image->edges);
input_image->edges = NULL;
}
if (input_image->dither_map) {
input_image->free(input_image->dither_map);
input_image->dither_map = NULL;
}
}
LIQ_EXPORT LIQ_NONNULL void liq_image_destroy(liq_image *input_image)
{
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return;
liq_image_free_rgba_source(input_image);
liq_image_free_maps(input_image);
if (input_image->f_pixels) {
input_image->free(input_image->f_pixels);
}
if (input_image->temp_row) {
input_image->free(input_image->temp_row);
}
if (input_image->temp_f_row) {
input_image->free(input_image->temp_f_row);
}
if (input_image->background) {
liq_image_destroy(input_image->background);
}
input_image->magic_header = liq_freed_magic;
input_image->free(input_image);
}
LIQ_EXPORT liq_histogram* liq_histogram_create(const liq_attr* attr)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
return NULL;
}
liq_histogram *hist = attr->malloc(sizeof(liq_histogram));
if (!hist) return NULL;
*hist = (liq_histogram) {
.magic_header = liq_histogram_magic,
.malloc = attr->malloc,
.free = attr->free,
.ignorebits = MAX(attr->min_posterization_output, attr->min_posterization_input),
};
return hist;
}
LIQ_EXPORT LIQ_NONNULL void liq_histogram_destroy(liq_histogram *hist)
{
if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) return;
hist->magic_header = liq_freed_magic;
pam_freeacolorhash(hist->acht);
hist->free(hist);
}
LIQ_EXPORT LIQ_NONNULL liq_result *liq_quantize_image(liq_attr *attr, liq_image *img)
{
liq_result *res;
if (LIQ_OK != liq_image_quantize(img, attr, &res)) {
return NULL;
}
return res;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_quantize(liq_image *const img, liq_attr *const attr, liq_result **result_output)
{
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (!liq_image_has_rgba_pixels(img)) {
return LIQ_UNSUPPORTED;
}
liq_histogram *hist = liq_histogram_create(attr);
if (!hist) {
return LIQ_OUT_OF_MEMORY;
}
liq_error err = liq_histogram_add_image(hist, attr, img);
if (LIQ_OK != err) {
return err;
}
err = liq_histogram_quantize_internal(hist, attr, false, result_output);
liq_histogram_destroy(hist);
return err;
}
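/* A minimal end-to-end usage sketch (illustrative only; error handling
   abbreviated). It uses only functions defined in this file; `pixels`,
   `width` and `height` are assumed to describe a tightly packed RGBA bitmap,
   and the guard macro is hypothetical. */
#ifdef LIQ_USAGE_EXAMPLE
static void example_quantize(const void *pixels, int width, int height)
{
    liq_attr *attr = liq_attr_create();
    if (!attr) return;
    liq_set_quality(attr, 70, 100);
    liq_image *img = liq_image_create_rgba(attr, pixels, width, height, 0 /* default gamma */);
    liq_result *res = img ? liq_quantize_image(attr, img) : NULL;
    if (res) {
        const liq_palette *pal = liq_get_palette(res); // up to 256 entries
        (void)pal; // remapping to indexed pixels is done via liq_write_remapped_image() in the full API
        liq_result_destroy(res);
    }
    if (img) liq_image_destroy(img);
    liq_attr_destroy(attr);
}
#endif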
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_quantize(liq_histogram *input_hist, liq_attr *attr, liq_result **result_output) {
return liq_histogram_quantize_internal(input_hist, attr, true, result_output);
}
LIQ_NONNULL static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output)
{
if (!CHECK_USER_POINTER(result_output)) return LIQ_INVALID_POINTER;
*result_output = NULL;
if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
if (liq_progress(attr, 0)) return LIQ_ABORTED;
histogram *hist;
liq_error err = finalize_histogram(input_hist, attr, &hist);
if (err != LIQ_OK) {
return err;
}
err = pngquant_quantize(hist, attr, input_hist->fixed_colors_count, input_hist->fixed_colors, input_hist->gamma, fixed_result_colors, result_output);
pam_freeacolorhist(hist);
return err;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_dithering_level(liq_result *res, float dither_level)
{
if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER;
    if (dither_level < 0 || dither_level > 1.0f) return LIQ_VALUE_OUT_OF_RANGE; // validate the argument (not the old value) before touching state
    if (res->remapping) {
        liq_remapping_result_destroy(res->remapping);
        res->remapping = NULL;
    }
    res->dither_level = dither_level;
return LIQ_OK;
}
LIQ_NONNULL static liq_remapping_result *liq_remapping_result_create(liq_result *result)
{
if (!CHECK_STRUCT_TYPE(result, liq_result)) {
return NULL;
}
liq_remapping_result *res = result->malloc(sizeof(liq_remapping_result));
if (!res) return NULL;
*res = (liq_remapping_result) {
.magic_header = liq_remapping_result_magic,
.malloc = result->malloc,
.free = result->free,
.dither_level = result->dither_level,
.use_dither_map = result->use_dither_map,
.palette_error = result->palette_error,
.gamma = result->gamma,
.palette = pam_duplicate_colormap(result->palette),
.progress_callback = result->progress_callback,
.progress_callback_user_info = result->progress_callback_user_info,
.progress_stage1 = result->use_dither_map ? 20 : 0,
};
return res;
}
LIQ_EXPORT LIQ_NONNULL double liq_get_output_gamma(const liq_result *result)
{
if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
return result->gamma;
}
LIQ_NONNULL static void liq_remapping_result_destroy(liq_remapping_result *result)
{
if (!CHECK_STRUCT_TYPE(result, liq_remapping_result)) return;
if (result->palette) pam_freecolormap(result->palette);
if (result->pixels) result->free(result->pixels);
result->magic_header = liq_freed_magic;
result->free(result);
}
LIQ_EXPORT LIQ_NONNULL void liq_result_destroy(liq_result *res)
{
if (!CHECK_STRUCT_TYPE(res, liq_result)) return;
memset(&res->int_palette, 0, sizeof(liq_palette));
if (res->remapping) {
memset(&res->remapping->int_palette, 0, sizeof(liq_palette));
liq_remapping_result_destroy(res->remapping);
}
pam_freecolormap(res->palette);
res->magic_header = liq_freed_magic;
res->free(res);
}
LIQ_EXPORT LIQ_NONNULL double liq_get_quantization_error(const liq_result *result) {
if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
if (result->palette_error >= 0) {
return mse_to_standard_mse(result->palette_error);
}
return -1;
}
LIQ_EXPORT LIQ_NONNULL double liq_get_remapping_error(const liq_result *result) {
if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
if (result->remapping && result->remapping->palette_error >= 0) {
return mse_to_standard_mse(result->remapping->palette_error);
}
return -1;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_quantization_quality(const liq_result *result) {
if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
if (result->palette_error >= 0) {
return mse_to_quality(result->palette_error);
}
return -1;
}
LIQ_EXPORT LIQ_NONNULL int liq_get_remapping_quality(const liq_result *result) {
if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;
if (result->remapping && result->remapping->palette_error >= 0) {
return mse_to_quality(result->remapping->palette_error);
}
return -1;
}
LIQ_NONNULL static int compare_popularity(const void *ch1, const void *ch2)
{
    const float v1 = ((const colormap_item*)ch1)->popularity;
    const float v2 = ((const colormap_item*)ch2)->popularity;
    return v1 > v2 ? -1 : (v1 < v2 ? 1 : 0); // descending; equal elements must compare equal for qsort to be well-defined
}
LIQ_NONNULL static void sort_palette_qsort(colormap *map, int start, int nelem)
{
if (!nelem) return;
qsort(map->palette + start, nelem, sizeof(map->palette[0]), compare_popularity);
}
#define SWAP_PALETTE(map, a,b) { \
const colormap_item tmp = (map)->palette[(a)]; \
(map)->palette[(a)] = (map)->palette[(b)]; \
(map)->palette[(b)] = tmp; }
LIQ_NONNULL static void sort_palette(colormap *map, const liq_attr *options)
{
/*
** Step 3.5 [GRR]: remap the palette colors so that all entries with
** the maximal alpha value (i.e., fully opaque) are at the end and can
** therefore be omitted from the tRNS chunk.
*/
if (options->last_index_transparent) {
for(unsigned int i=0; i < map->colors; i++) {
if (map->palette[i].acolor.a < 1.f/256.f) {
const unsigned int old = i, transparent_dest = map->colors-1;
SWAP_PALETTE(map, transparent_dest, old);
/* colors sorted by popularity make pngs slightly more compressible */
sort_palette_qsort(map, 0, map->colors-1);
return;
}
}
}
unsigned int non_fixed_colors = 0;
for(unsigned int i = 0; i < map->colors; i++) {
if (map->palette[i].fixed) {
break;
}
non_fixed_colors++;
}
/* move transparent colors to the beginning to shrink trns chunk */
unsigned int num_transparent = 0;
for(unsigned int i = 0; i < non_fixed_colors; i++) {
if (map->palette[i].acolor.a < 255.f/256.f) {
// current transparent color is swapped with earlier opaque one
if (i != num_transparent) {
SWAP_PALETTE(map, num_transparent, i);
i--;
}
num_transparent++;
}
}
liq_verbose_printf(options, " eliminated opaque tRNS-chunk entries...%d entr%s transparent", num_transparent, (num_transparent == 1)? "y" : "ies");
/* colors sorted by popularity make pngs slightly more compressible
* opaque and transparent are sorted separately
*/
sort_palette_qsort(map, 0, num_transparent);
sort_palette_qsort(map, num_transparent, non_fixed_colors - num_transparent);
if (non_fixed_colors > 9 && map->colors > 16) {
SWAP_PALETTE(map, 7, 1); // slightly improves compression
SWAP_PALETTE(map, 8, 2);
SWAP_PALETTE(map, 9, 3);
}
}
inline static unsigned int posterize_channel(unsigned int color, unsigned int bits)
{
return (color & ~((1<<bits)-1)) | (color >> (8-bits));
}
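/* e.g. with bits == 2 the low two bits are replaced by a copy of the top two
   bits, so posterize_channel(0x00, 2) == 0x00 and posterize_channel(0xFF, 2) == 0xFF:
   the full 0..255 range is preserved rather than truncated. */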
LIQ_NONNULL static void set_rounded_palette(liq_palette *const dest, colormap *const map, const double gamma, unsigned int posterize)
{
float gamma_lut[256];
to_f_set_gamma(gamma_lut, gamma);
dest->count = map->colors;
for(unsigned int x = 0; x < map->colors; ++x) {
rgba_pixel px = f_to_rgb(gamma, map->palette[x].acolor);
px.r = posterize_channel(px.r, posterize);
px.g = posterize_channel(px.g, posterize);
px.b = posterize_channel(px.b, posterize);
px.a = posterize_channel(px.a, posterize);
map->palette[x].acolor = rgba_to_f(gamma_lut, px); /* saves rounding error introduced by to_rgb, which makes remapping & dithering more accurate */
if (!px.a && !map->palette[x].fixed) {
px.r = 71; px.g = 112; px.b = 76;
}
dest->entries[x] = (liq_color){.r=px.r,.g=px.g,.b=px.b,.a=px.a};
}
}
LIQ_EXPORT LIQ_NONNULL const liq_palette *liq_get_palette(liq_result *result)
{
if (!CHECK_STRUCT_TYPE(result, liq_result)) return NULL;
if (result->remapping && result->remapping->int_palette.count) {
return &result->remapping->int_palette;
}
if (!result->int_palette.count) {
set_rounded_palette(&result->int_palette, result->palette, result->gamma, result->min_posterization_output);
}
return &result->int_palette;
}
LIQ_NONNULL static float remap_to_palette(liq_image *const input_image, unsigned char *const *const output_pixels, colormap *const map)
{
const int rows = input_image->height;
const unsigned int cols = input_image->width;
double remapping_error=0;
if (!liq_image_get_row_f_init(input_image)) {
return -1;
}
if (input_image->background && !liq_image_get_row_f_init(input_image->background)) {
return -1;
}
const colormap_item *acolormap = map->palette;
struct nearest_map *const n = nearest_init(map);
const int transparent_index = input_image->background ? nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0;
const unsigned int max_threads = omp_get_max_threads();
LIQ_ARRAY(kmeans_state, average_color, (KMEANS_CACHE_LINE_GAP+map->colors) * max_threads);
kmeans_init(map, max_threads, average_color);
#pragma omp parallel for if (rows*cols > 3000) \
schedule(static) default(none) shared(acolormap) shared(average_color) reduction(+:remapping_error)
for(int row = 0; row < rows; ++row) {
const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row);
const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ? liq_image_get_row_f(input_image->background, row) : NULL;
unsigned int last_match=0;
for(unsigned int col = 0; col < cols; ++col) {
float diff;
last_match = nearest_search(n, &row_pixels[col], last_match, &diff);
if (bg_pixels && colordifference(bg_pixels[col], acolormap[last_match].acolor) <= diff) {
last_match = transparent_index;
}
output_pixels[row][col] = last_match;
remapping_error += diff;
kmeans_update_color(row_pixels[col], 1.0, map, last_match, omp_get_thread_num(), average_color);
}
}
kmeans_finalize(map, max_threads, average_color);
nearest_free(n);
return remapping_error / (input_image->width * input_image->height);
}
inline static f_pixel get_dithered_pixel(const float dither_level, const float max_dither_error, const f_pixel thiserr, const f_pixel px)
{
/* Use Floyd-Steinberg errors to adjust actual color. */
const float sr = thiserr.r * dither_level,
sg = thiserr.g * dither_level,
sb = thiserr.b * dither_level,
sa = thiserr.a * dither_level;
float ratio = 1.0;
const float max_overflow = 1.1f;
const float max_underflow = -0.1f;
// allowing some overflow prevents undithered bands caused by clamping of all channels
if (px.r + sr > max_overflow) ratio = MIN(ratio, (max_overflow -px.r)/sr);
else { if (px.r + sr < max_underflow) ratio = MIN(ratio, (max_underflow-px.r)/sr); }
if (px.g + sg > max_overflow) ratio = MIN(ratio, (max_overflow -px.g)/sg);
else { if (px.g + sg < max_underflow) ratio = MIN(ratio, (max_underflow-px.g)/sg); }
if (px.b + sb > max_overflow) ratio = MIN(ratio, (max_overflow -px.b)/sb);
else { if (px.b + sb < max_underflow) ratio = MIN(ratio, (max_underflow-px.b)/sb); }
float a = px.a + sa;
if (a > 1.f) { a = 1.f; }
else if (a < 0) { a = 0; }
    // If the dithering error is crazy high, don't propagate it that much.
    // This prevents crazy green pixels popping out of the blue (or red or black! ;)
const float dither_error = sr*sr + sg*sg + sb*sb + sa*sa;
if (dither_error > max_dither_error) {
ratio *= 0.8f;
} else if (dither_error < 2.f/256.f/256.f) {
// don't dither areas that don't have noticeable error — makes file smaller
return px;
}
return (f_pixel) {
.r=px.r + sr * ratio,
.g=px.g + sg * ratio,
.b=px.b + sb * ratio,
.a=a,
};
}
/**
Uses edge/noise map to apply dithering only to flat areas. Dithering on edges creates jagged lines, and noisy areas are "naturally" dithered.
 If output_image_is_remapped is true, only pixels noticeably changed by error diffusion will be written to the output image.
*/
LIQ_NONNULL static bool remap_to_palette_floyd(liq_image *input_image, unsigned char *const output_pixels[], liq_remapping_result *quant, const float max_dither_error, const bool output_image_is_remapped)
{
const int rows = input_image->height, cols = input_image->width;
const unsigned char *dither_map = quant->use_dither_map ? (input_image->dither_map ? input_image->dither_map : input_image->edges) : NULL;
const colormap *map = quant->palette;
const colormap_item *acolormap = map->palette;
if (!liq_image_get_row_f_init(input_image)) {
return false;
}
if (input_image->background && !liq_image_get_row_f_init(input_image->background)) {
return false;
}
/* Initialize Floyd-Steinberg error vectors. */
const size_t errwidth = cols+2;
f_pixel *restrict thiserr = input_image->malloc(errwidth * sizeof(thiserr[0]) * 2); // +2 saves from checking out of bounds access
if (!thiserr) return false;
f_pixel *restrict nexterr = thiserr + errwidth;
memset(thiserr, 0, errwidth * sizeof(thiserr[0]));
bool ok = true;
struct nearest_map *const n = nearest_init(map);
const int transparent_index = input_image->background ? nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0;
    // the response to this value is non-linear; without this compensation any value < 0.8 would give almost no dithering
float base_dithering_level = quant->dither_level;
base_dithering_level = 1.f - (1.f-base_dithering_level)*(1.f-base_dithering_level);
if (dither_map) {
base_dithering_level *= 1.f/255.f; // convert byte to float
}
base_dithering_level *= 15.f/16.f; // prevent small errors from accumulating
int fs_direction = 1;
unsigned int last_match=0;
for (int row = 0; row < rows; ++row) {
if (liq_remap_progress(quant, quant->progress_stage1 + row * (100.f - quant->progress_stage1) / rows)) {
ok = false;
break;
}
memset(nexterr, 0, errwidth * sizeof(nexterr[0]));
int col = (fs_direction > 0) ? 0 : (cols - 1);
const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row);
const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ? liq_image_get_row_f(input_image->background, row) : NULL;
do {
float dither_level = base_dithering_level;
if (dither_map) {
dither_level *= dither_map[row*cols + col];
}
const f_pixel spx = get_dithered_pixel(dither_level, max_dither_error, thiserr[col + 1], row_pixels[col]);
const unsigned int guessed_match = output_image_is_remapped ? output_pixels[row][col] : last_match;
float diff;
last_match = nearest_search(n, &spx, guessed_match, &diff);
f_pixel output_px = acolormap[last_match].acolor;
if (bg_pixels && colordifference(bg_pixels[col], output_px) <= diff) {
output_px = bg_pixels[col];
output_pixels[row][col] = transparent_index;
} else {
output_pixels[row][col] = last_match;
}
f_pixel err = {
.r = (spx.r - output_px.r),
.g = (spx.g - output_px.g),
.b = (spx.b - output_px.b),
.a = (spx.a - output_px.a),
};
            // If the dithering error is crazy high, don't propagate it that much.
            // This prevents crazy green pixels popping out of the blue (or red or black! ;)
if (err.r*err.r + err.g*err.g + err.b*err.b + err.a*err.a > max_dither_error) {
err.r *= 0.75f;
err.g *= 0.75f;
err.b *= 0.75f;
err.a *= 0.75f;
}
/* Propagate Floyd-Steinberg error terms. */
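            // Standard Floyd-Steinberg kernel (array indices are shifted by +1
            // because thiserr/nexterr have a padding cell at each end, errwidth == cols+2):
            //          [   *  ] [ 7/16 ]
            // [ 3/16 ] [ 5/16 ] [ 1/16 ]
            // The kernel is mirrored when the serpentine scan runs right-to-left.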
if (fs_direction > 0) {
thiserr[col + 2].a += err.a * (7.f/16.f);
thiserr[col + 2].r += err.r * (7.f/16.f);
thiserr[col + 2].g += err.g * (7.f/16.f);
thiserr[col + 2].b += err.b * (7.f/16.f);
nexterr[col + 2].a = err.a * (1.f/16.f);
nexterr[col + 2].r = err.r * (1.f/16.f);
nexterr[col + 2].g = err.g * (1.f/16.f);
nexterr[col + 2].b = err.b * (1.f/16.f);
nexterr[col + 1].a += err.a * (5.f/16.f);
nexterr[col + 1].r += err.r * (5.f/16.f);
nexterr[col + 1].g += err.g * (5.f/16.f);
nexterr[col + 1].b += err.b * (5.f/16.f);
nexterr[col ].a += err.a * (3.f/16.f);
nexterr[col ].r += err.r * (3.f/16.f);
nexterr[col ].g += err.g * (3.f/16.f);
nexterr[col ].b += err.b * (3.f/16.f);
} else {
thiserr[col ].a += err.a * (7.f/16.f);
thiserr[col ].r += err.r * (7.f/16.f);
thiserr[col ].g += err.g * (7.f/16.f);
thiserr[col ].b += err.b * (7.f/16.f);
nexterr[col ].a = err.a * (1.f/16.f);
nexterr[col ].r = err.r * (1.f/16.f);
nexterr[col ].g = err.g * (1.f/16.f);
nexterr[col ].b = err.b * (1.f/16.f);
nexterr[col + 1].a += err.a * (5.f/16.f);
nexterr[col + 1].r += err.r * (5.f/16.f);
nexterr[col + 1].g += err.g * (5.f/16.f);
nexterr[col + 1].b += err.b * (5.f/16.f);
nexterr[col + 2].a += err.a * (3.f/16.f);
nexterr[col + 2].r += err.r * (3.f/16.f);
nexterr[col + 2].g += err.g * (3.f/16.f);
nexterr[col + 2].b += err.b * (3.f/16.f);
}
// remapping is done in zig-zag
col += fs_direction;
if (fs_direction > 0) {
if (col >= cols) break;
} else {
if (col < 0) break;
}
} while(1);
f_pixel *const temperr = thiserr;
thiserr = nexterr;
nexterr = temperr;
fs_direction = -fs_direction;
}
input_image->free(MIN(thiserr, nexterr)); // MIN because pointers were swapped
nearest_free(n);
return ok;
}
/* fixed colors are always included in the palette, so it would be wasteful to duplicate them in palette from histogram */
LIQ_NONNULL static void remove_fixed_colors_from_histogram(histogram *hist, const int fixed_colors_count, const f_pixel fixed_colors[], const float target_mse)
{
const float max_difference = MAX(target_mse/2.f, 2.f/256.f/256.f);
if (fixed_colors_count) {
for(int j=0; j < hist->size; j++) {
for(unsigned int i=0; i < fixed_colors_count; i++) {
if (colordifference(hist->achv[j].acolor, fixed_colors[i]) < max_difference) {
hist->achv[j] = hist->achv[--hist->size]; // remove color from histogram by overwriting with the last entry
j--; break; // continue searching histogram
}
}
}
}
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_colors(liq_histogram *input_hist, const liq_attr *options, const liq_histogram_entry entries[], int num_entries, double gamma)
{
if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER;
if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
if (!CHECK_USER_POINTER(entries)) return LIQ_INVALID_POINTER;
if (gamma < 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE;
if (num_entries <= 0 || num_entries > 1<<30) return LIQ_VALUE_OUT_OF_RANGE;
if (input_hist->ignorebits > 0 && input_hist->had_image_added) {
return LIQ_UNSUPPORTED;
}
input_hist->ignorebits = 0;
input_hist->had_image_added = true;
input_hist->gamma = gamma ? gamma : 0.45455;
if (!input_hist->acht) {
input_hist->acht = pam_allocacolorhash(~0, num_entries*num_entries, 0, options->malloc, options->free);
if (!input_hist->acht) {
return LIQ_OUT_OF_MEMORY;
}
}
// Fake image size. It's only for hash size estimates.
if (!input_hist->acht->cols) {
input_hist->acht->cols = num_entries;
}
input_hist->acht->rows += num_entries;
const unsigned int hash_size = input_hist->acht->hash_size;
for(int i=0; i < num_entries; i++) {
const rgba_pixel rgba = {
.r = entries[i].color.r,
.g = entries[i].color.g,
.b = entries[i].color.b,
.a = entries[i].color.a,
};
union rgba_as_int px = {rgba};
unsigned int hash;
if (px.rgba.a) {
hash = px.l % hash_size;
} else {
hash=0; px.l=0;
}
if (!pam_add_to_hash(input_hist->acht, hash, entries[i].count, px, i, num_entries)) {
return LIQ_OUT_OF_MEMORY;
}
}
return LIQ_OK;
}
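/* A minimal sketch of the histogram-based entry point (illustrative only):
   colors can be fed in as explicit counted entries instead of an image. The
   entries array comes from the caller, and the guard macro is hypothetical. */
#ifdef LIQ_HISTOGRAM_USAGE_EXAMPLE
static liq_result *example_quantize_histogram(liq_attr *attr, const liq_histogram_entry entries[], int num_entries)
{
    liq_histogram *hist = liq_histogram_create(attr);
    if (!hist) return NULL;
    liq_result *res = NULL;
    if (LIQ_OK == liq_histogram_add_colors(hist, attr, entries, num_entries, 0 /* default gamma */)) {
        liq_histogram_quantize(hist, attr, &res); // res stays NULL on failure
    }
    liq_histogram_destroy(hist);
    return res;
}
#endif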
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_image(liq_histogram *input_hist, const liq_attr *options, liq_image *input_image)
{
if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER;
if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;
const unsigned int cols = input_image->width, rows = input_image->height;
if (!input_image->importance_map && options->use_contrast_maps) {
contrast_maps(input_image);
}
input_hist->gamma = input_image->gamma;
for(int i = 0; i < input_image->fixed_colors_count; i++) {
liq_error res = liq_histogram_add_fixed_color_f(input_hist, input_image->fixed_colors[i]);
if (res != LIQ_OK) {
return res;
}
}
/*
** Step 2: attempt to make a histogram of the colors, unclustered.
** If at first we don't succeed, increase ignorebits to increase color
** coherence and try again.
*/
if (liq_progress(options, options->progress_stage1 * 0.4f)) {
return LIQ_ABORTED;
}
const bool all_rows_at_once = liq_image_can_use_rgba_rows(input_image);
    // The usual solution is to start from scratch when the limit is exceeded, but that's not possible
    // if this is not the first image added.
const unsigned int max_histogram_entries = input_hist->had_image_added ? ~0 : options->max_histogram_entries;
do {
if (!input_hist->acht) {
input_hist->acht = pam_allocacolorhash(max_histogram_entries, rows*cols, input_hist->ignorebits, options->malloc, options->free);
}
if (!input_hist->acht) return LIQ_OUT_OF_MEMORY;
        // The histogram uses the noise/contrast map for importance; color accuracy in noisy areas is less important.
        // The noise map does not include edges, to avoid ruining anti-aliasing.
for(unsigned int row=0; row < rows; row++) {
bool added_ok;
if (all_rows_at_once) {
added_ok = pam_computeacolorhash(input_hist->acht, (const rgba_pixel *const *)input_image->rows, cols, rows, input_image->importance_map);
if (added_ok) break;
} else {
const rgba_pixel* rows_p[1] = { liq_image_get_row_rgba(input_image, row) };
added_ok = pam_computeacolorhash(input_hist->acht, rows_p, cols, 1, input_image->importance_map ? &input_image->importance_map[row * cols] : NULL);
}
if (!added_ok) {
input_hist->ignorebits++;
liq_verbose_printf(options, " too many colors! Scaling colors to improve clustering... %d", input_hist->ignorebits);
pam_freeacolorhash(input_hist->acht);
input_hist->acht = NULL;
if (liq_progress(options, options->progress_stage1 * 0.6f)) return LIQ_ABORTED;
break;
}
}
} while(!input_hist->acht);
input_hist->had_image_added = true;
liq_image_free_importance_map(input_image);
if (input_image->free_pixels && input_image->f_pixels) {
        liq_image_free_rgba_source(input_image); // now we can free the RGBA source, since a copy has been made in f_pixels
}
return LIQ_OK;
}
LIQ_NONNULL static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output)
{
if (liq_progress(options, options->progress_stage1 * 0.9f)) {
return LIQ_ABORTED;
}
if (!input_hist->acht) {
return LIQ_BITMAP_NOT_AVAILABLE;
}
histogram *hist = pam_acolorhashtoacolorhist(input_hist->acht, input_hist->gamma, options->malloc, options->free);
pam_freeacolorhash(input_hist->acht);
input_hist->acht = NULL;
if (!hist) {
return LIQ_OUT_OF_MEMORY;
}
liq_verbose_printf(options, " made histogram...%d colors found", hist->size);
remove_fixed_colors_from_histogram(hist, input_hist->fixed_colors_count, input_hist->fixed_colors, options->target_mse);
*hist_output = hist;
return LIQ_OK;
}
LIQ_NONNULL static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels)
{
/* IE6 makes colors with even slightest transparency completely transparent,
thus to improve situation in IE, make colors that are less than ~10% transparent
completely opaque */
const float min_opaque_val = input_image->min_opaque_val;
const float almost_opaque_val = min_opaque_val * 169.f/256.f;
const unsigned int almost_opaque_val_int = (min_opaque_val * 169.f/256.f)*255.f;
for(unsigned int col = 0; col < input_image->width; col++) {
const rgba_pixel px = row_pixels[col];
        /* IE bug: to avoid a visible step caused by forced opaqueness, linearly raise the opacity of almost-opaque colors */
if (px.a >= almost_opaque_val_int) {
float al = px.a / 255.f;
al = almost_opaque_val + (al-almost_opaque_val) * (1.f-almost_opaque_val) / (min_opaque_val-almost_opaque_val);
al *= 256.f;
row_pixels[col].a = al >= 255.f ? 255 : al;
}
}
}
/**
Builds two maps:
importance_map - approximation of areas with high-frequency noise, except straight edges. 1=flat, 0=noisy.
edges - noise map including all edges
*/
LIQ_NONNULL static void contrast_maps(liq_image *image)
{
const unsigned int cols = image->width, rows = image->height;
if (cols < 4 || rows < 4 || (3*cols*rows) > LIQ_HIGH_MEMORY_LIMIT) {
return;
}
unsigned char *restrict noise = image->importance_map ? image->importance_map : image->malloc(cols*rows);
image->importance_map = NULL;
unsigned char *restrict edges = image->edges ? image->edges : image->malloc(cols*rows);
image->edges = NULL;
unsigned char *restrict tmp = image->malloc(cols*rows);
if (!noise || !edges || !tmp || !liq_image_get_row_f_init(image)) {
image->free(noise);
image->free(edges);
image->free(tmp);
return;
}
const f_pixel *curr_row, *prev_row, *next_row;
curr_row = prev_row = next_row = liq_image_get_row_f(image, 0);
for (unsigned int j=0; j < rows; j++) {
prev_row = curr_row;
curr_row = next_row;
next_row = liq_image_get_row_f(image, MIN(rows-1,j+1));
f_pixel prev, curr = curr_row[0], next=curr;
for (unsigned int i=0; i < cols; i++) {
prev=curr;
curr=next;
next = curr_row[MIN(cols-1,i+1)];
// contrast is difference between pixels neighbouring horizontally and vertically
const float a = fabsf(prev.a+next.a - curr.a*2.f),
r = fabsf(prev.r+next.r - curr.r*2.f),
g = fabsf(prev.g+next.g - curr.g*2.f),
b = fabsf(prev.b+next.b - curr.b*2.f);
const f_pixel prevl = prev_row[i];
const f_pixel nextl = next_row[i];
const float a1 = fabsf(prevl.a+nextl.a - curr.a*2.f),
r1 = fabsf(prevl.r+nextl.r - curr.r*2.f),
g1 = fabsf(prevl.g+nextl.g - curr.g*2.f),
b1 = fabsf(prevl.b+nextl.b - curr.b*2.f);
const float horiz = MAX(MAX(a,r),MAX(g,b));
const float vert = MAX(MAX(a1,r1),MAX(g1,b1));
const float edge = MAX(horiz,vert);
float z = edge - fabsf(horiz-vert)*.5f;
z = 1.f - MAX(z,MIN(horiz,vert));
z *= z; // noise is amplified
z *= z;
// 85 is about 1/3rd of weight (not 0, because noisy pixels still need to be included, just not as precisely).
const unsigned int z_int = 85 + (unsigned int)(z * 171.f);
noise[j*cols+i] = MIN(z_int, 255);
const int e_int = 255 - (int)(edge * 256.f);
edges[j*cols+i] = e_int > 0 ? MIN(e_int, 255) : 0;
}
}
// noise areas are shrunk and then expanded to remove thin edges from the map
liq_max3(noise, tmp, cols, rows);
liq_max3(tmp, noise, cols, rows);
liq_blur(noise, tmp, noise, cols, rows, 3);
liq_max3(noise, tmp, cols, rows);
liq_min3(tmp, noise, cols, rows);
liq_min3(noise, tmp, cols, rows);
liq_min3(tmp, noise, cols, rows);
liq_min3(edges, tmp, cols, rows);
liq_max3(tmp, edges, cols, rows);
for(unsigned int i=0; i < cols*rows; i++) edges[i] = MIN(noise[i], edges[i]);
image->free(tmp);
image->importance_map = noise;
image->edges = edges;
}
/**
* Builds map of neighbor pixels mapped to the same palette entry
*
 * For efficiency/simplicity it mainly looks for runs of identical consecutive pixels horizontally
 * and peeks 1 pixel above/below. A full 2-D algorithm doesn't improve it significantly,
 * and a correct flood fill doesn't have visually good properties.
*/
LIQ_NONNULL static void update_dither_map(liq_image *input_image, unsigned char *const *const row_pointers, colormap *map)
{
const unsigned int width = input_image->width;
const unsigned int height = input_image->height;
unsigned char *const edges = input_image->edges;
for(unsigned int row=0; row < height; row++) {
unsigned char lastpixel = row_pointers[row][0];
unsigned int lastcol=0;
for(unsigned int col=1; col < width; col++) {
const unsigned char px = row_pointers[row][col];
if (input_image->background && map->palette[px].acolor.a < 1.f/256.f) {
// Transparency may or may not create an edge. When there's an explicit background set, assume no edge.
continue;
}
if (px != lastpixel || col == width-1) {
int neighbor_count = 10 * (col-lastcol);
unsigned int i=lastcol;
while(i < col) {
if (row > 0) {
unsigned char pixelabove = row_pointers[row-1][i];
if (pixelabove == lastpixel) neighbor_count += 15;
}
if (row < height-1) {
unsigned char pixelbelow = row_pointers[row+1][i];
if (pixelbelow == lastpixel) neighbor_count += 15;
}
i++;
}
while(lastcol <= col) {
int e = edges[row*width + lastcol];
edges[row*width + lastcol++] = (e+128) * (255.f/(255+128)) * (1.f - 20.f / (20 + neighbor_count));
}
lastpixel = px;
}
}
}
input_image->dither_map = input_image->edges;
input_image->edges = NULL;
}
/**
* Palette can be NULL, in which case it creates a new palette from scratch.
*/
static colormap *add_fixed_colors_to_palette(colormap *palette, const int max_colors, const f_pixel fixed_colors[], const int fixed_colors_count, void* (*malloc)(size_t), void (*free)(void*))
{
if (!fixed_colors_count) return palette;
colormap *newpal = pam_colormap(MIN(max_colors, (palette ? palette->colors : 0) + fixed_colors_count), malloc, free);
unsigned int i=0;
if (palette && fixed_colors_count < max_colors) {
unsigned int palette_max = MIN(palette->colors, max_colors - fixed_colors_count);
for(; i < palette_max; i++) {
newpal->palette[i] = palette->palette[i];
}
}
for(int j=0; j < MIN(max_colors, fixed_colors_count); j++) {
newpal->palette[i++] = (colormap_item){
.acolor = fixed_colors[j],
.fixed = true,
};
}
if (palette) pam_freecolormap(palette);
return newpal;
}
LIQ_NONNULL static void adjust_histogram_callback(hist_item *item, float diff)
{
item->adjusted_weight = (item->perceptual_weight+item->adjusted_weight) * (sqrtf(1.f+diff));
}
/**
 Repeats mediancut with different histogram weights to find the palette with minimum error.
 feedback_loop_trials controls how long the search will take. 0 or less skips the search.
*/
static colormap *find_best_palette(histogram *hist, const liq_attr *options, const double max_mse, const f_pixel fixed_colors[], const unsigned int fixed_colors_count, double *palette_error_p)
{
unsigned int max_colors = options->max_colors;
    // if the output is posterized it doesn't make sense to aim for perfect colors, so increase target_mse
// at this point actual gamma is not set, so very conservative posterization estimate is used
const double target_mse = MIN(max_mse, MAX(options->target_mse, pow((1<<options->min_posterization_output)/1024.0, 2)));
int feedback_loop_trials = options->feedback_loop_trials;
if (hist->size > 5000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
if (hist->size > 25000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
if (hist->size > 50000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
if (hist->size > 100000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
colormap *acolormap = NULL;
double least_error = MAX_DIFF;
double target_mse_overshoot = feedback_loop_trials>0 ? 1.05 : 1.0;
const float total_trials = (float)(feedback_loop_trials>0?feedback_loop_trials:1);
do {
colormap *newmap;
if (hist->size && fixed_colors_count < max_colors) {
newmap = mediancut(hist, max_colors-fixed_colors_count, target_mse * target_mse_overshoot, MAX(MAX(45.0/65536.0, target_mse), least_error)*1.2,
options->malloc, options->free);
} else {
feedback_loop_trials = 0;
newmap = NULL;
}
newmap = add_fixed_colors_to_palette(newmap, max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free);
if (!newmap) {
return NULL;
}
if (feedback_loop_trials <= 0) {
return newmap;
}
// after palette has been created, total error (MSE) is calculated to keep the best palette
// at the same time K-Means iteration is done to improve the palette
// and histogram weights are adjusted based on remapping error to give more weight to poorly matched colors
const bool first_run_of_target_mse = !acolormap && target_mse > 0;
double total_error = kmeans_do_iteration(hist, newmap, first_run_of_target_mse ? NULL : adjust_histogram_callback);
// goal is to increase quality or to reduce number of colors used if quality is good enough
if (!acolormap || total_error < least_error || (total_error <= target_mse && newmap->colors < max_colors)) {
if (acolormap) pam_freecolormap(acolormap);
acolormap = newmap;
if (total_error < target_mse && total_error > 0) {
// K-Means iteration improves quality above what mediancut aims for
// this compensates for it, making mediancut aim for worse
target_mse_overshoot = MIN(target_mse_overshoot*1.25, target_mse/total_error);
}
least_error = total_error;
// if number of colors could be reduced, try to keep it that way
// but allow extra color as a bit of wiggle room in case quality can be improved too
max_colors = MIN(newmap->colors+1, max_colors);
feedback_loop_trials -= 1; // asymptotic improvement could make it go on forever
} else {
for(unsigned int j=0; j < hist->size; j++) {
hist->achv[j].adjusted_weight = (hist->achv[j].perceptual_weight + hist->achv[j].adjusted_weight)/2.0;
}
target_mse_overshoot = 1.0;
feedback_loop_trials -= 6;
// if error is really bad, it's unlikely to improve, so end sooner
if (total_error > least_error*4) feedback_loop_trials -= 3;
pam_freecolormap(newmap);
}
float fraction_done = 1.f-MAX(0.f, feedback_loop_trials/total_trials);
if (liq_progress(options, options->progress_stage1 + fraction_done * options->progress_stage2)) break;
liq_verbose_printf(options, " selecting colors...%d%%", (int)(100.f * fraction_done));
}
while(feedback_loop_trials > 0);
*palette_error_p = least_error;
return acolormap;
}
static colormap *histogram_to_palette(const histogram *hist, const liq_attr *options) {
if (!hist->size) {
return NULL;
}
colormap *acolormap = pam_colormap(hist->size, options->malloc, options->free);
for(unsigned int i=0; i < hist->size; i++) {
acolormap->palette[i].acolor = hist->achv[i].acolor;
acolormap->palette[i].popularity = hist->achv[i].perceptual_weight;
}
return acolormap;
}
LIQ_NONNULL static liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **result_output)
{
colormap *acolormap;
double palette_error = -1;
assert((verbose_print(options, "SLOW debug checks enabled. Recompile with NDEBUG for normal operation."),1));
const bool few_input_colors = hist->size+fixed_colors_count <= options->max_colors;
if (liq_progress(options, options->progress_stage1)) return LIQ_ABORTED;
// If image has few colors to begin with (and no quality degradation is required)
// then it's possible to skip quantization entirely
if (few_input_colors && options->target_mse == 0) {
acolormap = add_fixed_colors_to_palette(histogram_to_palette(hist, options), options->max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free);
palette_error = 0;
} else {
const double max_mse = options->max_mse * (few_input_colors ? 0.33 : 1.0); // when degrading image that's already paletted, require much higher improvement, since pal2pal often looks bad and there's little gain
acolormap = find_best_palette(hist, options, max_mse, fixed_colors, fixed_colors_count, &palette_error);
if (!acolormap) {
return LIQ_VALUE_OUT_OF_RANGE;
}
// K-Means iteration approaches local minimum for the palette
double iteration_limit = options->kmeans_iteration_limit;
unsigned int iterations = options->kmeans_iterations;
if (!iterations && palette_error < 0 && max_mse < MAX_DIFF) iterations = 1; // otherwise total error is never calculated and MSE limit won't work
if (iterations) {
// likely_colormap_index (used and set in kmeans_do_iteration) can't point to index outside colormap
if (acolormap->colors < 256) for(unsigned int j=0; j < hist->size; j++) {
if (hist->achv[j].tmp.likely_colormap_index >= acolormap->colors) {
hist->achv[j].tmp.likely_colormap_index = 0; // actual value doesn't matter, as the guess is out of date anyway
}
}
if (hist->size > 5000) {iterations = (iterations*3 + 3)/4;}
if (hist->size > 25000) {iterations = (iterations*3 + 3)/4;}
if (hist->size > 50000) {iterations = (iterations*3 + 3)/4;}
if (hist->size > 100000) {iterations = (iterations*3 + 3)/4; iteration_limit *= 2;}
verbose_print(options, " moving colormap towards local minimum");
double previous_palette_error = MAX_DIFF;
for(unsigned int i=0; i < iterations; i++) {
palette_error = kmeans_do_iteration(hist, acolormap, NULL);
if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + (i * options->progress_stage3 * 0.9f) / iterations)) {
break;
}
if (fabs(previous_palette_error-palette_error) < iteration_limit) {
break;
}
if (palette_error > max_mse*1.5) { // probably hopeless
if (palette_error > max_mse*3.0) break; // definitely hopeless
i++;
}
previous_palette_error = palette_error;
}
}
if (palette_error > max_mse) {
liq_verbose_printf(options, " image degradation MSE=%.3f (Q=%d) exceeded limit of %.3f (%d)",
mse_to_standard_mse(palette_error), mse_to_quality(palette_error),
mse_to_standard_mse(max_mse), mse_to_quality(max_mse));
pam_freecolormap(acolormap);
return LIQ_QUALITY_TOO_LOW;
}
}
if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + options->progress_stage3 * 0.95f)) {
pam_freecolormap(acolormap);
return LIQ_ABORTED;
}
sort_palette(acolormap, options);
// If palette was created from a multi-image histogram,
// then it shouldn't be optimized for one image during remapping
if (fixed_result_colors) {
for(unsigned int i=0; i < acolormap->colors; i++) {
acolormap->palette[i].fixed = true;
}
}
liq_result *result = options->malloc(sizeof(liq_result));
if (!result) return LIQ_OUT_OF_MEMORY;
*result = (liq_result){
.magic_header = liq_result_magic,
.malloc = options->malloc,
.free = options->free,
.palette = acolormap,
.palette_error = palette_error,
.use_dither_map = options->use_dither_map,
.gamma = gamma,
.min_posterization_output = options->min_posterization_output,
};
*result_output = result;
return LIQ_OK;
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image(liq_result *result, liq_image *input_image, void *buffer, size_t buffer_size)
{
if (!CHECK_STRUCT_TYPE(result, liq_result)) {
return LIQ_INVALID_POINTER;
}
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) {
return LIQ_INVALID_POINTER;
}
if (!CHECK_USER_POINTER(buffer)) {
return LIQ_INVALID_POINTER;
}
const size_t required_size = input_image->width * input_image->height;
if (buffer_size < required_size) {
return LIQ_BUFFER_TOO_SMALL;
}
LIQ_ARRAY(unsigned char *, rows, input_image->height);
unsigned char *buffer_bytes = buffer;
for(unsigned int i=0; i < input_image->height; i++) {
rows[i] = &buffer_bytes[input_image->width * i];
}
return liq_write_remapped_image_rows(result, input_image, rows);
}
LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image_rows(liq_result *quant, liq_image *input_image, unsigned char **row_pointers)
{
if (!CHECK_STRUCT_TYPE(quant, liq_result)) return LIQ_INVALID_POINTER;
if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;
for(unsigned int i=0; i < input_image->height; i++) {
if (!CHECK_USER_POINTER(row_pointers+i) || !CHECK_USER_POINTER(row_pointers[i])) return LIQ_INVALID_POINTER;
}
if (quant->remapping) {
liq_remapping_result_destroy(quant->remapping);
}
liq_remapping_result *const result = quant->remapping = liq_remapping_result_create(quant);
if (!result) return LIQ_OUT_OF_MEMORY;
if (!input_image->edges && !input_image->dither_map && quant->use_dither_map) {
contrast_maps(input_image);
}
if (liq_remap_progress(result, result->progress_stage1 * 0.25f)) {
return LIQ_ABORTED;
}
/*
** Step 4: map the colors in the image to their closest match in the
** new colormap, and write 'em out.
*/
float remapping_error = result->palette_error;
if (result->dither_level == 0) {
set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);
remapping_error = remap_to_palette(input_image, row_pointers, result->palette);
} else {
const bool is_image_huge = (input_image->width * input_image->height) > 2000 * 2000;
const bool allow_dither_map = result->use_dither_map == 2 || (!is_image_huge && result->use_dither_map);
const bool generate_dither_map = allow_dither_map && (input_image->edges && !input_image->dither_map);
if (generate_dither_map) {
// If dithering (with dither map) is required, this image is used to find areas that require dithering
remapping_error = remap_to_palette(input_image, row_pointers, result->palette);
update_dither_map(input_image, row_pointers, result->palette);
}
if (liq_remap_progress(result, result->progress_stage1 * 0.5f)) {
return LIQ_ABORTED;
}
// remapping above was the last chance to do K-Means iteration, hence the final palette is set after remapping
set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);
if (!remap_to_palette_floyd(input_image, row_pointers, result, MAX(remapping_error*2.4, 16.f/256.f), generate_dither_map)) {
return LIQ_ABORTED;
}
}
// The remapping error measured on a dithered image is absurdly high, so the non-dithered value is always used.
// palette_error includes some perceptual weighting from the histogram, which correlates more closely with DSSIM,
// so that should be used when possible.
if (result->palette_error < 0) {
result->palette_error = remapping_error;
}
return LIQ_OK;
}
LIQ_EXPORT int liq_version() {
return LIQ_VERSION;
}
|
exch.c | #include "clib.h"
/*
compute the effective exchange field at site i
H_i = J \sum_<i,j> S_j
with Hamiltonian
Hamiltonian = - J \sum_<i,j> S_i \cdot S_j
Note that the sum over <i,j> runs only once for each pair.
- NEIGHBOURS in the arguments:
The first 6 elements of the ngbs[] are the indexes of the nearest neighbours in
the following order:
-x, +x, -y, +y, -z, +z
for every spin. It is -1 for boundaries. The array is like:
__here are nearest neighbour indexes
|
| 0-x, 0+x, 0-y, 0+y, 0-z, 0+z, ... 1-x, 1+x, 1-y, ... |
i=0 i=1 ...
where 0-y is the index of the neighbour of the 0th spin, in the -y direction,
for example
Thus, for every nearest neighbour ( ngbs[i + j], j=0,1,...5 ) we compute the
field contribution: Sum_j J_j S_j of the neighbours to the exchange interaction
The ngbs array also gives the correct indexes for the spins at periodic
boundaries
*/
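/*
 Illustrative sketch (assuming n_ngbs == 6 and the interleaved 3*i spin
 layout used below): the -y neighbour of spin i sits in slot 2, so one
 would read it as

     int k = ngbs[n_ngbs * i + 2];     // -1 marks an open boundary
     if (k >= 0) {
         double sy = spin[3 * k + 1];  // y component of that neighbour
     }
*/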
void compute_exch_field(double *restrict spin, double *restrict field,
double *restrict mu_s_inv,
double *restrict energy,
double Jx, double Jy, double Jz,
int *restrict ngbs, int n, int n_ngbs) {
#pragma omp parallel for
for (int i = 0; i < n; i++) {
int id = 0;
int id_nn = n_ngbs * i; // index for the neighbours
double fx = 0, fy = 0, fz = 0;
for (int j = 0; j < 6; j++) {
if (ngbs[id_nn + j] >= 0) {
id = 3 * ngbs[id_nn + j];
fx += Jx * spin[id];
fy += Jy * spin[id + 1];
fz += Jz * spin[id + 2];
}
}
field[3 * i] = fx;
field[3 * i + 1] = fy;
field[3 * i + 2] = fz;
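// Each site stores half of its bond energy: H_i sums over all neighbours,
// so S_i . H_i counts every <i,j> bond at both i and j, and the 0.5 factor
// below makes the sum of energy[i] count each bond exactly once.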
energy[i] = -0.5 * (fx * spin[3 * i] + fy * spin[3 * i + 1] +
fz * spin[3 * i + 2]);
// Scale the field to 1/mu_s
field[3 * i] *= mu_s_inv[i];
field[3 * i + 1] *= mu_s_inv[i];
field[3 * i + 2] *= mu_s_inv[i];
}
}
double compute_exch_energy(double *restrict spin,
double Jx, double Jy, double Jz,
int nx, int ny, int nz, int xperiodic, int yperiodic) {
int nyz = ny * nz;
int n1 = nx * nyz, n2 = 2 * n1;
int i, j, k;
int index, id;
double Sx, Sy, Sz;
double energy = 0;
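// Note: spin[] here is component-major (Sx at [index], Sy at [index + n1],
// Sz at [index + n2]), unlike the interleaved 3*i layout used by
// compute_exch_field() above.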
for (i = 0; i < nx; i++) {
for (j = 0; j < ny; j++) {
for (k = 0; k < nz; k++) {
index = nyz * i + nz * j + k;
Sx = spin[index];
Sy = spin[index + n1];
Sz = spin[index + n2];
if (i < nx - 1 || xperiodic) {
id = index + nyz;
if (i == nx-1){
id -= n1;
}
energy += Jx * Sx * spin[id];
energy += Jy * Sy * spin[id + n1];
energy += Jz * Sz * spin[id + n2];
}
if (j < ny - 1 || yperiodic) {
id = index + nz;
if (j == ny-1){
id -= nyz;
}
energy += Jx * Sx * spin[id];
energy += Jy * Sy * spin[id + n1];
energy += Jz * Sz * spin[id + n2];
}
if (k < nz - 1) {
id = index + 1;
energy += Jx * Sx * spin[id];
energy += Jy * Sy * spin[id + n1];
energy += Jz * Sz * spin[id + n2];
}
}
}
}
energy = -energy;
return energy;
}
/*
compute the effective exchange field at site i
H_i = \sum_<i,j> J_{ij} S_j
with Hamiltonian
Hamiltonian = - \sum_<i,j> J_{ij} S_i \cdot S_j
Note that the sum over <i,j> runs only once for each pair.
*/
void compute_exch_field_spatial(double *restrict spin, double *restrict field,
double *restrict mu_s_inv,
double *restrict energy,
double *restrict J, int *restrict ngbs,
int n, int n_ngbs) {
#pragma omp parallel for
for (int i = 0; i < n; i++) {
int id = 0;
int id_nn = n_ngbs * i; // index for the neighbours
double fx = 0, fy = 0, fz = 0;
for (int j = 0; j < 6; j++) {
int p = id_nn + j;
if (ngbs[p] >= 0) {
id = 3 * ngbs[p];
fx += J[p] * spin[id];
fy += J[p] * spin[id + 1];
fz += J[p] * spin[id + 2];
}
}
field[3 * i] = fx;
field[3 * i + 1] = fy;
field[3 * i + 2] = fz;
energy[i] = -0.5 * (fx * spin[3 * i] + fy * spin[3 * i + 1] +
fz * spin[3 * i + 2]);
// Scale the field to 1/mu_s
field[3 * i] *= mu_s_inv[i];
field[3 * i + 1] *= mu_s_inv[i];
field[3 * i + 2] *= mu_s_inv[i];
}
}
/* Calculation of Exchange field for up to 8 shells of neighbours
*
* J :: Array with 9 elements: an exchange constant per shell
* Calculation is only up to n_shells (the rest of the elements
 * are not used).
* ngbs :: array with the neighbours
* n_ngbs :: number of neighbours per lattice site
* n_shells :: number of specified shells of neighbours
* n_ngbs_shell :: number of neighbours per shell. First element is zero.
* For a hex lattice, this array is: [0, 6, 6, 12, ... ]
* sum_ngbs_shell :: sum of number of neighbours up to the i-th shell to
* locate the column position for the ngbs of a specific shell.
* First element is zero.
* For a hex lattice, this array is: [0, 6, 12, 24, ...]
 * Thus, we can locate ngbs from cols 0-5, 6-11, 12-23, ... etc.
*
*/
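/* Worked example (hex lattice, using the arrays quoted above): for shell
 * sh = 2, the loop below scans columns sum_ngbs_shell[1] = 6 through
 * sum_ngbs_shell[2] - 1 = 11 of ngbs, coupling each of those neighbours
 * with the constant J[1].
 */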
void compute_full_exch_field(double *restrict spin, double *restrict field,
double *restrict mu_s_inv,
double *restrict energy,
double J[9], int *ngbs, int n, int n_ngbs,
int n_shells, int *restrict n_ngbs_shell,
int *restrict sum_ngbs_shell
) {
#pragma omp parallel for
for (int i = 0; i < n; i++) {
int id = 0;
int id_ngbs = n_ngbs * i; // index for the starting point of neighbours
double fx = 0, fy = 0, fz = 0;
for (int sh = 1; sh < n_shells + 1; sh++) {
for (int j = sum_ngbs_shell[sh - 1]; j < sum_ngbs_shell[sh]; j++) {
if (ngbs[id_ngbs + j] >= 0) {
id = 3 * ngbs[id_ngbs + j];
fx += J[sh - 1] * spin[id];
fy += J[sh - 1] * spin[id + 1];
fz += J[sh - 1] * spin[id + 2];
}
}
}
field[3 * i] = fx;
field[3 * i + 1] = fy;
field[3 * i + 2] = fz;
energy[i] = -0.5 * (fx * spin[3 * i] + fy * spin[3 * i + 1] +
fz * spin[3 * i + 2]);
// Scale the field to 1/mu_s
field[3 * i] *= mu_s_inv[i];
field[3 * i + 1] *= mu_s_inv[i];
field[3 * i + 2] *= mu_s_inv[i];
}
}
|
GB_binop__min_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__min_int32)
// A.*B function (eWiseMult): GB (_AemultB_01__min_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__min_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__min_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__min_int32)
// A*D function (colscale): GB (_AxD__min_int32)
// D*A function (rowscale): GB (_DxB__min_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__min_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__min_int32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_int32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_int32)
// C=scalar+B GB (_bind1st__min_int32)
// C=scalar+B' GB (_bind1st_tran__min_int32)
// C=A+scalar GB (_bind2nd__min_int32)
// C=A'+scalar GB (_bind2nd_tran__min_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_IMIN (aij, bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMIN (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_INT32 || GxB_NO_MIN_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__min_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__min_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__min_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__min_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__min_int32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__min_int32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__min_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__min_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__min_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__min_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__min_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__min_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IMIN (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__min_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IMIN (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__min_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__min_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
spmm.h | /*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/spmm.h
* \brief SPMM CPU kernel function header.
*/
#ifndef DGL_ARRAY_CPU_SPMM_H_
#define DGL_ARRAY_CPU_SPMM_H_
#include <dgl/array.h>
#include <dgl/bcast.h>
#include <dgl/runtime/parallel_for.h>
#include <algorithm>
#include <limits>
#include <memory>
#include "spmm_binary_ops.h"
#if !defined(_WIN32)
#ifdef USE_AVX
#include "intel/cpu_support.h"
#ifdef USE_LIBXSMM
#include "spmm_blocking_libxsmm.h"
#endif // USE_LIBXSMM
#endif // USE_AVX
#endif // _WIN32
namespace dgl {
namespace aten {
namespace cpu {
#if !defined(_WIN32)
#ifdef USE_AVX
/*!
* \brief CPU kernel of SpMM on Csr format using Xbyak.
* \param cpu_spec JIT'ed kernel
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param X The feature on source nodes.
* \param W The feature on edges.
* \param O The result feature on destination nodes.
 * \note It uses a node-parallel strategy: different threads are responsible
 *       for the computation of different nodes. For each edge, it uses the
 *       JIT'ed kernel.
*/
template <typename IdType, typename DType, typename Op>
void SpMMSumCsrXbyak(dgl::ElemWiseAddUpdate<Op>* cpu_spec, const BcastOff& bcast,
const CSRMatrix& csr, const DType* X, const DType* W, DType* O) {
const bool has_idx = !IsNullArray(csr.data);
const IdType* indptr = csr.indptr.Ptr<IdType>();
const IdType* indices = csr.indices.Ptr<IdType>();
const IdType* edges = csr.data.Ptr<IdType>();
int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len, rhs_dim = bcast.rhs_len;
runtime::parallel_for(0, csr.num_rows, [&](size_t b, size_t e) {
for (auto rid = b; rid < e; ++rid) {
const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
DType* out_off = O + rid * dim;
for (IdType j = row_start; j < row_end; ++j) {
const IdType cid = indices[j];
const IdType eid = has_idx ? edges[j] : j;
cpu_spec->run(out_off, X + cid * lhs_dim, W + eid * rhs_dim, dim);
}
}
});
}
#endif // USE_AVX
#endif // _WIN32
/*!
* \brief Naive CPU kernel of SpMM on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param X The feature on source nodes.
* \param W The feature on edges.
* \param O The result feature on destination nodes.
 * \note It uses a node-parallel strategy: different threads are responsible
 *       for the computation of different nodes.
*/
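/*
 * In dense notation the kernel below computes, for every destination row rid
 * and feature k,
 *   out[rid, k] += Op(X[indices[j], k'], W[eid, k''])  for j in [indptr[rid], indptr[rid+1])
 * where eid is edges[j] when csr.data is present (and j itself otherwise),
 * and k', k'' are the broadcast offsets of k.
 */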
template <typename IdType, typename DType, typename Op>
void SpMMSumCsrNaive(const BcastOff& bcast, const CSRMatrix& csr, const DType* X,
const DType* W, DType* O) {
const bool has_idx = !IsNullArray(csr.data);
const IdType* indptr = csr.indptr.Ptr<IdType>();
const IdType* indices = csr.indices.Ptr<IdType>();
const IdType* edges = csr.data.Ptr<IdType>();
int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len, rhs_dim = bcast.rhs_len;
runtime::parallel_for(0, csr.num_rows, [&](size_t b, size_t e) {
for (auto rid = b; rid < e; ++rid) {
const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
DType* out_off = O + rid * dim;
for (IdType j = row_start; j < row_end; ++j) {
const IdType cid = indices[j];
const IdType eid = has_idx ? edges[j] : j;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off =
Op::use_lhs ? X + cid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off =
Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
out_off[k] += Op::Call(lhs_off, rhs_off);
}
}
}
});
}
/*!
* \brief CPU kernel of SpMM on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
 * \note It uses a node-parallel strategy: different threads are responsible
 *       for the computation of different nodes.
*/
template <typename IdType, typename DType, typename Op>
void SpMMSumCsr(const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat,
NDArray efeat, NDArray out) {
const bool has_idx = !IsNullArray(csr.data);
const IdType* indptr = csr.indptr.Ptr<IdType>();
const IdType* indices = csr.indices.Ptr<IdType>();
const IdType* edges = csr.data.Ptr<IdType>();
const DType* X = ufeat.Ptr<DType>();
const DType* W = efeat.Ptr<DType>();
int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len, rhs_dim = bcast.rhs_len;
DType* O = out.Ptr<DType>();
CHECK_NOTNULL(indptr);
CHECK_NOTNULL(O);
if (Op::use_lhs) {
CHECK_NOTNULL(indices);
CHECK_NOTNULL(X);
}
if (Op::use_rhs) {
if (has_idx)
CHECK_NOTNULL(edges);
CHECK_NOTNULL(W);
}
#if !defined(_WIN32)
#ifdef USE_AVX
#ifdef USE_LIBXSMM
const bool no_libxsmm =
bcast.use_bcast || std::is_same<DType, double>::value;
if (!no_libxsmm) {
SpMMSumCsrLibxsmm<IdType, DType, Op>(bcast, csr, ufeat, efeat, out);
} else {
#endif // USE_LIBXSMM
typedef dgl::ElemWiseAddUpdate<Op> ElemWiseUpd;
/* Prepare an assembler kernel */
static std::unique_ptr<ElemWiseUpd> asm_kernel_ptr(
(dgl::IntelKernel<>::IsEnabled()) ? new ElemWiseUpd() : nullptr);
/* Distribute the kernel among OMP threads */
ElemWiseUpd* cpu_spec = (asm_kernel_ptr && asm_kernel_ptr->applicable())
? asm_kernel_ptr.get()
: nullptr;
if (cpu_spec && dim > 16 && !bcast.use_bcast) {
SpMMSumCsrXbyak<IdType, DType, Op>(cpu_spec, bcast, csr, X, W, O);
} else {
#endif // USE_AVX
#endif // _WIN32
SpMMSumCsrNaive<IdType, DType, Op>(bcast, csr, X, W, O);
#if !defined(_WIN32)
#ifdef USE_AVX
}
#ifdef USE_LIBXSMM
}
#endif // USE_LIBXSMM
#endif // USE_AVX
#endif // _WIN32
}
/*!
* \brief CPU kernel of SpMM on Coo format.
* \param bcast Broadcast information.
* \param coo The Coo matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
 * \note It uses a node-parallel strategy: different threads are responsible
 *       for the computation of different nodes. To avoid possible data
 *       hazards, we use atomic operators in the reduction phase.
*/
template <typename IdType, typename DType, typename Op>
void SpMMSumCoo(const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat,
NDArray efeat, NDArray out) {
const bool has_idx = !IsNullArray(coo.data);
const IdType* row = coo.row.Ptr<IdType>();
const IdType* col = coo.col.Ptr<IdType>();
const IdType* edges = coo.data.Ptr<IdType>();
const DType* X = ufeat.Ptr<DType>();
const DType* W = efeat.Ptr<DType>();
int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len, rhs_dim = bcast.rhs_len;
DType* O = out.Ptr<DType>();
const int64_t nnz = coo.row->shape[0];
// fill zero elements
memset(O, 0, out.GetSize());
// spmm
#pragma omp parallel for
for (IdType i = 0; i < nnz; ++i) {
const IdType rid = row[i];
const IdType cid = col[i];
const IdType eid = has_idx ? edges[i] : i;
DType* out_off = O + cid * dim;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off =
Op::use_lhs ? X + rid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off =
Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
const DType val = Op::Call(lhs_off, rhs_off);
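      // Skip the atomic update when the contribution is zero; O was already
      // zero-filled by the memset above, so nothing would change.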
if (val != 0) {
#pragma omp atomic
out_off[k] += val;
}
}
}
}
/*!
* \brief CPU kernel of SpMM-Min/Max on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
 * \param argu Arg-Min/Max on source nodes, which refers to the source node
 *        indices corresponding to the minimum/maximum values of the reduction
 *        result on destination nodes. It's useful in computing gradients of
 *        the Min/Max reducer.
 * \param arge Arg-Min/Max on edges, which refers to the edge indices
 *        corresponding to the minimum/maximum values of the reduction result
 *        on destination nodes. It's useful in computing gradients of the
 *        Min/Max reducer.
 * \note It uses a node-parallel strategy: different threads are responsible
 *       for the computation of different nodes.
 * \note The result will contain infinity for zero-degree nodes.
*/
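/*
 * Sketch of the comparison reduction below (out is assumed to be pre-filled
 * by the caller with the identity of Cmp, e.g. +/- infinity, which is why
 * zero-degree rows keep infinity): a candidate val = Op::Call(lhs, rhs)
 * replaces out_off[k] whenever Cmp::Call(out_off[k], val) holds, and the
 * winning source node / edge indices are recorded in argu / arge.
 */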
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCsr(const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat,
NDArray efeat, NDArray out, NDArray argu, NDArray arge) {
const bool has_idx = !IsNullArray(csr.data);
const IdType* indptr = static_cast<IdType*>(csr.indptr->data);
const IdType* indices = static_cast<IdType*>(csr.indices->data);
const IdType* edges =
has_idx ? static_cast<IdType*>(csr.data->data) : nullptr;
const DType* X = Op::use_lhs ? static_cast<DType*>(ufeat->data) : nullptr;
const DType* W = Op::use_rhs ? static_cast<DType*>(efeat->data) : nullptr;
const int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = static_cast<DType*>(out->data);
IdType* argX = Op::use_lhs ? static_cast<IdType*>(argu->data) : nullptr;
IdType* argW = Op::use_rhs ? static_cast<IdType*>(arge->data) : nullptr;
CHECK_NOTNULL(indptr);
CHECK_NOTNULL(O);
if (Op::use_lhs) {
CHECK_NOTNULL(indices);
CHECK_NOTNULL(X);
CHECK_NOTNULL(argX);
}
if (Op::use_rhs) {
if (has_idx)
CHECK_NOTNULL(edges);
CHECK_NOTNULL(W);
CHECK_NOTNULL(argW);
}
#if !defined(_WIN32)
#ifdef USE_AVX
#ifdef USE_LIBXSMM
const bool no_libxsmm =
bcast.use_bcast || std::is_same<DType, double>::value;
if (!no_libxsmm) {
SpMMCmpCsrLibxsmm<IdType, DType, Op, Cmp>(bcast, csr, ufeat, efeat, out, argu, arge);
} else {
#endif // USE_LIBXSMM
#endif // USE_AVX
#endif // _WIN32
runtime::parallel_for(0, csr.num_rows, [&](size_t b, size_t e) {
for (auto rid = b; rid < e; ++rid) {
const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
DType* out_off = O + rid * dim;
IdType* argx_off = argX + rid * dim;
IdType* argw_off = argW + rid * dim;
for (IdType j = row_start; j < row_end; ++j) {
const IdType cid = indices[j];
const IdType eid = has_idx ? edges[j] : j;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off =
Op::use_lhs ? X + cid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off =
Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
const DType val = Op::Call(lhs_off, rhs_off);
if (Cmp::Call(out_off[k], val)) {
out_off[k] = val;
if (Op::use_lhs) argx_off[k] = cid;
if (Op::use_rhs) argw_off[k] = eid;
}
}
}
}
});
#if !defined(_WIN32)
#ifdef USE_AVX
#ifdef USE_LIBXSMM
}
#endif // USE_LIBXSMM
#endif // USE_AVX
#endif // _WIN32
}
/*!
* \brief CPU kernel of SpMM-Min/Max on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
 * \param argu Arg-Min/Max on source nodes, which refers to the source node
 *        indices corresponding to the minimum/maximum values of the reduction
 *        result on destination nodes. It's useful in computing gradients of
 *        the Min/Max reducer.
 * \param arge Arg-Min/Max on edges, which refers to the edge indices
 *        corresponding to the minimum/maximum values of the reduction result
 *        on destination nodes. It's useful in computing gradients of the
 *        Min/Max reducer.
 * \param argu_ntype Node type of the arg-Min/Max on source nodes, which refers
 *        to the source node types corresponding to the minimum/maximum values
 *        of the reduction result on destination nodes. It's useful in computing
 *        gradients of the Min/Max reducer.
 * \param arge_etype Edge type of the arg-Min/Max on edges, which refers to the
 *        edge types corresponding to the minimum/maximum values of the reduction
 *        result on destination nodes. It's useful in computing gradients of the
 *        Min/Max reducer.
* \param src_type Node type of the source nodes of an etype
* \param etype Edge type
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCsrHetero(const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat,
NDArray efeat, NDArray out, NDArray argu, NDArray arge,
NDArray argu_ntype, NDArray arge_etype,
const int ntype, const int etype) {
const bool has_idx = !IsNullArray(csr.data);
const IdType* indptr = static_cast<IdType*>(csr.indptr->data);
const IdType* indices = static_cast<IdType*>(csr.indices->data);
const IdType* edges =
has_idx ? static_cast<IdType*>(csr.data->data) : nullptr;
const DType* X = Op::use_lhs ? static_cast<DType*>(ufeat->data) : nullptr;
const DType* W = Op::use_rhs ? static_cast<DType*>(efeat->data) : nullptr;
const int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = static_cast<DType*>(out->data);
IdType* argX = Op::use_lhs ? static_cast<IdType*>(argu->data) : nullptr;
IdType* argW = Op::use_rhs ? static_cast<IdType*>(arge->data) : nullptr;
IdType* argX_ntype = Op::use_lhs ? static_cast<IdType*>(argu_ntype->data) : nullptr;
IdType* argW_etype = Op::use_rhs ? static_cast<IdType*>(arge_etype->data) : nullptr;
CHECK_NOTNULL(indptr);
CHECK_NOTNULL(O);
if (Op::use_lhs) {
CHECK_NOTNULL(indices);
CHECK_NOTNULL(X);
CHECK_NOTNULL(argX);
}
if (Op::use_rhs) {
if (has_idx)
CHECK_NOTNULL(edges);
CHECK_NOTNULL(W);
CHECK_NOTNULL(argW);
}
// TODO(Israt): Use LIBXSMM. The homogeneous-graph path uses LIBXSMM when enabled.
runtime::parallel_for(0, csr.num_rows, [&](size_t b, size_t e) {
for (auto rid = b; rid < e; ++rid) {
const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
DType* out_off = O + rid * dim;
IdType* argx_off = argX + rid * dim;
IdType* argw_off = argW + rid * dim;
IdType* argx_ntype = argX_ntype + rid * dim;
IdType* argw_etype = argW_etype + rid * dim;
for (IdType j = row_start; j < row_end; ++j) {
const IdType cid = indices[j];
const IdType eid = has_idx ? edges[j] : j;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off =
Op::use_lhs ? X + cid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off =
Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
const DType val = Op::Call(lhs_off, rhs_off);
if (Cmp::Call(out_off[k], val)) {
out_off[k] = val;
if (Op::use_lhs) {
argx_off[k] = cid;
argx_ntype[k] = ntype;
}
if (Op::use_rhs) {
argw_off[k] = eid;
argw_etype[k] = etype;
}
}
}
}
}
});
}
/*!
* \brief CPU kernel of SpMM-Min/Max on Coo format.
* \param bcast Broadcast information.
* \param coo The Coo matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
 * \param argu Arg-Min/Max on source nodes, which refers to the source node
 *        indices corresponding to the minimum/maximum values of the reduction
 *        result on destination nodes. It's useful in computing gradients of
 *        the Min/Max reducer.
 * \param arge Arg-Min/Max on edges, which refers to the edge indices
 *        corresponding to the minimum/maximum values of the reduction result
 *        on destination nodes. It's useful in computing gradients of the
 *        Min/Max reducer.
 * \note It uses a node-parallel strategy: different threads are responsible
 *       for the computation of different nodes. To avoid possible data
 *       hazards, we use atomic operators in the reduction phase.
 * \note The result will contain infinity for zero-degree nodes.
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCoo(const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat,
NDArray efeat, NDArray out, NDArray argu, NDArray arge) {
const bool has_idx = !IsNullArray(coo.data);
const IdType* row = static_cast<IdType*>(coo.row->data);
const IdType* col = static_cast<IdType*>(coo.col->data);
const IdType* edges =
has_idx ? static_cast<IdType*>(coo.data->data) : nullptr;
const DType* X = Op::use_lhs ? static_cast<DType*>(ufeat->data) : nullptr;
const DType* W = Op::use_rhs ? static_cast<DType*>(efeat->data) : nullptr;
const int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = static_cast<DType*>(out->data);
IdType* argX = Op::use_lhs ? static_cast<IdType*>(argu->data) : nullptr;
IdType* argW = Op::use_rhs ? static_cast<IdType*>(arge->data) : nullptr;
const int64_t nnz = coo.row->shape[0];
// fill zero elements
std::fill(O, O + out.NumElements(), Cmp::zero);
// spmm
#pragma omp parallel for
for (IdType i = 0; i < nnz; ++i) {
const IdType rid = row[i];
const IdType cid = col[i];
const IdType eid = has_idx ? edges[i] : i;
DType* out_off = O + cid * dim;
IdType* argx_off = Op::use_lhs ? argX + cid * dim : nullptr;
IdType* argw_off = Op::use_rhs ? argW + cid * dim : nullptr;
for (int64_t k = 0; k < dim; ++k) {
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off =
Op::use_lhs ? X + rid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off =
Op::use_rhs ? W + eid * rhs_dim + rhs_add : nullptr;
const DType val = Op::Call(lhs_off, rhs_off);
#pragma omp critical
if (Cmp::Call(out_off[k], val)) {
out_off[k] = val;
if (Op::use_lhs) argx_off[k] = rid;
if (Op::use_rhs) argw_off[k] = eid;
}
}
}
}
} // namespace cpu
} // namespace aten
} // namespace dgl
#endif // DGL_ARRAY_CPU_SPMM_H_
|
NewtonSolver.h | #ifndef included_NewtonSolver
#define included_NewtonSolver
namespace Thermo4PFM
{
template <unsigned int Dimension, class SolverType, typename JacobianDataType>
class NewtonSolver
{
public:
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp declare target
#endif
/// Solve system of equations for tolerance tol, using at most
/// max_iters iterations
/// Solution: conc
int ComputeSolution(double* conc, const double tol, const int max_iters,
const double alpha = 1.)
{
return ComputeSolutionInternal(conc, tol, max_iters, alpha);
}
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp end declare target
#endif
private:
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp declare target
#endif
void UpdateSolution(double* const x, const double* const fvec,
JacobianDataType** const fjac, const double alpha);
int ComputeSolutionInternal(double* const conc, const double tol,
const int max_iters, const double alpha = 1.);
void internalRHS(const double* const x, double* const fvec)
{
static_cast<SolverType*>(this)->RHS(x, fvec);
}
template <typename ScalarType>
void CopyMatrix(ScalarType** const dst, ScalarType** const src);
void internalJacobian(const double* const x, JacobianDataType** const fjac)
{
static_cast<SolverType*>(this)->Jacobian(x, fjac);
}
bool CheckTolerance(const double* const fvec, const double tol);
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp end declare target
#endif
};
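/*
 * Usage sketch (hypothetical solver, not part of this header): a concrete
 * solver derives from NewtonSolver and supplies RHS() and Jacobian(), which
 * internalRHS()/internalJacobian() dispatch to statically via the CRTP cast:
 *
 *   class MySolver : public NewtonSolver<2, MySolver, double>
 *   {
 *   public:
 *       void RHS(const double* const x, double* const fvec);
 *       void Jacobian(const double* const x, double** const fjac);
 *   };
 */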
}
#endif
|
acoPlacement.c | /***************************************************************************
Name : acoPlacement.c
Version : 1.0
Author(s) : Panayiotis Danassis (panos_dan@hotmail.com)
Date : May 6, 2015
Description : 3-D FPGA placement algorithm based on Ant Colony Optimization.
-----------
Copyright (C) 2015 Panayiotis Danassis
School of ECE, National Technical University of Athens.
****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include "acoPlacement.h"
#include "ants.h"
#include "inOut.h"
#include "timer.h"
int grid_size;
int numberOfLayers;
int numberOfThreads = 1;
int iteration = 0; /* current iteration */
int foundBetter = FALSE; /* A better solution has been found since last call of print_results() */
double time_used;
/* Main routine for running the ACO algorithm */
int main(int argc, char *argv[]) {
int i;
printf(BOLDWHITE"\nacoPlacement - A 3-D FPGA placement algorithm based on Ant Colony Optimization.\n");
printf("Created by Panayiotis Danassis (panos_dan@hotmail.com) with the valuable contribution of Kostas Siozios (ksiop@microlab.ntua.gr)\n");
printf("This software is licensed under the MIT License (see README.md).\n\n"RESET);
#ifdef DEBUG
printf("------------ <main> ------------\n");
printf("DEBUG MODE ON\n");
#endif
#ifdef PARALLEL
printf("PARALLEL MODE ON\n");
#endif
start_timers();
parse_parameters(argc, argv); /* Set ACO parameters and I/O filenames */
parse_netlist(); /* Read input netlist */
parse_hypernets(); /* Read hypernets */
parse_nets(); /* Read nets */
parse_layers(); /* Read node's layer info */
time_used = elapsed_time();
printf("Reading Input Files [ OK ]\n");
printf("Reading took %.10f seconds\n", time_used);
start_timers();
init_ants();
printf("Initializing ants [ OK ]\n");
init_timing_matrices();
printf("Initializing timing matrices [ OK ]\n");
time_used = elapsed_time();
printf("Initialization took %.10f seconds\n", time_used);
start_timers();
init_heuristic_matrix();
time_used = elapsed_time();
printf("Initializing heuristic matrix [ OK ]\n");
printf("Initialization took %.10f seconds\n", time_used);
start_timers();
init_pheromone_matrix();
time_used = elapsed_time();
printf("Initializing pheromone matrix [ OK ]\n");
printf("Initialization took %.10f seconds\n", time_used);
start_timers();
init_total_matrix();
time_used = elapsed_time();
printf("Initializing total matrix [ OK ]\n");
printf("Initialization took %.10f seconds\n", time_used);
printf("\n");
print_results();
foundBetter = FALSE;
#ifdef PARALLEL
omp_set_num_threads(numberOfThreads);
#endif
iteration = 1;
while (!termination_condition()) {
printf("Iteration = %d\n", iteration);
//change_parameters_according_to_schedule();
#ifdef PARALLEL
#pragma omp parallel for private(i) shared(iteration, foundBetter, best_so_far_ant)
#endif
for (i = 1; i <= n_ants; i++) {
#ifdef DEBUG
printf("Ant = %d\n", i);
#endif
construct_solution(&ant[i]);
#ifdef PARALLEL
compute_placement_quality_parallel(&ant[i]);
compare_best_so_far_ant(&ant[i]);
#else
compute_placement_quality(&ant[i]);
compare_best_so_far_ant(&ant[i]);
compare_iteration_best_ant(&ant[i]);
local_pheromone_trail_update(&ant[i]);
#endif
}
if (foundBetter == TRUE) {
if (iteration % printStep == 0) {
print_results(); /* Print results periodically */
foundBetter = FALSE;
}
}
if (iteration % restart == 0) {
reinit_pheromone_matrix(); /* Reinitialize pheromone matrix to avoid stagnation */
}
if (iteration % exportPlacementStep == 0) {
export_placement(best_so_far_ant.nodePosition, best_so_far_ant.grid); /* Export placement file */
}
global_pheromone_trail_update();
#ifndef PARALLEL
reinit_iteration_best_ant();
#endif
iteration++;
}
export_placement(best_so_far_ant.nodePosition, best_so_far_ant.grid);
printf("Done!\n");
/* Free all allocated memory */
free_allocated_memory();
#ifdef DEBUG
printf("------------ </main> ------------\n");
#endif
return 0;
} |
DRB063-outeronly1-orig-no.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outermost loop can be parallelized. The inner j loop in foo() carries an
anti-dependence: iteration j reads b[i][j+1], which iteration j+1 overwrites, so
its iterations must not be reordered.
*/
int n = 100, m = 100;
double b[100][100];
int init()
{
int i, j, k;
int _ret_val_0;
#pragma cetus private(i, j)
#pragma loop name init#0
#pragma cetus parallel
#pragma omp parallel for private(i, j)
for (i=0; i<n; i ++ )
{
#pragma cetus private(j)
#pragma loop name init#0#0
#pragma cetus parallel
#pragma omp parallel for private(j)
for (j=0; j<m; j ++ )
{
b[i][j]=(i*j);
}
}
_ret_val_0=0;
return _ret_val_0;
}
void foo()
{
int i, j;
/* Be careful about bounds of j */
#pragma cetus private(i, j)
#pragma loop name foo#0
#pragma cetus parallel
#pragma omp parallel for private(i, j)
for (i=0; i<n; i ++ )
{
#pragma cetus private(j)
#pragma loop name foo#0#0
for (j=0; j<(m-1); j ++ )
{
b[i][j]=b[i][j+1];
}
}
return ;
}
int print()
{
int i, j, k;
int _ret_val_0;
#pragma cetus private(i, j)
#pragma loop name print#0
for (i=0; i<n; i ++ )
{
#pragma cetus private(j)
#pragma loop name print#0#0
for (j=0; j<m; j ++ )
{
printf("%lf\n", b[i][j]);
}
}
_ret_val_0=0;
return _ret_val_0;
}
int main()
{
int _ret_val_0;
init();
foo();
print();
_ret_val_0=0;
return _ret_val_0;
}
|
fill_nr_3c.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <stdio.h>
#include "config.h"
#include "cint.h"
#include "np_helper/np_helper.h"
#define BLKSIZE 8
int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter);
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
/*
* out[naoi,naoj,naok,comp] in F-order
*/
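/*
Indexing note (descriptive): in F-order, element (i,j,k) of out lives at
i + naoi*j + naoi*naoj*k, which is why the code below shifts out by
naoi*naoj*k0 for the k-shell and passes out + j0*naoi + i0 per (ish,jsh)
pair.
*/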
void GTOnr3c_fill_s1(int (*intor)(), double *out, double *buf,
int comp, int jobid,
int *shls_slice, int *ao_loc, CINTOpt *cintopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int ksh0 = shls_slice[4];
const int ksh1 = shls_slice[5];
const int nksh = ksh1 - ksh0;
const int ksh = jobid % nksh + ksh0;
const int jstart = jobid / nksh * BLKSIZE + jsh0;
const int jend = MIN(jstart + BLKSIZE, jsh1);
if (jstart >= jend) {
return;
}
const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
const int dims[] = {naoi, naoj, naok};
const int k0 = ao_loc[ksh] - ao_loc[ksh0];
out += naoi * naoj * k0;
int ish, jsh, i0, j0;
int shls[3] = {0, 0, ksh};
for (jsh = jstart; jsh < jend; jsh++) {
for (ish = ish0; ish < ish1; ish++) {
shls[0] = ish;
shls[1] = jsh;
i0 = ao_loc[ish] - ao_loc[ish0];
j0 = ao_loc[jsh] - ao_loc[jsh0];
(*intor)(out+j0*naoi+i0, dims, shls, atm, natm, bas, nbas, env,
cintopt, buf);
} }
}
static void dcopy_s2_igtj(double *out, double *in, int comp,
int ip, int nij, int nijk, int di, int dj, int dk)
{
const size_t dij = di * dj;
const size_t ip1 = ip + 1;
int i, j, k, ic;
double *pout, *pin;
for (ic = 0; ic < comp; ic++) {
for (k = 0; k < dk; k++) {
pout = out + k * nij;
pin = in + k * dij;
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
pout[j] = pin[j*di+i];
}
pout += ip1 + i;
}
}
out += nijk;
in += dij * dk;
}
}
static void dcopy_s2_ieqj(double *out, double *in, int comp,
int ip, int nij, int nijk, int di, int dj, int dk)
{
const size_t dij = di * dj;
const size_t ip1 = ip + 1;
int i, j, k, ic;
double *pout, *pin;
for (ic = 0; ic < comp; ic++) {
for (k = 0; k < dk; k++) {
pout = out + k * nij;
pin = in + k * dij;
for (i = 0; i < di; i++) {
for (j = 0; j <= i; j++) {
pout[j] = pin[j*di+i];
}
pout += ip1 + i;
}
}
out += nijk;
in += dij * dk;
}
}
/*
* out[comp,naok,nij] in C-order
* nij = i1*(i1+1)/2 - i0*(i0+1)/2
* [ \ ]
* [**** ]
* [***** ]
* [*****. ] <= . may not be filled, if jsh-upper-bound < ish-upper-bound
* [ \]
*/
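/*
Offset sketch (illustrative helper; not called by the code below): within
the packed triangle, the row of AOs at global index ip starts at
ip*(ip+1)/2 - off, where off = i0*(i0+1)/2, matching the pout computation
in GTOnr3c_fill_s2ij. E.g. with i0 = 0, row ip = 3 starts at element 6,
since rows 0..2 occupy 1+2+3 elements.
*/
static inline size_t s2ij_row_offset(int ip, int i0)
{
return (size_t)ip * (ip + 1) / 2 - (size_t)i0 * (i0 + 1) / 2;
}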
void GTOnr3c_fill_s2ij(int (*intor)(), double *out, double *buf,
int comp, int jobid,
int *shls_slice, int *ao_loc, CINTOpt *cintopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int ksh0 = shls_slice[4];
const int ksh1 = shls_slice[5];
const int nksh = ksh1 - ksh0;
const int ksh = jobid % nksh + ksh0;
const int istart = jobid / nksh * BLKSIZE + ish0;
const int iend = MIN(istart + BLKSIZE, ish1);
if (istart >= iend) {
return;
}
const int i0 = ao_loc[ish0];
const int i1 = ao_loc[ish1];
const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
const size_t off = i0 * (i0 + 1) / 2;
const size_t nij = i1 * (i1 + 1) / 2 - off;
const size_t nijk = nij * naok;
const int dk = ao_loc[ksh+1] - ao_loc[ksh];
const int k0 = ao_loc[ksh] - ao_loc[ksh0];
out += nij * k0;
int ish, jsh, ip, jp, di, dj;
int shls[3] = {0, 0, ksh};
di = GTOmax_shell_dim(ao_loc, shls_slice, 2);
double *cache = buf + di * di * dk * comp;
double *pout;
for (ish = istart; ish < iend; ish++) {
for (jsh = jsh0; jsh < jsh1; jsh++) {
ip = ao_loc[ish];
jp = ao_loc[jsh] - ao_loc[jsh0];
if (ip < jp) {
continue;
}
shls[0] = ish;
shls[1] = jsh;
di = ao_loc[ish+1] - ao_loc[ish];
dj = ao_loc[jsh+1] - ao_loc[jsh];
(*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache);
pout = out + ip * (ip + 1) / 2 - off + jp;
if (ip != jp) {
dcopy_s2_igtj(pout, buf, comp, ip, nij, nijk, di, dj, dk);
} else {
dcopy_s2_ieqj(pout, buf, comp, ip, nij, nijk, di, dj, dk);
}
} }
}
void GTOnr3c_fill_s2jk(int (*intor)(), double *out, double *buf,
int comp, int jobid,
int *shls_slice, int *ao_loc, CINTOpt *cintopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
fprintf(stderr, "GTOnr3c_fill_s2jk not implemented\n");
exit(1);
}
void GTOnr3c_drv(int (*intor)(), void (*fill)(), double *eri, int comp,
int *shls_slice, int *ao_loc, CINTOpt *cintopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int ksh0 = shls_slice[4];
const int ksh1 = shls_slice[5];
const int nish = ish1 - ish0;
const int njsh = jsh1 - jsh0;
const int nksh = ksh1 - ksh0;
const int di = GTOmax_shell_dim(ao_loc, shls_slice, 3);
const int cache_size = GTOmax_cache_size(intor, shls_slice, 3,
atm, natm, bas, nbas, env);
const int njobs = (MAX(nish,njsh) / BLKSIZE + 1) * nksh;
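/*
Job layout (descriptive note): each fill callback decodes jobid as
ksh = jobid % nksh plus a BLKSIZE-wide shell block starting at
jobid / nksh * BLKSIZE, so njobs enumerates every (k-shell, shell-block)
pair; jobs whose block lies past the slice end return immediately.
*/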
#pragma omp parallel default(none) \
shared(intor, fill, eri, comp, shls_slice, ao_loc, cintopt, \
atm, natm, bas, nbas, env)
{
int jobid;
double *buf = malloc(sizeof(double) * (di*di*di*comp + cache_size));
#pragma omp for nowait schedule(dynamic)
for (jobid = 0; jobid < njobs; jobid++) {
(*fill)(intor, eri, buf, comp, jobid, shls_slice, ao_loc,
cintopt, atm, natm, bas, nbas, env);
}
free(buf);
}
}
|
pt.c | /*
* Copyright (c) 2016-2018 Ilya Kaliman
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <err.h>
#ifdef LIBPT_USE_MPI
#include <mpi.h>
#endif
#include "pt.h"
extern void *(*libpt_malloc)(size_t);
extern void (*libpt_free)(void *);
void dgemm_(char *, char *, int *, int *, int *, double *, double *,
int *, double *, int *, double *, double *, int *);
static void
gemm(char transa, char transb, int m, int n, int k, double alpha,
const double *a, int lda, const double *b, int ldb, double beta,
double *c, int ldc)
{
dgemm_(&transa, &transb, &m, &n, &k, &alpha, (double *)a, &lda,
(double *)b, &ldb, &beta, c, &ldc);
}
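/*
Usage sketch (illustrative): BLAS dgemm assumes column-major storage, so a
2x2 product C = A(2x3) * B(3x2) through this wrapper would read:

double a[6] = {1,2, 3,4, 5,6}; // columns of A
double b[6] = {1,2,3, 4,5,6};  // columns of B
double c[4];
gemm('N', 'N', 2, 2, 3, 1.0, a, 2, b, 3, 0.0, c, 2);

The const casts in gemm() exist only because the reference BLAS prototype
takes non-const pointers; dgemm does not modify a or b.
*/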
static void
t2_i_ovvv_half(size_t o, size_t v, size_t i, size_t j, size_t k,
double *abc, const double *t2, const double *i_ovvv)
{
const double *t2_p = &t2[i*o*v*v+j*v*v];
const double *i_ovvv_p = &i_ovvv[k*v*v*(v-1)/2];
/* out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c)) */
gemm('T', 'T', v, v*(v-1)/2, v, 1.0, t2_p, v,
i_ovvv_p, v*(v-1)/2, 0.0, abc, v);
}
static void
t2_baba_i_ovvv_aaaa_half(size_t oa, size_t va, size_t ob, size_t vb,
size_t i, size_t j, size_t k, double *abc, const double *t2,
const double *i_ovvv)
{
const double *t2_p = &t2[i*oa*vb*va+j*vb*va];
const double *i_ovvv_p = &i_ovvv[k*va*va*(va-1)/2];
(void)ob; /* unused */
/* out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c)) */
gemm('T', 'T', vb, va*(va-1)/2, va, 1.0, t2_p, va,
i_ovvv_p, va*(va-1)/2, 0.0, abc, vb);
}
static void
t2_aaaa_i_ovvv_baba(size_t oa, size_t va, size_t ob, size_t vb,
size_t i, size_t j, size_t k, double *abc, const double *t2,
const double *i_ovvv)
{
const double *t2_p = &t2[i*oa*va*va+j*va*va];
const double *i_ovvv_p = &i_ovvv[k*va*vb*va];
(void)ob; /* unused */
/* out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c)) */
gemm('T', 'T', va, va*vb, va, 1.0, t2_p, va,
i_ovvv_p, va*vb, 0.0, abc, va);
}
static void
t2_abab_i_ovvv_abab(size_t oa, size_t va, size_t ob, size_t vb,
size_t i, size_t j, size_t k, double *abc, const double *t2,
const double *i_ovvv)
{
const double *t2_p = &t2[i*ob*va*vb+j*va*vb];
const double *i_ovvv_p = &i_ovvv[k*vb*va*vb];
(void)oa; /* unused */
/* out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c)) */
gemm('T', 'T', va, va*vb, vb, 1.0, t2_p, vb,
i_ovvv_p, va*vb, 0.0, abc, va);
}
static void
t2_i_oovo(size_t o, size_t v, size_t i, size_t j, size_t k,
double *abc, const double *t2, const double *i_oovo)
{
const double *t2_p = &t2[i*o*v*v];
const double *i_oovo_p = &i_oovo[j*o*o*v+k*o*v];
/* out(i,j,k,a,b,c) = contract(l, t2(i,l,a,b), i_oovo(j,k,c,l)) */
gemm('N', 'N', v*v, v, o, 1.0, t2_p, v*v,
i_oovo_p, o, 0.0, abc, v*v);
}
static void
t2_aaaa_i_oovo_baba(size_t oa, size_t va, size_t ob, size_t vb,
size_t i, size_t j, size_t k, double *abc, const double *t2,
const double *i_oovo)
{
const double *t2_p = &t2[i*oa*va*va];
const double *i_oovo_p = &i_oovo[j*oa*vb*oa+k*vb*oa];
(void)ob; /* unused */
/* out(i,j,k,a,b,c) = contract(l, t2(i,l,a,b), i_oovo(j,k,c,l)) */
gemm('N', 'N', va*va, vb, oa, 1.0, t2_p, va*va,
i_oovo_p, oa, 0.0, abc, va*va);
}
static void
t2_abab_i_oovo_abab(size_t oa, size_t va, size_t ob, size_t vb,
size_t i, size_t j, size_t k, double *abc, const double *t2,
const double *i_oovo)
{
const double *t2_p = &t2[i*ob*va*vb];
const double *i_oovo_p = &i_oovo[j*ob*va*ob+k*va*ob];
(void)oa; /* unused */
/* out(i,j,k,a,b,c) = contract(l, t2(i,l,a,b), i_oovo(j,k,c,l)) */
gemm('N', 'N', va*vb, va, ob, 1.0, t2_p, va*vb,
i_oovo_p, ob, 0.0, abc, va*vb);
}
static void
t2_baba_i_oovo_aaaa(size_t oa, size_t va, size_t ob, size_t vb,
size_t i, size_t j, size_t k, double *abc, const double *t2,
const double *i_oovo)
{
const double *t2_p = &t2[i*oa*vb*va];
const double *i_oovo_p = &i_oovo[j*oa*va*oa+k*va*oa];
(void)ob; /* unused */
/* out(i,j,k,a,b,c) = contract(l, t2(i,l,a,b), i_oovo(j,k,c,l)) */
gemm('N', 'N', va*vb, va, oa, 1.0, t2_p, va*vb,
i_oovo_p, oa, 0.0, abc, va*vb);
}
static double
i_jk_a_bc_ov_oovv(size_t o, size_t v, const double *ov, const double *oovv,
size_t i, size_t j, size_t k, size_t a, size_t b, size_t c)
{
return +ov[i*v+a]*oovv[j*o*v*v+k*v*v+b*v+c]
-ov[j*v+a]*oovv[i*o*v*v+k*v*v+b*v+c]
-ov[k*v+a]*oovv[j*o*v*v+i*v*v+b*v+c]
-ov[i*v+b]*oovv[j*o*v*v+k*v*v+a*v+c]
+ov[j*v+b]*oovv[i*o*v*v+k*v*v+a*v+c]
+ov[k*v+b]*oovv[j*o*v*v+i*v*v+a*v+c]
-ov[i*v+c]*oovv[j*o*v*v+k*v*v+b*v+a]
+ov[j*v+c]*oovv[i*o*v*v+k*v*v+b*v+a]
+ov[k*v+c]*oovv[j*o*v*v+i*v*v+b*v+a];
}
static double
comp_t3b_ijkabc(size_t v1, size_t o2, size_t v2a, size_t v2b,
size_t i, size_t j, size_t k, size_t a, size_t b, size_t c,
const double *t1, const double *i_oovv, const double *f_ov,
const double *t2)
{
return t1[i*v1+a] * i_oovv[j*o2*v2a*v2b+k*v2a*v2b+b*v2b+c] +
f_ov[i*v1+a] * t2[j*o2*v2a*v2b+k*v2a*v2b+b*v2b+c];
}
static double
cc_pt_aaa(size_t oa, size_t va, const double *d_ov, const double *f_ov,
const double *t1, const double *t2_aaaa, const double *i_oovo_aaaa,
const double *i_oovv_aaaa, const double *i_ovvv_aaaa)
{
double e_pt = 0.0;
int rank = 0, size = 1;
if (oa == 0 || va == 0)
return 0.0;
#ifdef LIBPT_USE_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif
#ifdef _OPENMP
#pragma omp parallel
#endif
{
size_t i, j, k, a, b, c, it, *ijk, nijk = 0;
double *t3ax1, *abc1;
if ((ijk = libpt_malloc(oa*oa*oa*sizeof(*ijk))) == NULL)
err(1, "libpt malloc ijk");
for (i = 0, it = 0; i < oa; i++) {
for (j = i+1; j < oa; j++) {
for (k = j+1; k < oa; k++, it++) {
if ((int)it % size == rank) {
ijk[3*nijk+0] = i;
ijk[3*nijk+1] = j;
ijk[3*nijk+2] = k;
nijk++;
}
}
}
}
if ((t3ax1 = libpt_malloc(2*va*va*va*sizeof(*t3ax1))) == NULL)
err(1, "libpt malloc work");
abc1 = t3ax1 + va*va*va;
#ifdef _OPENMP
#pragma omp for reduction(+:e_pt) schedule(dynamic)
#endif
for (it = 0; it < nijk; it++) {
i = ijk[3*it+0];
j = ijk[3*it+1];
k = ijk[3*it+2];
t2_i_ovvv_half(oa,va,i,j,k,abc1,t2_aaaa,i_ovvv_aaaa);
for (a = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < b; c++)
t3ax1[a*va*va+b*va+c] =
+abc1[a*(a-1)/2*va+b*va+c]
-abc1[a*(a-1)/2*va+c*va+b]
+abc1[b*(b-1)/2*va+c*va+a];
t2_i_ovvv_half(oa,va,i,k,j,abc1,t2_aaaa,i_ovvv_aaaa);
for (a = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < b; c++)
t3ax1[a*va*va+b*va+c] +=
-abc1[a*(a-1)/2*va+b*va+c]
+abc1[a*(a-1)/2*va+c*va+b]
-abc1[b*(b-1)/2*va+c*va+a];
t2_i_ovvv_half(oa,va,k,j,i,abc1,t2_aaaa,i_ovvv_aaaa);
for (a = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < b; c++)
t3ax1[a*va*va+b*va+c] +=
-abc1[a*(a-1)/2*va+b*va+c]
+abc1[a*(a-1)/2*va+c*va+b]
-abc1[b*(b-1)/2*va+c*va+a];
t2_i_oovo(oa,va,i,j,k,abc1,t2_aaaa,i_oovo_aaaa);
for (a = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < b; c++)
t3ax1[a*va*va+b*va+c] +=
+abc1[a*va*va+b*va+c]
-abc1[b*va*va+a*va+c]
-abc1[c*va*va+b*va+a];
t2_i_oovo(oa,va,j,i,k,abc1,t2_aaaa,i_oovo_aaaa);
for (a = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < b; c++)
t3ax1[a*va*va+b*va+c] +=
-abc1[a*va*va+b*va+c]
+abc1[b*va*va+a*va+c]
+abc1[c*va*va+b*va+a];
t2_i_oovo(oa,va,k,j,i,abc1,t2_aaaa,i_oovo_aaaa);
for (a = 0; a < va; a++) {
for (b = 0; b < a; b++) {
for (c = 0; c < b; c++) {
double t3ax, t3bx, dn;
t3ax1[a*va*va+b*va+c] +=
-abc1[a*va*va+b*va+c]
+abc1[b*va*va+a*va+c]
+abc1[c*va*va+b*va+a];
dn = d_ov[i*va+a] + d_ov[j*va+b] + d_ov[k*va+c];
t3ax = t3ax1[a*va*va+b*va+c];
t3bx = +i_jk_a_bc_ov_oovv(oa,va,t1,i_oovv_aaaa,i,j,k,a,b,c)
+i_jk_a_bc_ov_oovv(oa,va,f_ov,t2_aaaa,i,j,k,a,b,c);
e_pt += t3ax * (t3ax-t3bx) / dn;
}}}
}
libpt_free(ijk);
libpt_free(t3ax1);
}
return (e_pt);
}
static double
cc_pt_aab(size_t oa, size_t va, size_t ob, size_t vb,
const double *d_ov_aa, const double *d_ov_bb,
const double *f_ov_aa, const double *f_ov_bb,
const double *t1_aa, const double *t1_bb,
const double *t2_aaaa, const double *t2_abab, const double *t2_baba,
const double *i_oovo_aaaa, const double *i_oovo_abab,
const double *i_oovo_baba, const double *i_oovv_aaaa,
const double *i_oovv_abab, const double *i_ovvv_aaaa,
const double *i_ovvv_abab, const double *i_ovvv_baba)
{
double e_pt = 0.0;
int rank = 0, size = 1;
if (oa == 0 || va == 0 || ob == 0 || vb == 0)
return 0.0;
#ifdef LIBPT_USE_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif
#ifdef _OPENMP
#pragma omp parallel
#endif
{
size_t i, j, k, a, b, c, it, *ijk, nijk = 0;
double *t3ax1, *abc1, *abc11, *abc12;
if ((ijk = libpt_malloc(2*oa*oa*ob*sizeof(*ijk))) == NULL)
err(1, "libpt malloc ijk");
for (i = 0, it = 0; i < oa; i++) {
for (j = i+1; j < oa; j++) {
for (k = 0; k < ob; k++, it++) {
if ((int)it % size == rank) {
ijk[3*nijk+0] = i;
ijk[3*nijk+1] = j;
ijk[3*nijk+2] = k;
nijk++;
}
}
}
}
if ((t3ax1 = libpt_malloc(2*va*va*vb*sizeof(*t3ax1))) == NULL)
err(1, "libpt malloc work");
abc1 = t3ax1 + va*va*vb;
abc11 = t3ax1 + va*va*vb;
abc12 = t3ax1 + va*va*vb + vb*va*(va-1)/2;
#ifdef _OPENMP
#pragma omp for reduction(+:e_pt) schedule(dynamic)
#endif
for (it = 0; it < nijk; it++) {
i = ijk[3*it+0];
j = ijk[3*it+1];
k = ijk[3*it+2];
t2_aaaa_i_ovvv_baba(oa,va,ob,vb,i,j,k,abc1,t2_aaaa,i_ovvv_baba);
for (a = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++)
t3ax1[a*va*vb+b*vb+c] =
-abc1[a+b*va+c*va*va]
+abc1[b+a*va+c*va*va];
t2_abab_i_ovvv_abab(oa,va,ob,vb,i,k,j,abc1,t2_abab,i_ovvv_abab);
for (a = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++)
t3ax1[a*va*vb+b*vb+c] +=
-abc1[a+c*va+b*va*vb]
+abc1[b+c*va+a*va*vb];
t2_abab_i_ovvv_abab(oa,va,ob,vb,j,k,i,abc1,t2_abab,i_ovvv_abab);
for (a = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++)
t3ax1[a*va*vb+b*vb+c] +=
+abc1[a+c*va+b*va*vb]
-abc1[b+c*va+a*va*vb];
t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,j,i,abc11,t2_baba,i_ovvv_aaaa);
t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,i,j,abc12,t2_baba,i_ovvv_aaaa);
for (a = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++)
t3ax1[a*va*vb+b*vb+c] +=
-abc11[c+vb*a*(a-1)/2+vb*b]
+abc12[c+vb*a*(a-1)/2+vb*b];
t2_aaaa_i_oovo_baba(oa,va,ob,vb,i,k,j,abc1,t2_aaaa,i_oovo_baba);
for (a = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++)
t3ax1[a*va*vb+b*vb+c] +=
-abc1[b+a*va+c*va*va];
t2_aaaa_i_oovo_baba(oa,va,ob,vb,j,k,i,abc1,t2_aaaa,i_oovo_baba);
for (a = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++)
t3ax1[a*va*vb+b*vb+c] +=
+abc1[b+a*va+c*va*va];
t2_abab_i_oovo_abab(oa,va,ob,vb,i,j,k,abc1,t2_abab,i_oovo_abab);
for (a = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++)
t3ax1[a*va*vb+b*vb+c] +=
-abc1[c+a*vb+b*vb*va]
+abc1[c+b*vb+a*vb*va];
t2_abab_i_oovo_abab(oa,va,ob,vb,j,i,k,abc1,t2_abab,i_oovo_abab);
for (a = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++)
t3ax1[a*va*vb+b*vb+c] +=
-abc1[c+b*vb+a*vb*va]
+abc1[c+a*vb+b*vb*va];
t2_baba_i_oovo_aaaa(oa,va,ob,vb,k,j,i,abc1,t2_baba,i_oovo_aaaa);
for (a = 0; a < va; a++) {
for (b = 0; b < a; b++) {
for (c = 0; c < vb; c++) {
double t3ax, t3bx, dn;
t3ax1[a*va*vb+b*vb+c] +=
-abc1[a+c*va+b*va*vb]
+abc1[b+c*va+a*va*vb];
t3bx = +comp_t3b_ijkabc(va,ob,va,vb,i,j,k,a,b,c,
t1_aa,i_oovv_abab,f_ov_aa,t2_abab)
-comp_t3b_ijkabc(va,ob,va,vb,i,j,k,b,a,c,
t1_aa,i_oovv_abab,f_ov_aa,t2_abab)
-comp_t3b_ijkabc(va,ob,va,vb,j,i,k,a,b,c,
t1_aa,i_oovv_abab,f_ov_aa,t2_abab)
+comp_t3b_ijkabc(va,ob,va,vb,j,i,k,b,a,c,
t1_aa,i_oovv_abab,f_ov_aa,t2_abab)
+comp_t3b_ijkabc(vb,oa,va,va,k,j,i,c,b,a,
t1_bb,i_oovv_aaaa,f_ov_bb,t2_aaaa);
dn = d_ov_aa[i*va+a] + d_ov_aa[j*va+b] + d_ov_bb[k*vb+c];
t3ax = t3ax1[a*va*vb+b*vb+c];
e_pt += t3ax * (t3ax-t3bx) / dn;
}}}
}
libpt_free(ijk);
libpt_free(t3ax1);
}
return (e_pt);
}
double
libpt_rpt(size_t oa, size_t va, const double *d_ov, const double *f_ov,
const double *t1, const double *t2, const double *i_oovo,
const double *i_oovv, const double *i_ovvv)
{
double e_pt = 0.0;
const double *t2_aaaa = t2;
const double *t2_abab = t2 + oa*oa*va*va;
const double *i_ovvv_aaaa = i_ovvv;
const double *i_ovvv_abab = i_ovvv + oa*va*va*(va-1)/2;
const double *i_oovo_aaaa = i_oovo;
const double *i_oovo_abab = i_oovo + oa*oa*oa*va;
const double *i_oovv_aaaa = i_oovv;
const double *i_oovv_abab = i_oovv + oa*oa*va*va;
e_pt += cc_pt_aaa(oa, va, d_ov, f_ov, t1, t2_aaaa,
i_oovo_aaaa, i_oovv_aaaa, i_ovvv_aaaa);
e_pt += cc_pt_aab(oa, va, oa, va, d_ov, d_ov, f_ov, f_ov, t1, t1,
t2_aaaa, t2_abab, t2_abab, i_oovo_aaaa, i_oovo_abab, i_oovo_abab,
i_oovv_aaaa, i_oovv_abab, i_ovvv_aaaa, i_ovvv_abab, i_ovvv_abab);
#ifdef LIBPT_USE_MPI
MPI_Allreduce(MPI_IN_PLACE, &e_pt, 1, MPI_DOUBLE,
MPI_SUM, MPI_COMM_WORLD);
#endif
return 2.0 * e_pt;
}
double
libpt_upt(size_t oa, size_t va, size_t ob, size_t vb, const double *d_ov,
const double *f_ov, const double *t1, const double *t2,
const double *i_oovo, const double *i_oovv, const double *i_ovvv)
{
double e_pt = 0.0;
const double *d_ov_aa = d_ov;
const double *d_ov_bb = d_ov_aa + oa*va;
const double *f_ov_aa = f_ov;
const double *f_ov_bb = f_ov_aa + oa*va;
const double *t1_aa = t1;
const double *t1_bb = t1_aa + oa*va;
const double *t2_aaaa = t2;
const double *t2_abab = t2_aaaa + oa*oa*va*va;
const double *t2_bbbb = t2_abab + oa*ob*va*vb;
const double *t2_baba = t2_bbbb + ob*ob*vb*vb;
const double *i_oovo_aaaa = i_oovo;
const double *i_oovo_abab = i_oovo_aaaa + oa*oa*va*oa;
const double *i_oovo_bbbb = i_oovo_abab + oa*ob*va*ob;
const double *i_oovo_baba = i_oovo_bbbb + ob*ob*vb*ob;
const double *i_oovv_aaaa = i_oovv;
const double *i_oovv_abab = i_oovv_aaaa + oa*oa*va*va;
const double *i_oovv_bbbb = i_oovv_abab + oa*ob*va*vb;
const double *i_oovv_baba = i_oovv_bbbb + ob*ob*vb*vb;
const double *i_ovvv_aaaa = i_ovvv;
const double *i_ovvv_abab = i_ovvv_aaaa + oa*va*va*(va-1)/2;
const double *i_ovvv_bbbb = i_ovvv_abab + oa*vb*va*vb;
const double *i_ovvv_baba = i_ovvv_bbbb + ob*vb*vb*(vb-1)/2;
/* aaaaaa */
e_pt += cc_pt_aaa(oa, va, d_ov_aa, f_ov_aa, t1_aa, t2_aaaa,
i_oovo_aaaa, i_oovv_aaaa, i_ovvv_aaaa);
/* bbbbbb */
e_pt += cc_pt_aaa(ob, vb, d_ov_bb, f_ov_bb, t1_bb, t2_bbbb,
i_oovo_bbbb, i_oovv_bbbb, i_ovvv_bbbb);
/* aabaab */
e_pt += cc_pt_aab(oa, va, ob, vb, d_ov_aa, d_ov_bb, f_ov_aa, f_ov_bb,
t1_aa, t1_bb, t2_aaaa, t2_abab, t2_baba, i_oovo_aaaa, i_oovo_abab,
i_oovo_baba, i_oovv_aaaa, i_oovv_abab, i_ovvv_aaaa, i_ovvv_abab,
i_ovvv_baba);
/* bbabba */
e_pt += cc_pt_aab(ob, vb, oa, va, d_ov_bb, d_ov_aa, f_ov_bb, f_ov_aa,
t1_bb, t1_aa, t2_bbbb, t2_baba, t2_abab, i_oovo_bbbb, i_oovo_baba,
i_oovo_abab, i_oovv_bbbb, i_oovv_baba, i_ovvv_bbbb, i_ovvv_baba,
i_ovvv_abab);
#ifdef LIBPT_USE_MPI
MPI_Allreduce(MPI_IN_PLACE, &e_pt, 1, MPI_DOUBLE,
MPI_SUM, MPI_COMM_WORLD);
#endif
return e_pt;
}
static double
cc_ft_aaa(size_t oa, size_t va, const double *d_ov, const double *f2_ov,
const double *l1, const double *t2, const double *l2, const double *i_oovv,
const double *i2_t2f2_oovo, const double *i3_ovvv, const double *i6_oovo,
const double *i7_ovvv)
{
double e_pt = 0.0;
int rank = 0, size = 1;
if (oa == 0 || va == 0)
return 0.0;
#ifdef LIBPT_USE_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif
#ifdef _OPENMP
#pragma omp parallel
#endif
{
size_t i, j, k, a, b, c, t, it, *ijk, nijk = 0;
double *sigvvvl, *sigvvvr, *abc1;
if ((ijk = libpt_malloc(oa*oa*oa*sizeof(*ijk))) == NULL)
err(1, "libpt malloc ijk");
for (i = 0, it = 0; i < oa; i++) {
for (j = i+1; j < oa; j++) {
for (k = j+1; k < oa; k++, it++) {
if ((int)it % size == rank) {
ijk[3*nijk+0] = i;
ijk[3*nijk+1] = j;
ijk[3*nijk+2] = k;
nijk++;
}
}
}
}
if ((sigvvvl = libpt_malloc(2*va*va*va*sizeof(*sigvvvl))) == NULL)
err(1, "libpt malloc work");
sigvvvr = sigvvvl + va*va*(va-1)/2;
abc1 = sigvvvl + va*va*va;
#ifdef _OPENMP
#pragma omp for reduction(+:e_pt) schedule(dynamic)
#endif
for (it = 0; it < nijk; it++) {
i = ijk[3*it+0];
j = ijk[3*it+1];
k = ijk[3*it+2];
t2_i_ovvv_half(oa,va,i,j,k,abc1,l2,i7_ovvv);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < b; c++, t++)
sigvvvl[t] =
+abc1[a*(a-1)/2*va+b*va+c]
-abc1[a*(a-1)/2*va+c*va+b]
+abc1[b*(b-1)/2*va+c*va+a];
t2_i_ovvv_half(oa,va,k,j,i,abc1,l2,i7_ovvv);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < b; c++, t++)
sigvvvl[t] +=
-abc1[a*(a-1)/2*va+b*va+c]
+abc1[a*(a-1)/2*va+c*va+b]
-abc1[b*(b-1)/2*va+c*va+a];
t2_i_ovvv_half(oa,va,i,k,j,abc1,l2,i7_ovvv);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < b; c++, t++)
sigvvvl[t] +=
-abc1[a*(a-1)/2*va+b*va+c]
+abc1[a*(a-1)/2*va+c*va+b]
-abc1[b*(b-1)/2*va+c*va+a];
t2_i_oovo(oa,va,i,j,k,abc1,l2,i6_oovo);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < b; c++, t++)
sigvvvl[t] +=
+abc1[a*va*va+b*va+c]
-abc1[b*va*va+a*va+c]
-abc1[c*va*va+b*va+a];
t2_i_oovo(oa,va,j,i,k,abc1,l2,i6_oovo);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < b; c++, t++)
sigvvvl[t] +=
-abc1[a*va*va+b*va+c]
+abc1[b*va*va+a*va+c]
+abc1[c*va*va+b*va+a];
t2_i_oovo(oa,va,k,j,i,abc1,l2,i6_oovo);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < b; c++, t++)
sigvvvl[t] +=
-abc1[a*va*va+b*va+c]
+abc1[b*va*va+a*va+c]
+abc1[c*va*va+b*va+a];
t2_i_ovvv_half(oa,va,i,j,k,abc1,t2,i3_ovvv);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < b; c++, t++)
sigvvvr[t] =
+abc1[a*(a-1)/2*va+b*va+c]
-abc1[a*(a-1)/2*va+c*va+b]
+abc1[b*(b-1)/2*va+c*va+a];
t2_i_ovvv_half(oa,va,k,j,i,abc1,t2,i3_ovvv);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < b; c++, t++)
sigvvvr[t] +=
-abc1[a*(a-1)/2*va+b*va+c]
+abc1[a*(a-1)/2*va+c*va+b]
-abc1[b*(b-1)/2*va+c*va+a];
t2_i_ovvv_half(oa,va,i,k,j,abc1,t2,i3_ovvv);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < b; c++, t++)
sigvvvr[t] +=
-abc1[a*(a-1)/2*va+b*va+c]
+abc1[a*(a-1)/2*va+c*va+b]
-abc1[b*(b-1)/2*va+c*va+a];
t2_i_oovo(oa,va,i,j,k,abc1,t2,i2_t2f2_oovo);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < b; c++, t++)
sigvvvr[t] +=
+abc1[a*va*va+b*va+c]
-abc1[b*va*va+a*va+c]
-abc1[c*va*va+b*va+a];
t2_i_oovo(oa,va,j,i,k,abc1,t2,i2_t2f2_oovo);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < b; c++, t++)
sigvvvr[t] +=
-abc1[a*va*va+b*va+c]
+abc1[b*va*va+a*va+c]
+abc1[c*va*va+b*va+a];
t2_i_oovo(oa,va,k,j,i,abc1,t2,i2_t2f2_oovo);
for (a = 0, t = 0; a < va; a++) {
for (b = 0; b < a; b++) {
for (c = 0; c < b; c++, t++) {
double dn, l1t;
sigvvvr[t] +=
-abc1[a*va*va+b*va+c]
+abc1[b*va*va+a*va+c]
+abc1[c*va*va+b*va+a];
dn = d_ov[i*va+a] + d_ov[j*va+b] + d_ov[k*va+c];
l1t = +i_jk_a_bc_ov_oovv(oa,va,l1,i_oovv,i,j,k,a,b,c)
+i_jk_a_bc_ov_oovv(oa,va,f2_ov,l2,i,j,k,a,b,c);
e_pt += (sigvvvl[t] - l1t) * sigvvvr[t] / dn;
}}}
}
libpt_free(ijk);
libpt_free(sigvvvl);
}
return (e_pt);
}
static double
cc_ft_aab(size_t oa, size_t va, size_t ob, size_t vb,
const double *d_ov_aa, const double *d_ov_bb,
const double *f2_ov_aa, const double *f2_ov_bb,
const double *l1_aa, const double *l1_bb,
const double *t2_aaaa, const double *t2_abab, const double *t2_baba,
const double *l2_aaaa, const double *l2_abab, const double *l2_baba,
const double *i_oovv_aaaa, const double *i_oovv_abab,
const double *i2_t2f2_oovo_aaaa, const double *i2_t2f2_oovo_abab,
const double *i2_t2f2_oovo_baba,
const double *i3_ovvv_aaaa, const double *i3_ovvv_abab,
const double *i3_ovvv_baba,
const double *i6_oovo_aaaa, const double *i6_oovo_abab,
const double *i6_oovo_baba,
const double *i7_ovvv_aaaa, const double *i7_ovvv_abab,
const double *i7_ovvv_baba)
{
double e_pt = 0.0;
int rank = 0, size = 1;
if (oa == 0 || va == 0 || ob == 0 || vb == 0)
return 0.0;
#ifdef LIBPT_USE_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif
#ifdef _OPENMP
#pragma omp parallel
#endif
{
size_t i, j, k, a, b, c, t, it, *ijk, nijk = 0;
double *sigvvvl, *sigvvvr, *abc1, *abc11, *abc12;
if ((ijk = libpt_malloc(2*oa*oa*ob*sizeof(*ijk))) == NULL)
err(1, "libpt malloc ijk");
for (i = 0, it = 0; i < oa; i++) {
for (j = i+1; j < oa; j++) {
for (k = 0; k < ob; k++, it++) {
if ((int)it % size == rank) {
ijk[3*nijk+0] = i;
ijk[3*nijk+1] = j;
ijk[3*nijk+2] = k;
nijk++;
}
}
}
}
if ((sigvvvl = libpt_malloc(2*va*va*vb*sizeof(*sigvvvl))) == NULL)
err(1, "libpt malloc work");
sigvvvr = sigvvvl + vb*va*(va-1)/2;
abc1 = sigvvvl + va*va*vb;
abc11 = sigvvvl + va*va*vb;
abc12 = sigvvvl + va*va*vb + vb*va*(va-1)/2;
#ifdef _OPENMP
#pragma omp for reduction(+:e_pt) schedule(dynamic)
#endif
for (it = 0; it < nijk; it++) {
i = ijk[3*it+0];
j = ijk[3*it+1];
k = ijk[3*it+2];
t2_aaaa_i_ovvv_baba(oa,va,ob,vb,i,j,k,abc1,l2_aaaa,i7_ovvv_baba);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++, t++)
sigvvvl[t] =
-abc1[a+b*va+c*va*va]
+abc1[b+a*va+c*va*va];
t2_abab_i_ovvv_abab(oa,va,ob,vb,i,k,j,abc1,l2_abab,i7_ovvv_abab);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++, t++)
sigvvvl[t] +=
-abc1[a+c*va+b*va*vb]
+abc1[b+c*va+a*va*vb];
t2_abab_i_ovvv_abab(oa,va,ob,vb,j,k,i,abc1,l2_abab,i7_ovvv_abab);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++, t++)
sigvvvl[t] +=
+abc1[a+c*va+b*va*vb]
-abc1[b+c*va+a*va*vb];
t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,j,i,abc11,l2_baba,i7_ovvv_aaaa);
t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,i,j,abc12,l2_baba,i7_ovvv_aaaa);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++, t++)
sigvvvl[t] +=
-abc11[c+vb*a*(a-1)/2+vb*b]
+abc12[c+vb*a*(a-1)/2+vb*b];
t2_aaaa_i_oovo_baba(oa,va,ob,vb,i,k,j,abc1,l2_aaaa,i6_oovo_baba);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++, t++)
sigvvvl[t] +=
-abc1[b+a*va+c*va*va];
t2_aaaa_i_oovo_baba(oa,va,ob,vb,j,k,i,abc1,l2_aaaa,i6_oovo_baba);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++, t++)
sigvvvl[t] +=
+abc1[b+a*va+c*va*va];
t2_abab_i_oovo_abab(oa,va,ob,vb,i,j,k,abc1,l2_abab,i6_oovo_abab);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++, t++)
sigvvvl[t] +=
-abc1[c+a*vb+b*vb*va]
+abc1[c+b*vb+a*vb*va];
t2_abab_i_oovo_abab(oa,va,ob,vb,j,i,k,abc1,l2_abab,i6_oovo_abab);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++, t++)
sigvvvl[t] +=
-abc1[c+b*vb+a*vb*va]
+abc1[c+a*vb+b*vb*va];
t2_baba_i_oovo_aaaa(oa,va,ob,vb,k,j,i,abc1,l2_baba,i6_oovo_aaaa);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++, t++)
sigvvvl[t] +=
-abc1[a+c*va+b*va*vb]
+abc1[b+c*va+a*va*vb];
t2_aaaa_i_ovvv_baba(oa,va,ob,vb,i,j,k,abc1,t2_aaaa,i3_ovvv_baba);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++, t++)
sigvvvr[t] =
-abc1[a+b*va+c*va*va]
+abc1[b+a*va+c*va*va];
t2_abab_i_ovvv_abab(oa,va,ob,vb,i,k,j,abc1,t2_abab,i3_ovvv_abab);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++, t++)
sigvvvr[t] +=
-abc1[a+c*va+b*va*vb]
+abc1[b+c*va+a*va*vb];
t2_abab_i_ovvv_abab(oa,va,ob,vb,j,k,i,abc1,t2_abab,i3_ovvv_abab);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++, t++)
sigvvvr[t] +=
+abc1[a+c*va+b*va*vb]
-abc1[b+c*va+a*va*vb];
t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,j,i,abc11,t2_baba,i3_ovvv_aaaa);
t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,i,j,abc12,t2_baba,i3_ovvv_aaaa);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++, t++)
sigvvvr[t] +=
-abc11[c+vb*a*(a-1)/2+vb*b]
+abc12[c+vb*a*(a-1)/2+vb*b];
t2_aaaa_i_oovo_baba(oa,va,ob,vb,i,k,j,abc1,t2_aaaa,i2_t2f2_oovo_baba);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++, t++)
sigvvvr[t] +=
-abc1[b+a*va+c*va*va];
t2_aaaa_i_oovo_baba(oa,va,ob,vb,j,k,i,abc1,t2_aaaa,i2_t2f2_oovo_baba);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++, t++)
sigvvvr[t] +=
+abc1[b+a*va+c*va*va];
t2_abab_i_oovo_abab(oa,va,ob,vb,i,j,k,abc1,t2_abab,i2_t2f2_oovo_abab);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++, t++)
sigvvvr[t] +=
-abc1[c+a*vb+b*vb*va]
+abc1[c+b*vb+a*vb*va];
t2_abab_i_oovo_abab(oa,va,ob,vb,j,i,k,abc1,t2_abab,i2_t2f2_oovo_abab);
for (a = 0, t = 0; a < va; a++)
for (b = 0; b < a; b++)
for (c = 0; c < vb; c++, t++)
sigvvvr[t] +=
-abc1[c+b*vb+a*vb*va]
+abc1[c+a*vb+b*vb*va];
t2_baba_i_oovo_aaaa(oa,va,ob,vb,k,j,i,abc1,t2_baba,i2_t2f2_oovo_aaaa);
for (a = 0, t = 0; a < va; a++) {
for (b = 0; b < a; b++) {
for (c = 0; c < vb; c++, t++) {
double dn, l1t;
sigvvvr[t] +=
-abc1[a+c*va+b*va*vb]
+abc1[b+c*va+a*va*vb];
l1t = +comp_t3b_ijkabc(va,ob,va,vb,i,j,k,a,b,c,
l1_aa,i_oovv_abab,f2_ov_aa,l2_abab)
-comp_t3b_ijkabc(va,ob,va,vb,i,j,k,b,a,c,
l1_aa,i_oovv_abab,f2_ov_aa,l2_abab)
-comp_t3b_ijkabc(va,ob,va,vb,j,i,k,a,b,c,
l1_aa,i_oovv_abab,f2_ov_aa,l2_abab)
+comp_t3b_ijkabc(va,ob,va,vb,j,i,k,b,a,c,
l1_aa,i_oovv_abab,f2_ov_aa,l2_abab)
+comp_t3b_ijkabc(vb,oa,va,va,k,j,i,c,b,a,
l1_bb,i_oovv_aaaa,f2_ov_bb,l2_aaaa);
dn = d_ov_aa[i*va+a] + d_ov_aa[j*va+b] + d_ov_bb[k*vb+c];
e_pt += (sigvvvl[t] - l1t) * sigvvvr[t] / dn;
}}}
}
libpt_free(ijk);
libpt_free(sigvvvl);
}
return (e_pt);
}
double
libpt_rft(size_t oa, size_t va, const double *d_ov, const double *f2_ov,
const double *l1, const double *t2, const double *l2, const double *i_oovv,
const double *i2_t2f2_oovo, const double *i3_ovvv, const double *i6_oovo,
const double *i7_ovvv)
{
double e_pt = 0.0;
const double *t2_aaaa = t2;
const double *t2_abab = t2 + oa*oa*va*va;
const double *l2_aaaa = l2;
const double *l2_abab = l2 + oa*oa*va*va;
const double *i_oovv_aaaa = i_oovv;
const double *i_oovv_abab = i_oovv + oa*oa*va*va;
const double *i2_t2f2_oovo_aaaa = i2_t2f2_oovo;
const double *i2_t2f2_oovo_abab = i2_t2f2_oovo + oa*oa*oa*va;
const double *i3_ovvv_aaaa = i3_ovvv;
const double *i3_ovvv_abab = i3_ovvv + oa*va*va*(va-1)/2;
const double *i6_oovo_aaaa = i6_oovo;
const double *i6_oovo_abab = i6_oovo + oa*oa*oa*va;
const double *i7_ovvv_aaaa = i7_ovvv;
const double *i7_ovvv_abab = i7_ovvv + oa*va*va*(va-1)/2;
e_pt += cc_ft_aaa(oa, va, d_ov, f2_ov, l1, t2_aaaa, l2_aaaa,
i_oovv_aaaa, i2_t2f2_oovo_aaaa, i3_ovvv_aaaa, i6_oovo_aaaa,
i7_ovvv_aaaa);
e_pt += cc_ft_aab(oa, va, oa, va, d_ov, d_ov, f2_ov, f2_ov,
l1, l1, t2_aaaa, t2_abab, t2_abab, l2_aaaa, l2_abab, l2_abab,
i_oovv_aaaa, i_oovv_abab, i2_t2f2_oovo_aaaa, i2_t2f2_oovo_abab,
i2_t2f2_oovo_abab, i3_ovvv_aaaa, i3_ovvv_abab, i3_ovvv_abab,
i6_oovo_aaaa, i6_oovo_abab, i6_oovo_abab,
i7_ovvv_aaaa, i7_ovvv_abab, i7_ovvv_abab);
#ifdef LIBPT_USE_MPI
MPI_Allreduce(MPI_IN_PLACE, &e_pt, 1, MPI_DOUBLE,
MPI_SUM, MPI_COMM_WORLD);
#endif
return 2.0 * e_pt;
}
double
libpt_uft(size_t oa, size_t va, size_t ob, size_t vb, const double *d_ov,
const double *f2_ov, const double *l1, const double *t2, const double *l2,
const double *i_oovv, const double *i2_t2f2_oovo, const double *i3_ovvv,
const double *i6_oovo, const double *i7_ovvv)
{
double e_pt = 0.0;
const double *d_ov_aa = d_ov;
const double *d_ov_bb = d_ov_aa + oa*va;
const double *f2_ov_aa = f2_ov;
const double *f2_ov_bb = f2_ov_aa + oa*va;
const double *l1_aa = l1;
const double *l1_bb = l1_aa + oa*va;
const double *t2_aaaa = t2;
const double *t2_abab = t2_aaaa + oa*oa*va*va;
const double *t2_bbbb = t2_abab + oa*ob*va*vb;
const double *t2_baba = t2_bbbb + ob*ob*vb*vb;
const double *l2_aaaa = l2;
const double *l2_abab = l2_aaaa + oa*oa*va*va;
const double *l2_bbbb = l2_abab + oa*ob*va*vb;
const double *l2_baba = l2_bbbb + ob*ob*vb*vb;
const double *i_oovv_aaaa = i_oovv;
const double *i_oovv_abab = i_oovv_aaaa + oa*oa*va*va;
const double *i_oovv_bbbb = i_oovv_abab + oa*ob*va*vb;
const double *i_oovv_baba = i_oovv_bbbb + ob*ob*vb*vb;
const double *i2_t2f2_oovo_aaaa = i2_t2f2_oovo;
const double *i2_t2f2_oovo_abab = i2_t2f2_oovo_aaaa + oa*oa*va*oa;
const double *i2_t2f2_oovo_bbbb = i2_t2f2_oovo_abab + oa*ob*va*ob;
const double *i2_t2f2_oovo_baba = i2_t2f2_oovo_bbbb + ob*ob*vb*ob;
const double *i3_ovvv_aaaa = i3_ovvv;
const double *i3_ovvv_abab = i3_ovvv_aaaa + oa*va*va*(va-1)/2;
const double *i3_ovvv_bbbb = i3_ovvv_abab + oa*vb*va*vb;
const double *i3_ovvv_baba = i3_ovvv_bbbb + ob*vb*vb*(vb-1)/2;
const double *i6_oovo_aaaa = i6_oovo;
const double *i6_oovo_abab = i6_oovo_aaaa + oa*oa*va*oa;
const double *i6_oovo_bbbb = i6_oovo_abab + oa*ob*va*ob;
const double *i6_oovo_baba = i6_oovo_bbbb + ob*ob*vb*ob;
const double *i7_ovvv_aaaa = i7_ovvv;
const double *i7_ovvv_abab = i7_ovvv_aaaa + oa*va*va*(va-1)/2;
const double *i7_ovvv_bbbb = i7_ovvv_abab + oa*vb*va*vb;
const double *i7_ovvv_baba = i7_ovvv_bbbb + ob*vb*vb*(vb-1)/2;
/* aaaaaa */
e_pt += cc_ft_aaa(oa, va, d_ov_aa, f2_ov_aa, l1_aa, t2_aaaa, l2_aaaa,
i_oovv_aaaa, i2_t2f2_oovo_aaaa, i3_ovvv_aaaa, i6_oovo_aaaa,
i7_ovvv_aaaa);
/* bbbbbb */
e_pt += cc_ft_aaa(ob, vb, d_ov_bb, f2_ov_bb, l1_bb, t2_bbbb, l2_bbbb,
i_oovv_bbbb, i2_t2f2_oovo_bbbb, i3_ovvv_bbbb, i6_oovo_bbbb,
i7_ovvv_bbbb);
/* aabaab */
e_pt += cc_ft_aab(oa, va, ob, vb, d_ov_aa, d_ov_bb, f2_ov_aa, f2_ov_bb,
l1_aa, l1_bb, t2_aaaa, t2_abab, t2_baba, l2_aaaa, l2_abab, l2_baba,
i_oovv_aaaa, i_oovv_abab, i2_t2f2_oovo_aaaa, i2_t2f2_oovo_abab,
i2_t2f2_oovo_baba, i3_ovvv_aaaa, i3_ovvv_abab, i3_ovvv_baba,
i6_oovo_aaaa, i6_oovo_abab, i6_oovo_baba,
i7_ovvv_aaaa, i7_ovvv_abab, i7_ovvv_baba);
/* bbabba */
e_pt += cc_ft_aab(ob, vb, oa, va, d_ov_bb, d_ov_aa, f2_ov_bb, f2_ov_aa,
l1_bb, l1_aa, t2_bbbb, t2_baba, t2_abab, l2_bbbb, l2_baba, l2_abab,
i_oovv_bbbb, i_oovv_baba, i2_t2f2_oovo_bbbb, i2_t2f2_oovo_baba,
i2_t2f2_oovo_abab, i3_ovvv_bbbb, i3_ovvv_baba, i3_ovvv_abab,
i6_oovo_bbbb, i6_oovo_baba, i6_oovo_abab,
i7_ovvv_bbbb, i7_ovvv_baba, i7_ovvv_abab);
#ifdef LIBPT_USE_MPI
MPI_Allreduce(MPI_IN_PLACE, &e_pt, 1, MPI_DOUBLE,
MPI_SUM, MPI_COMM_WORLD);
#endif
return e_pt;
}
|
mixedup_linear_solver.h | //    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ \
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
#if !defined(KRATOS_MIXEDUP_SOLVER_H_INCLUDED )
#define KRATOS_MIXEDUP_SOLVER_H_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <sstream>
#include <cstddef>
// External includes
// Project includes
#include "includes/define.h"
#include "reorderer.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "linear_solvers/iterative_solver.h"
#include <boost/numeric/ublas/vector.hpp>
#include "utilities/openmp_utils.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/** This solver is designed for the solution of mixed U-P problems.
* It uses a block structure dividing the matrix into UU, PP, UP and PU blocks
* and uses "standard" linear solvers for the different blocks as well as a GMRES for the outer part
*/
template<class TSparseSpaceType, class TDenseSpaceType,
class TPreconditionerType = Preconditioner<TSparseSpaceType, TDenseSpaceType>,
class TReordererType = Reorderer<TSparseSpaceType, TDenseSpaceType> >
class MixedUPLinearSolver :
public IterativeSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>
{
public:
///@name Type Definitions
///@{
/// Pointer definition of MixedUPLinearSolver
KRATOS_CLASS_POINTER_DEFINITION (MixedUPLinearSolver);
typedef IterativeSolver<TSparseSpaceType, TDenseSpaceType, TPreconditionerType, TReordererType> BaseType;
typedef typename TSparseSpaceType::MatrixType SparseMatrixType;
typedef typename TSparseSpaceType::VectorType VectorType;
typedef typename TDenseSpaceType::MatrixType DenseMatrixType;
typedef typename TDenseSpaceType::VectorType DenseVectorType;
typedef std::size_t SizeType;
///@}
///@name Life Cycle
///@{
/// Default constructor.
MixedUPLinearSolver (typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer psolver_UU_block,
typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer psolver_PP_block,
double NewMaxTolerance,
unsigned int NewMaxIterationsNumber,
unsigned int m
) : BaseType (NewMaxTolerance, NewMaxIterationsNumber)
{
//saving the linear solvers to be used in the solution process
mpsolver_UU_block = psolver_UU_block;
mpsolver_PP_block = psolver_PP_block;
mBlocksAreAllocated = false;
mis_initialized = false;
mm = m;
}
MixedUPLinearSolver(Parameters settings,
typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer psolver_UU_block,
typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer psolver_PP_block
): BaseType ()
{
KRATOS_TRY
Parameters default_parameters( R"(
{
"solver_type": "MixedUPLinearSolver",
"velocity_solver" : {
"solver_type":"BICGSTABSolver"
},
"pressure_solver" : {
"solver_type":"CGSolver"
},
"tolerance" : 1.0e-6,
"max_iteration" : 200,
"gmres_krylov_space_dimension" : 100
} )" );
//now validate against defaults -- this also ensures no type mismatch
settings.ValidateAndAssignDefaults(default_parameters);
this->SetTolerance( settings["tolerance"].GetDouble() );
this->SetMaxIterationsNumber( settings["max_iteration"].GetInt() );
mm = settings["gmres_krylov_space_dimension"].GetInt();
//storing other data
mpsolver_UU_block = psolver_UU_block;
mpsolver_PP_block = psolver_PP_block;
mBlocksAreAllocated = false;
mis_initialized = false;
KRATOS_CATCH("")
}
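// Example settings (illustrative sketch; SparseSpaceType, DenseSpaceType and
// the block solver pointers pUU, pPP are hypothetical names):
//
//   Parameters settings( R"({
//       "velocity_solver" : { "solver_type" : "BICGSTABSolver" },
//       "pressure_solver" : { "solver_type" : "CGSolver" },
//       "tolerance" : 1.0e-6,
//       "max_iteration" : 200,
//       "gmres_krylov_space_dimension" : 100
//   })" );
//   MixedUPLinearSolver<SparseSpaceType, DenseSpaceType> solver(settings, pUU, pPP);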
/// Copy constructor.
MixedUPLinearSolver (const MixedUPLinearSolver& Other)
{
KRATOS_THROW_ERROR (std::logic_error,"copy constructor not correctly implemented","");
}
/// Destructor.
~MixedUPLinearSolver() override {}
///@}
///@name Operators
///@{
/// Assignment operator.
MixedUPLinearSolver& operator= (const MixedUPLinearSolver& Other)
{
return *this;
}
///@}
///@name Operations
///@{
/** This function is designed to be called as few times as possible. It creates the data structures
* that only depend on the connectivity of the matrix (and not on its coefficients)
* so that the memory can be allocated once and expensive operations can be done only when strictly
* needed
@param rA. System matrix
@param rX. Solution vector. it's also the initial guess for iterative linear solvers.
@param rB. Right hand side vector.
*/
void Initialize (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
{
if (mBlocksAreAllocated == true)
{
mpsolver_UU_block->Initialize(mK, mu, mru);
mpsolver_PP_block->Initialize(mS, mp, mrp);
mis_initialized = true;
}
else
{
std::cout << "linear solver intialization is deferred to the moment at which blocks are available" << std::endl;
}
}
/** This function is designed to be called every time the coefficients change in the system
* that is, normally at the beginning of each solve.
* For example if we are implementing a direct solver, this is the place to do the factorization
* so that then the backward substitution can be performed effectively more than once
@param rA. System matrix
@param rX. Solution vector. it's also the initial guess for iterative linear solvers.
@param rB. Right hand side vector.
*/
void InitializeSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
{
//copy to local matrices
if (mBlocksAreAllocated == false)
{
FillBlockMatrices (true, rA, mK, mG, mD, mS);
mBlocksAreAllocated = true;
}
else
{
FillBlockMatrices (false, rA, mK, mG, mD, mS);
mBlocksAreAllocated = true;
}
if(mis_initialized == false) this->Initialize(rA,rX,rB);
//initialize solvers
mpsolver_UU_block->InitializeSolutionStep(mK, mu, mru);
mpsolver_PP_block->InitializeSolutionStep(mS, mp, mrp);
}
/** This function actually performs the solution work, eventually taking advantage of what was done before in the
* Initialize and InitializeSolutionStep functions.
@param rA. System matrix
@param rX. Solution vector. it's also the initial guess for iterative linear solvers.
@param rB. Right hand side vector.
*/
void PerformSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
{
unsigned int m = mm;
unsigned int max_iter = BaseType::GetMaxIterationsNumber();
double tol = BaseType::GetTolerance();
gmres_solve (rA,rX,rB,m,max_iter,tol);
}
/** This function is designed to be called at the end of the solve step.
* for example this is the place to remove any data that we do not want to save for later
@param rA. System matrix
@param rX. Solution vector. it's also the initial guess for iterative linear solvers.
@param rB. Right hand side vector.
*/
void FinalizeSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
{
mpsolver_UU_block->FinalizeSolutionStep(mK, mu, mru);
mpsolver_PP_block->FinalizeSolutionStep(mS, mp, mrp);
}
/** This function is designed to clean up all internal data in the solver.
* Clear is designed to leave the solver object as if newly created.
* After a clear a new Initialize is needed
*/
void Clear() override
{
mK.clear();
mG.clear();
mD.clear();
mS.clear();
mBlocksAreAllocated = false;
mpsolver_UU_block->Clear();
mpsolver_PP_block->Clear();
mu.clear();
mp.clear();
mru.clear();
mrp.clear();
mis_initialized = false;
}
/** Normal solve method.
Solves the linear system Ax=b and puts the result on SystemVector& rX.
rX is also the initial guess for iterative methods.
@param rA. System matrix
@param rX. Solution vector. it's also the initial
guess for iterative linear solvers.
@param rB. Right hand side vector.
*/
bool Solve(SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
{
if (mis_initialized == false)
this->Initialize (rA,rX,rB);
this->InitializeSolutionStep (rA,rX,rB);
this->PerformSolutionStep (rA,rX,rB);
this->FinalizeSolutionStep (rA,rX,rB);
return false;
}
/** Multi solve method for solving a set of linear systems with same coefficient matrix.
Solves the linear system Ax=b and puts the result on SystemVector& rX.
rX is also the initial guess for iterative methods.
@param rA. System matrix
@param rX. Solution vector. it's also the initial
guess for iterative linear solvers.
@param rB. Right hand side vector.
*/
bool Solve (SparseMatrixType& rA, DenseMatrixType& rX, DenseMatrixType& rB) override
{
return false;
}
/** Eigenvalue and eigenvector solve method for derived eigensolvers */
void Solve (SparseMatrixType& K,
SparseMatrixType& M,
DenseVectorType& Eigenvalues,
DenseMatrixType& Eigenvectors) override
{}
/** Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example
* when solving a mixed u-p problem, it is important to identify the row associated to v and p.
* another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers
* which require knowledge on the spatial position of the nodes associated to a given dof.
* This function tells if the solver requires such data
*/
bool AdditionalPhysicalDataIsNeeded() override
{
return true;
}
/** Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example
* when solving a mixed u-p problem, it is important to identify the row associated to v and p.
* another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers
* which require knowledge on the spatial position of the nodes associated to a given dof.
* This function is the place to eventually provide such data
*/
void ProvideAdditionalData (
SparseMatrixType& rA,
VectorType& rX,
VectorType& rB,
typename ModelPart::DofsArrayType& rdof_set,
ModelPart& r_model_part
) override
{
//count pressure dofs
unsigned int n_pressure_dofs = 0;
unsigned int tot_active_dofs = 0;
for (ModelPart::DofsArrayType::iterator it = rdof_set.begin(); it!=rdof_set.end(); it++)
{
if (it->EquationId() < rA.size1())
{
tot_active_dofs += 1;
if (it->GetVariable().Key() == PRESSURE)
n_pressure_dofs += 1;
}
}
if (tot_active_dofs != rA.size1() )
KRATOS_THROW_ERROR (std::logic_error,"total system size does not coincide with the free dof map","");
//resize arrays as needed
mpressure_indices.resize (n_pressure_dofs,false);
unsigned int other_dof_size = tot_active_dofs - n_pressure_dofs;
mother_indices.resize (other_dof_size,false);
mglobal_to_local_indexing.resize (tot_active_dofs,false);
mis_pressure_block.resize (tot_active_dofs,false);
//construct aux_lists as needed
//"other_counter[i]" i will contain the position in the global system of the i-th NON-pressure node
//"pressure_counter[i]" will contain the in the global system of the i-th NON-pressure node
//
//mglobal_to_local_indexing[i] will contain the position in the local blocks of the
unsigned int pressure_counter = 0;
unsigned int other_counter = 0;
unsigned int global_pos = 0;
for (ModelPart::DofsArrayType::iterator it = rdof_set.begin(); it!=rdof_set.end(); it++)
{
if (it->EquationId() < rA.size1())
{
if (it->GetVariable().Key() == PRESSURE)
{
mpressure_indices[pressure_counter] = global_pos;
mglobal_to_local_indexing[global_pos] = pressure_counter;
mis_pressure_block[global_pos] = true;
pressure_counter++;
}
else
{
mother_indices[other_counter] = global_pos;
mglobal_to_local_indexing[global_pos] = other_counter;
mis_pressure_block[global_pos] = false;
other_counter++;
}
global_pos++;
}
}
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "Linear solver";
}
/// Print information about this object.
void PrintInfo (std::ostream& rOStream) const override
{
rOStream << "Linear solver";
}
/// Print object's data.
void PrintData (std::ostream& rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///this function generates the subblocks of matrix A
///as A = ( K G ) u
/// ( D S ) p
/// subblocks are allocated or not depending on the value of "need_allocation"
void FillBlockMatrices (bool need_allocation, SparseMatrixType& rA, SparseMatrixType& K, SparseMatrixType& G, SparseMatrixType& D, SparseMatrixType& S )
{
KRATOS_TRY
//get access to A data
const std::size_t* index1 = rA.index1_data().begin();
const std::size_t* index2 = rA.index2_data().begin();
const double* values = rA.value_data().begin();
SparseMatrixType L(mpressure_indices.size(),mpressure_indices.size() );
if (need_allocation == true)
{
K.clear();
G.clear();
D.clear();
S.clear();
L.clear();
//do allocation
K.resize (mother_indices.size() ,mother_indices.size() );
G.resize (mother_indices.size() ,mpressure_indices.size() );
D.resize (mpressure_indices.size(),mother_indices.size() );
S.resize (mpressure_indices.size(),mpressure_indices.size() );
mrp.resize(mpressure_indices.size() );
mru.resize(mother_indices.size() );
mp.resize(mpressure_indices.size());
mu.resize(mother_indices.size());
//KRATOS_WATCH (mglobal_to_local_indexing);
//allocate the blocks by push_back
for (unsigned int i=0; i<rA.size1(); i++)
{
unsigned int row_begin = index1[i];
unsigned int row_end = index1[i+1];
unsigned int local_row_id = mglobal_to_local_indexing[i];
if ( mis_pressure_block[i] == false) //either K or G
{
for (unsigned int j=row_begin; j<row_end; j++)
{
unsigned int col_index = index2[j];
double value = values[j];
unsigned int local_col_id = mglobal_to_local_indexing[col_index];
if (mis_pressure_block[col_index] == false) //K block
K.push_back ( local_row_id, local_col_id, value);
else //G block
G.push_back ( local_row_id, local_col_id, value);
}
}
else //either D or S
{
for (unsigned int j=row_begin; j<row_end; j++)
{
unsigned int col_index = index2[j];
double value = values[j];
unsigned int local_col_id = mglobal_to_local_indexing[col_index];
if (mis_pressure_block[col_index] == false) //D block
D.push_back ( local_row_id, local_col_id, value);
else //S block
L.push_back ( local_row_id, local_col_id, value);
}
}
}
//allocate the Schur complement
ConstructSystemMatrix(S,G,D,L);
VectorType diagK (mother_indices.size() );
ComputeDiagonalByLumping (K,diagK);
//fill the Schur complement
CalculateShurComplement(S,K,G,D,L,diagK);
}
else //allocation is not needed so only do copying
{
for (unsigned int i=0; i<rA.size1(); i++)
{
unsigned int row_begin = index1[i];
unsigned int row_end = index1[i+1];
unsigned int local_row_id = mglobal_to_local_indexing[i];
if ( mis_pressure_block[i] == false ) //either K or G
{
for (unsigned int j=row_begin; j<row_end; j++)
{
unsigned int col_index = index2[j];
double value = values[j];
unsigned int local_col_id = mglobal_to_local_indexing[col_index];
if (mis_pressure_block[col_index] == false) //K block
K( local_row_id, local_col_id) = value;
else //G block
G( local_row_id, local_col_id) = value;
}
}
else //either D or S
{
for (unsigned int j=row_begin; j<row_end; j++)
{
unsigned int col_index = index2[j];
double value = values[j];
unsigned int local_col_id = mglobal_to_local_indexing[col_index];
if (mis_pressure_block[col_index] == false) //D block
D( local_row_id, local_col_id) = value;
else //S block
L( local_row_id, local_col_id) = value;
}
}
}
VectorType diagK (mother_indices.size() );
ComputeDiagonalByLumping (K,diagK);
//fill the Schur complement
CalculateShurComplement(S,K,G,D,L,diagK);
}
KRATOS_CATCH ("")
}
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
/// Pointers to the linear solvers used for the UU and PP blocks.
typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer mpsolver_UU_block;
typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer mpsolver_PP_block;
unsigned int mm;
bool mBlocksAreAllocated;
bool mis_initialized;
DenseVector<unsigned int> mpressure_indices;
DenseVector<unsigned int> mother_indices;
DenseVector<int> mglobal_to_local_indexing;
DenseVector<int> mis_pressure_block;
SparseMatrixType mK;
SparseMatrixType mG;
SparseMatrixType mD;
SparseMatrixType mS;
VectorType mrp;
VectorType mru;
VectorType mp;
VectorType mu;
///@}
///@name Private Operators
///@{
inline void GeneratePlaneRotation (const double &dx, const double &dy, double &cs, double &sn)
{
if (dy == 0.0)
{
cs = 1.0;
sn = 0.0;
}
else if (dx == 0.0)
{
cs = 0.0;
sn = 1.0;
}
else
{
const double rnorm = 1.0/sqrt (dx*dx + dy*dy);
cs = fabs (dx) * rnorm;
sn = cs * dy / dx;
}
}
inline void ApplyPlaneRotation (double &dx, double &dy, const double &cs, const double &sn)
{
double temp = cs * dx + sn * dy;
dy = cs * dy - sn * dx;
dx = temp;
}
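// Descriptive note: the two helpers above form and apply Givens rotations.
// GeneratePlaneRotation picks (cs, sn) so that
//   [ cs  sn ] [dx]   [r]
//   [-sn  cs ] [dy] = [0]
// gmres_solve uses them to keep the Hessenberg matrix H upper triangular,
// which makes the residual estimate available as |s(i+1)| at each step.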
void Update (VectorType& y, VectorType& x, int k, Matrix& h, VectorType& s, std::vector< VectorType >& V)
{
for (unsigned int i=0; i<s.size(); i++)
y[i] = s[i];
/* for(unsigned int i=s.size(); i<y.size(); i++)
y[i] = 0.0;*/
// Backsolve:
for (int i = k; i >= 0; --i)
{
y (i) /= h (i,i);
for (int j = i - 1; j >= 0; --j)
y (j) -= h (j,i) * y (i);
}
//create new search dir
for (int j = 0; j <= k; ++j)
TSparseSpaceType::UnaliasedAdd (x, y[j], V[j]); // x += y(j)* V[j];
}
int gmres_solve ( SparseMatrixType& A,
VectorType& x,
const VectorType& b,
unsigned int& m,
unsigned int& max_iter,
double& tol)
{
const unsigned int dim = A.size1();
if (m == 0)
KRATOS_THROW_ERROR (std::logic_error,"the dimension of the GMRES krylov space can not be set to zero. Please change the value of m","")
if (m > max_iter)
m = max_iter;
VectorType s (m+1), sn (m+1), w (dim), r (dim), y (m+1);
VectorType cs (m+1);
Matrix H (m+1, m+1);
int restart = 0;
//preconditioner solve b and store in Minv_b
VectorType preconditioned_b (dim);
//TSparseSpaceType::Copy(b, preconditioned_b); //preconditioned_b=b
//apply preconditioner
SolveBlockPreconditioner (b,preconditioned_b);
double normb = TSparseSpaceType::TwoNorm (preconditioned_b);
/*KRATOS_WATCH(normb);*/
if (normb < 1e-16) //ARBITRARY SMALL NUMBER!
{
normb = 1e-16;
}
//r = b - Ax
TSparseSpaceType::Mult (A,x,r);
TSparseSpaceType::ScaleAndAdd (1.00, b, -1.00, r); //r = b - r
//apply preconditioner and overwrite r
SolveBlockPreconditioner (r,r);
const double rel_tol = tol*normb;
double beta = TSparseSpaceType::TwoNorm (r);
if (beta <= rel_tol) //finalize!
{
tol = beta / normb;
max_iter = 0;
return 0;
}
unsigned int j;
// int err = 0;
std::vector< VectorType > V (m+1);
for (j = 0; j <= m; ++j)
V[j].resize (dim,false);
j = 1;
while (j <= max_iter)
{
TSparseSpaceType::Assign (V[0], 1.0/beta, r); //V[0] = r /(T)beta;
TSparseSpaceType::SetToZero (s);
s[0] = beta;
for (unsigned int i = 0; (i < m) && (j <= max_iter); ++i, ++j)
{
TSparseSpaceType::Mult (A,V[i],w); //w = A*V[i];
//apply preconditioner and overwrite r
SolveBlockPreconditioner (w,w);
for (unsigned int k = 0; k <= i; k++)
{
H (k, i) = TSparseSpaceType::Dot (V[k], w);
w -= H (k, i) * V[k];
}
const double normw = TSparseSpaceType::TwoNorm (w);
H (i+1, i) = normw;
/*KRATOS_WATCH(normw);*/
// This breakdown is a good one ...
if (normw == 0)
TSparseSpaceType::Copy (V[i+1], w); //V[i+1] = w;
else
TSparseSpaceType::Assign (V[i+1], 1.0/normw, w); //V[i+1] = w / normw;
for (unsigned int k = 0; k < i; k++)
ApplyPlaneRotation (H (k,i), H (k+1,i), cs (k), sn (k) );
GeneratePlaneRotation (H (i,i), H (i+1,i), cs (i), sn (i) );
ApplyPlaneRotation (H (i,i), H (i+1,i), cs (i), sn (i) );
ApplyPlaneRotation (s (i), s (i+1), cs (i), sn (i) );
beta = fabs (s (i+1) );
std::cout << "iter = " << j << " estimated res ratio = " << beta << std::endl;
// KRATOS_WATCH (beta);
if (beta <= rel_tol)
{
this->Update (y, x, i, H, s, V);
return 0;
}
}
this->Update (y,x, m - 1, H, s, V);
//r = b - Ax
TSparseSpaceType::Mult (A,x,r);
TSparseSpaceType::ScaleAndAdd (1.00, b, -1.00, r); //r = b - r
beta = TSparseSpaceType::TwoNorm (r);
std::cout << "number of iterations at convergence = " << j << std::endl;
if (beta < rel_tol)
{
return 0;
}
++restart;
}
// err = 1;
return 1;
}
//this function extracts from a vector which has the size of the
//overall r, the part that corresponds to u-dofs
void GetUPart (const VectorType& rtot, VectorType& ru)
{
if (ru.size() != mother_indices.size() )
ru.resize (mother_indices.size(), false);
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(ru.size()); i++)
ru[i] = rtot[mother_indices[i]];
}
//this function extracts from a vector which has the size of the
//overall r, the part that corresponds to p-dofs
void GetPPart (const VectorType& rtot, VectorType& rp)
{
if (rp.size() != mpressure_indices.size() )
rp.resize (mpressure_indices.size(), false);
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(rp.size()); i++)
rp[i] = rtot[mpressure_indices[i]];
}
void WriteUPart (VectorType& rtot, const VectorType& ru)
{
#pragma omp parallel for
for (int i = 0; i< static_cast<int>(ru.size()); i++)
rtot[mother_indices[i]] = ru[i];
}
void WritePPart (VectorType& rtot, const VectorType& rp)
{
#pragma omp parallel for
for (int i = 0; i< static_cast<int>(rp.size()); i++)
rtot[mpressure_indices[i]] = rp[i];
}
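//NOTE: despite its name, this computes for each row the Euclidean norm of
//its entries, sqrt(sum_j A(i,j)^2), and stores it in diagA; it is a
//lumping-like diagonal scaling rather than a row-sum lumped diagonal.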
void ComputeDiagonalByLumping (SparseMatrixType& A,VectorType& diagA)
{
if (diagA.size() != A.size1() )
diagA.resize (A.size1() );
//get access to A data
const std::size_t* index1 = A.index1_data().begin();
// const std::size_t* index2 = A.index2_data().begin();
const double* values = A.value_data().begin();
#pragma omp parallel for
for (int i=0; i< static_cast<int>(A.size1()); i++)
{
unsigned int row_begin = index1[i];
unsigned int row_end = index1[i+1];
double temp = 0.0;
for (unsigned int j=row_begin; j<row_end; j++)
temp += values[j]*values[j];
diagA[i] = sqrt(temp);
}
}
double CheckMatrix (SparseMatrixType& A)
{
//get access to A data
const std::size_t* index1 = A.index1_data().begin();
const std::size_t* index2 = A.index2_data().begin();
const double* values = A.value_data().begin();
double norm = 0.0;
for (unsigned int i=0; i<A.size1(); i++)
{
unsigned int row_begin = index1[i];
unsigned int row_end = index1[i+1];
if (row_end - row_begin == 0)
std::cout << "line " << i << " has no elements" << std::endl;
//KRATOS_THROW_ERROR(std::logic_error, "line found with no entries on line ",i)
for (unsigned int j=row_begin; j<row_end; j++)
{
if (index2[j]>A.size2() )
KRATOS_THROW_ERROR (std::logic_error, "column index above the size of A","")
norm += values[j]*values[j];
}
}
return sqrt (norm);
}
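//Applies one block Gauss-Seidel sweep of the preconditioner for the
//saddle-point system [K G; D S]:
//  u = K^-1 * ru
//  rp <- rp - D*u
//  p = S^-1 * rp
//  u <- u + Diag(K)^-1 * G * p
//where K^-1 and S^-1 are the approximate inverses provided by the
//velocity and pressure block solvers.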
void SolveBlockPreconditioner (const VectorType& rtot, VectorType& x)
{
boost::numeric::ublas::noalias(mp) = boost::numeric::ublas::zero_vector<double>(mpressure_indices.size()); //mp holds the p-dofs
boost::numeric::ublas::noalias(mu) = boost::numeric::ublas::zero_vector<double>(mother_indices.size()); //mu holds the u-dofs
VectorType uaux (mother_indices.size() );
VectorType paux (mpressure_indices.size() );
//get diagonal of K (to be removed)
VectorType diagK (mother_indices.size() );
ComputeDiagonalByLumping (mK,diagK);
//get the u and p residuals
GetUPart (rtot,mru);
GetPPart (rtot,mrp);
//solve u block
mpsolver_UU_block->Solve (mK,mu,mru);
//correct pressure block
//rp -= D*u
TSparseSpaceType::Mult (mD,mu,paux);
TSparseSpaceType::UnaliasedAdd (mrp,-1.0,paux);
//solve pressure
//p = S^-1 * rp
mpsolver_PP_block->Solve (mS,mp,mrp);
//correct u block
//u = G*p
TSparseSpaceType::Mult (mG,mp,uaux);
#pragma omp parallel for
for (int i=0; i< static_cast<int>(mu.size()); i++)
mu[i] += uaux[i]/diagK[i];
//write back solution
WriteUPart (x,mu);
WritePPart (x,mp);
}
/// Compute the Pressure System Matrix
/**
* Compute the system matrix A = L - D*Inv(Diag(S))*G (the pressure Schur
* complement). The terms of each row are produced in arbitrary order, so
* every row is accumulated in a temporary variable, sorted by column index
* and then copied into the input matrix A.
*/
void CalculateShurComplement (
SparseMatrixType& A,
SparseMatrixType& K,
SparseMatrixType& rG,
SparseMatrixType& rD,
SparseMatrixType& rL,
VectorType& diagK
)
{
// Retrieve matrices
// Compute Inv(Diag(S))
VectorType& rIDiagS = diagK;
//KRATOS_WATCH(804)
typedef DenseVector<int> IndexVector;
//typedef typename SparseMatrixType::iterator1 OuterIt;
//typedef typename SparseMatrixType::iterator2 InnerIt;
typedef typename boost::numeric::ublas::matrix_row< SparseMatrixType > RowType;
int DiagSize = int (diagK.size()); // to avoid comparison between int & unsigned int
#pragma omp parallel for
for ( int i = 0; i < DiagSize; i++)
rIDiagS[i] = 1.0/diagK[i];
OpenMPUtils::PartitionVector Partition;
int NumThreads = OpenMPUtils::GetNumThreads();
OpenMPUtils::DivideInPartitions (A.size1(),NumThreads,Partition);
#pragma omp parallel
{
int k = OpenMPUtils::ThisThread();
VectorType CurrentRow(K.size2());
for (unsigned int i = 0; i < rL.size1(); i++) CurrentRow[i] = 0.0;
IndexVector Next = IndexVector(rL.size1());
//IndexVector& Next = *pNext; // Keeps track of which columns were filled
for (unsigned int m=0; m < rL.size1(); m++) Next[m] = -1;
std::size_t NumTerms = 0; // Full positions in a row
std::vector<unsigned int> UsedCols = std::vector<unsigned int>();
// std::vector<unsigned int>& UsedCols = *pUsedCols;
UsedCols.reserve (rL.size1());
for ( int RowIndex = Partition[k] ;
RowIndex != Partition[k+1] ; RowIndex++ )
{
RowType RowD (rD,RowIndex);
RowType RowL (rL,RowIndex);
int head = -2;
std::size_t Length = 0;
// Write L in A
for ( typename RowType::iterator ItL = RowL.begin();
ItL != RowL.end(); ItL++ )
{
CurrentRow (ItL.index() ) = *ItL;
if ( Next[ItL.index()] == -1)
{
Next[ItL.index()] = head;
head = ItL.index();
Length++;
}
}
// Subtract D*Inv(Diag(S))*G
for ( typename RowType::iterator ItD = RowD.begin();
ItD != RowD.end(); ItD++ )
{
RowType RowG (rG,ItD.index() );
for ( typename RowType::iterator ItG = RowG.begin();
ItG != RowG.end(); ItG++ )
{
CurrentRow[ItG.index()] -= (*ItD) * rIDiagS[ItD.index()] * (*ItG);
if ( Next[ItG.index()] == -1)
{
Next[ItG.index()] = head;
head = ItG.index();
Length++;
}
}
}
// Identify full terms for ordering
for ( std::size_t i = 0; i < Length; i++)
{
if ( Next[head] != -1 )
{
UsedCols.push_back (head);
NumTerms++;
}
int temp = head;
head = Next[head];
// Clear 'Next' for next iteration
Next[temp] = -1;
}
// Sort Column indices
SortCols (UsedCols,NumTerms);
// Fill matrix row, then clean temporary variables.
RowType RowA (A,RowIndex);
std::size_t n = 0;
unsigned int Col;
for ( typename RowType::iterator ItA = RowA.begin(); ItA != RowA.end(); ItA++)
{
Col = UsedCols[n++];
*ItA = CurrentRow[Col];
CurrentRow[Col] = 0;
}
NumTerms = 0;
UsedCols.resize (0,false);
}
}
//KRATOS_WATCH(896)
//add stabilization matrix L
/* const std::size_t* L_index1 = rL.index1_data().begin();
const std::size_t* L_index2 = rL.index2_data().begin();
const double* L_values = rL.value_data().begin();
for (unsigned int i=0; i<rL.size1(); i++)
{
unsigned int row_begin = L_index1[i];
unsigned int row_end = L_index1[i+1];
diagA[i] = 0.0;
for (unsigned int j=row_begin; j<row_end; j++)
{
unsigned int col = L_index2[j];
rS(i,col) += L_values[j];
}
}*/
}
/// Helper for the system matrix functions: in-place comb sort of the column indices
void SortCols (
std::vector<unsigned int>& ColList,
std::size_t& NumCols)
{
bool swap = true;
unsigned int d = NumCols;
int temp;
while ( swap || d > 1 )
{
swap = false;
d = (d+1) /2;
for ( unsigned int i=0; i< (NumCols - d); i++)
if ( ColList[i+d] < ColList[i] )
{
temp = ColList[i+d];
ColList[i+d] = ColList[i];
ColList[i] = temp;
swap = true;
}
}
}
/// Identify non-zero terms in the system matrix
void ConstructSystemMatrix(
SparseMatrixType& A,
SparseMatrixType& rG,
SparseMatrixType& rD,
SparseMatrixType& rL
)
{
typedef DenseVector<int> IndexVector;
typedef OpenMPUtils::PartitionVector PartitionVector;
//typedef typename SparseMatrixType::iterator1 OuterIt;
//typedef typename SparseMatrixType::iterator2 InnerIt;
typedef typename boost::numeric::ublas::matrix_row< SparseMatrixType > RowType;
PartitionVector Partition;
int NumThreads = OpenMPUtils::GetNumThreads();
OpenMPUtils::DivideInPartitions(A.size1(),NumThreads,Partition);
for ( int k = 0 ; k < NumThreads ; k++)
{
// This code is serial, the pragma is here to ensure that each
// row block is assigned to the processor that will fill it
#pragma omp parallel
if ( OpenMPUtils::ThisThread() == k)
{
// Kratos::shared_ptr< IndexVector > pNext( new IndexVector(rL.size1() ) );
// IndexVector& Next = *pNext; // Keeps track of which columns were filled
IndexVector Next(rL.size1());
for (unsigned int m = 0; m < rL.size1(); m++) Next[m] = -1;
std::size_t NumTerms = 0; // Full positions in a row
std::vector<unsigned int> UsedCols;
// std::vector<unsigned int>& UsedCols = *pUsedCols;
UsedCols.reserve(rL.size1());
for ( int RowIndex = Partition[k] ;
RowIndex != Partition[k+1] ; RowIndex++ )
{
RowType RowD(rD,RowIndex);
RowType RowL(rL,RowIndex);
int head = -2;
std::size_t Length = 0;
// Terms filled by L
for ( typename RowType::iterator ItL = RowL.begin();
ItL != RowL.end(); ItL++ )
{
if ( Next[ItL.index()] == -1)
{
Next[ItL.index()] = head;
head = ItL.index();
Length++;
}
}
// Additional terms due to D*Inv(Diag(S))*G
for ( typename RowType::iterator ItD = RowD.begin();
ItD != RowD.end(); ItD++ )
{
RowType RowG(rG,ItD.index());
for ( typename RowType::iterator ItG = RowG.begin();
ItG != RowG.end(); ItG++ )
{
if ( Next[ItG.index()] == -1)
{
Next[ItG.index()] = head;
head = ItG.index();
Length++;
}
}
}
// Identify full terms for ordering
for ( std::size_t i = 0; i < Length; i++)
{
if ( Next[head] != -1 )
{
UsedCols.push_back(head);
NumTerms++;
}
int temp = head;
head = Next[head];
// Clear 'Next' for next iteration
Next[temp] = -1;
}
// Sort Column indices
SortCols(UsedCols,NumTerms);
// Store row in matrix, clean temporary variables
for ( unsigned int i = 0; i < NumTerms; i++)
{
A.push_back(RowIndex,UsedCols[i],0);
}
NumTerms = 0;
UsedCols.resize(0,false);
}
}
}
}
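//NOTE: ConstructSystemMatrix() and CalculateShurComplement() are a two-pass
//pair: the first pass only fixes the sparsity pattern of A (pushing back
//explicit zeros), the second fills the values. Both walk the row pattern
//with the same linked-list trick: Next[] chains the columns touched so far
//and 'head' points at the most recent one, so the used entries can be
//listed and reset in time proportional to the row length.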
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; // Class MixedUPLinearSolver
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
inline std::istream& operator >> (std::istream& IStream,
MixedUPLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>& rThis)
{
return IStream;
}
/// output stream function
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
inline std::ostream& operator << (std::ostream& rOStream,
const MixedUPLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>& rThis)
{
rThis.PrintInfo (rOStream);
rOStream << std::endl;
rThis.PrintData (rOStream);
return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_MIXEDUP_SOLVER_H_INCLUDED defined
|
valid.res2.src.h | #pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_64_56_56_64_3_3.h"
#include "gen_ukr_A4B2gemm_1_64_56_56_64_3_3.h"
void testrun(float* A, float* B, float* C, float* oriB){
int tid = omp_get_thread_num();
int Nx = 56;
int Ny = 56;
int Nh = 3;
long long Astrides[6] = {0,1,2,3,4,5};
int b1 = 0;
for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
}
}
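// The loop above repacks the weights: oriB is laid out as [f][c*w*h], and B
// is rewritten in panels of 16 consecutive f values so the micro-kernel can
// load them contiguously. Each 8x8 tile is moved with an AVX transpose
// (assumption: transpose8x8_avx(src, dst, ld_src, ld_dst) comes from
// transpose.h and uNf/uNc/uNw/uNh are the layer dimensions from ukr.h).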
#pragma omp barrier // begin push-button generated block
for(int xy5=0;xy5<3136+0;xy5+=3136)
{
for(int f5=0;f5<64+0;f5+=64)
{
for(int c5=0;c5<64+0;c5+=64)
{
for(int c4=c5;c4<min(64, 64+c5);c4+=32)
{
for(int f4=f5;f4<min(64, 64+f5);f4+=Tf2)
{
for(int xy4=xy5;xy4<min(3136, 3136+xy5);xy4+=3136)
{
for(int c3=c4;c3<min(64, 32+c4);c3+=Tc1)
{
for(int f3=f4;f3<min(64, Tf2+f4);f3+=Tf2)
{
for(int xy3=xy4;xy3<min(3136, 3136+xy4);xy3+=Txy3)
{
for(int xy2=xy3;xy2<min(3136, Txy3+xy3);xy2+=6)
{
for(int f2=f3;f2<min(64, Tf2+f3);f2+=16)
{
for(int c2=c3;c2<min(64, Tc1+c3);c2+=Tc1)
{
for(int c1=c2;c1<min(64, Tc1+c2);c1+=Tc1)
{
for(int xy1=xy2;xy1<min(3136, 6+xy2);xy1+=6)
{
for(int f1=f2;f1<min(64, 16+f2);f1+=16)
{
int ctile=min(Tc1, 64-c1);
int x1=xy1/56;
int y1=xy1%56/1;
int c1_1=c1/1;
int c1_2=c1%1/1;
int kf1_1=f1/16;
int kf1_2=f1%16/1;
int of1_1=f1/1;
int of1_2=f1%1/1;
int offsetA=0+b1*215296+c1_1*3364+1*x1*58+1*y1*1+c1_2*1;
int offsetB=0+kf1_1*9216+c1*144+0*48+0*16+kf1_2*1;
int offsetC=0+b1*200704+of1_1*3136+x1*56+y1*1+of1_2*1;
if(56-y1>=6){
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
else if(56*56-xy1>=6){
for(int sti=56-y1;sti<6;sti+=1)
{
Astrides[sti]+=2;
}
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
for(int sti=56-y1;sti<6;sti+=1)
{
Astrides[sti]-=2;
}
}
else{
cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
// end push button generated block
} |
pnm.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP N N M M %
% P P NN N MM MM %
% PPPP N N N M M M %
% P N NN M M %
% P N N M M %
% %
% %
% Read/Write PBMPlus Portable Anymap Image Format %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2009 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/static.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/module.h"
/*
Forward declarations.
*/
static MagickBooleanType
WritePNMImage(const ImageInfo *,Image *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P N M %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPNM() returns MagickTrue if the image format type, identified by the
% magick string, is PNM.
%
% The format of the IsPNM method is:
%
% MagickBooleanType IsPNM(const unsigned char *magick,const size_t extent)
%
% A description of each parameter follows:
%
% o magick: This string is generally the first few bytes of an image file
% or blob.
%
% o extent: Specifies the extent of the magick string.
%
*/
static MagickBooleanType IsPNM(const unsigned char *magick,const size_t extent)
{
if (extent < 2)
return(MagickFalse);
if ((*magick == (unsigned char) 'P') &&
((magick[1] == '1') || (magick[1] == '2') || (magick[1] == '3') ||
(magick[1] == '4') || (magick[1] == '5') || (magick[1] == '6') ||
(magick[1] == '7') || (magick[1] == 'F') || (magick[1] == 'f')))
return(MagickTrue);
return(MagickFalse);
}
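/*
A minimal usage sketch (hypothetical caller, not part of this coder):
probe the first bytes of a stream before dispatching to the PNM decoder.

unsigned char magick[2];
if ((fread(magick,1,2,file) == 2) && (IsPNM(magick,2) != MagickFalse))
image=ReadPNMImage(image_info,exception);
*/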
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P N M I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPNMImage() reads a Portable Anymap image file and returns it.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the ReadPNMImage method is:
%
% Image *ReadPNMImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline long ConstrainPixel(Image *image,const long offset,
const unsigned long extent)
{
if ((offset < 0) || (offset > (long) extent))
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
CorruptImageError,"InvalidPixel","`%s'",image->filename);
return(0);
}
return(offset);
}
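/*
PNMInteger() reads the next unsigned decimal value from the blob: it skips
leading whitespace, stores any '#' comment lines in the image "comment"
property, and for base 2 (bitmap data) returns the single digit just read.
EOF yields the value accumulated so far.
*/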
static unsigned long PNMInteger(Image *image,const unsigned int base)
{
char
*comment;
int
c;
register char
*p;
size_t
extent;
unsigned long
value;
/*
Skip any leading whitespace.
*/
extent=MaxTextExtent;
comment=(char *) NULL;
p=comment;
do
{
c=ReadBlobByte(image);
if (c == EOF)
return(0);
if (c == (int) '#')
{
/*
Read comment.
*/
if (comment == (char *) NULL)
comment=AcquireString((char *) NULL);
p=comment+strlen(comment);
for ( ; (c != EOF) && (c != (int) '\n'); p++)
{
if ((size_t) (p-comment+1) >= extent)
{
extent<<=1;
comment=(char *) ResizeQuantumMemory(comment,extent+MaxTextExtent,
sizeof(*comment));
if (comment == (char *) NULL)
break;
p=comment+strlen(comment);
}
c=ReadBlobByte(image);
*p=(char) c;
*(p+1)='\0';
}
if (comment == (char *) NULL)
return(0);
continue;
}
} while (isdigit(c) == MagickFalse);
if (comment != (char *) NULL)
{
(void) SetImageProperty(image,"comment",comment);
comment=DestroyString(comment);
}
if (base == 2)
return((unsigned long) (c-(int) '0'));
/*
Evaluate number.
*/
value=0;
do
{
value*=10;
value+=c-(int) '0';
c=ReadBlobByte(image);
if (c == EOF)
return(value);
} while (isdigit(c) != MagickFalse);
return(value);
}
static Image *ReadPNMImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
char
format;
double
quantum_scale;
Image
*image;
long
row,
y;
MagickBooleanType
status;
Quantum
*scale;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
register const unsigned char
*p;
register long
x;
register PixelPacket
*q;
register long
i;
size_t
extent,
packet_size;
ssize_t
count;
unsigned long
depth,
max_value;
ViewInfo
*image_view;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read PNM image.
*/
count=ReadBlob(image,1,(unsigned char *) &format);
do
{
/*
Initialize image structure.
*/
if ((count != 1) || (format != 'P'))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
max_value=1;
quantum_type=RGBQuantum;
quantum_scale=1.0;
format=(char) ReadBlobByte(image);
if (format != '7')
{
/*
PBM, PGM, PPM, and PNM.
*/
image->columns=PNMInteger(image,10);
image->rows=PNMInteger(image,10);
if ((format == 'f') || (format == 'F'))
{
char
scale[MaxTextExtent];
(void) ReadBlobString(image,scale);
quantum_scale=atof(scale);
}
else
{
if ((format == '1') || (format == '4'))
max_value=1; /* bitmap */
else
max_value=PNMInteger(image,10);
}
}
else
{
char
keyword[MaxTextExtent],
value[MaxTextExtent];
int
c;
register char
*p;
/*
PAM.
*/
for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image))
{
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
p=keyword;
do
{
if ((size_t) (p-keyword) < (MaxTextExtent-1))
*p++=c;
c=ReadBlobByte(image);
} while (isalnum(c));
*p='\0';
if (LocaleCompare(keyword,"endhdr") == 0)
break;
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
p=value;
while (isalnum(c) || (c == '_'))
{
if ((size_t) (p-value) < (MaxTextExtent-1))
*p++=c;
c=ReadBlobByte(image);
}
*p='\0';
/*
Assign a value to the specified keyword.
*/
if (LocaleCompare(keyword,"depth") == 0)
packet_size=(unsigned long) atol(value);
if (LocaleCompare(keyword,"height") == 0)
image->rows=(unsigned long) atol(value);
if (LocaleCompare(keyword,"maxval") == 0)
max_value=(unsigned long) atol(value);
if (LocaleCompare(keyword,"TUPLTYPE") == 0)
{
if (LocaleCompare(value,"BLACKANDWHITE") == 0)
quantum_type=GrayQuantum;
if (LocaleCompare(value,"BLACKANDWHITE_ALPHA") == 0)
{
quantum_type=GrayAlphaQuantum;
image->matte=MagickTrue;
}
if (LocaleCompare(value,"GRAYSCALE") == 0)
quantum_type=GrayQuantum;
if (LocaleCompare(value,"GRAYSCALE_ALPHA") == 0)
{
quantum_type=GrayAlphaQuantum;
image->matte=MagickTrue;
}
if (LocaleCompare(value,"RGB_ALPHA") == 0)
{
quantum_type=RGBAQuantum;
image->matte=MagickTrue;
}
if (LocaleCompare(value,"CMYK") == 0)
{
quantum_type=CMYKQuantum;
image->colorspace=CMYKColorspace;
}
if (LocaleCompare(value,"CMYK_ALPHA") == 0)
{
quantum_type=CMYKAQuantum;
image->colorspace=CMYKColorspace;
image->matte=MagickTrue;
}
}
if (LocaleCompare(keyword,"width") == 0)
image->columns=(unsigned long) atol(value);
}
}
if ((image->columns == 0) || (image->rows == 0))
ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize");
if (max_value >= 65536)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
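/*
Determine the smallest bit depth whose quantum range covers max_value
(e.g. max_value=255 gives depth 8, max_value=1023 gives depth 10).
*/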
for (depth=1; GetQuantumRange(depth) < max_value; depth++) ;
image->depth=depth;
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
/*
Convert PNM pixels to runlength-encoded MIFF packets.
*/
status=MagickTrue;
row=0;
switch (format)
{
case '1':
{
/*
Convert PBM image to pixel packets.
*/
for (y=0; y < (long) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (long) image->columns; x++)
{
q->red=(Quantum) (PNMInteger(image,2) == 0 ? QuantumRange : 0);
q->green=q->red;
q->blue=q->red;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,y,image->rows);
if (status == MagickFalse)
break;
}
}
image->type=BilevelType;
break;
}
case '2':
{
unsigned long
intensity;
/*
Convert PGM image to pixel packets.
*/
scale=(Quantum *) NULL;
if (max_value != (1U*QuantumRange))
{
/*
Compute pixel scaling table.
*/
scale=(Quantum *) AcquireQuantumMemory((size_t) max_value+1UL,
sizeof(*scale));
if (scale == (Quantum *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i <= (long) max_value; i++)
scale[i]=(Quantum) (((double) QuantumRange*i)/max_value+0.5);
}
for (y=0; y < (long) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (long) image->columns; x++)
{
intensity=PNMInteger(image,10);
if (scale != (Quantum *) NULL)
intensity=(unsigned long) scale[ConstrainPixel(image,(long)
intensity,max_value)];
q->red=(Quantum) intensity;
q->green=q->red;
q->blue=q->red;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,y,image->rows);
if (status == MagickFalse)
break;
}
}
image->type=GrayscaleType;
if (scale != (Quantum *) NULL)
scale=(Quantum *) RelinquishMagickMemory(scale);
break;
}
case '3':
{
MagickPixelPacket
pixel;
/*
Convert PNM image to pixel packets.
*/
scale=(Quantum *) NULL;
if (max_value != (1U*QuantumRange))
{
/*
Compute pixel scaling table.
*/
scale=(Quantum *) AcquireQuantumMemory((size_t) max_value+1UL,
sizeof(*scale));
if (scale == (Quantum *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i <= (long) max_value; i++)
scale[i]=(Quantum) (((double) QuantumRange*i)/max_value+0.5);
}
for (y=0; y < (long) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (long) image->columns; x++)
{
pixel.red=(MagickRealType) PNMInteger(image,10);
pixel.green=(MagickRealType) PNMInteger(image,10);
pixel.blue=(MagickRealType) PNMInteger(image,10);
if (scale != (Quantum *) NULL)
{
pixel.red=(MagickRealType) scale[ConstrainPixel(image,(long)
pixel.red,max_value)];
pixel.green=(MagickRealType) scale[ConstrainPixel(image,(long)
pixel.green,max_value)];
pixel.blue=(MagickRealType) scale[ConstrainPixel(image,(long)
pixel.blue,max_value)];
}
q->red=(Quantum) pixel.red;
q->green=(Quantum) pixel.green;
q->blue=(Quantum) pixel.blue;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,y,image->rows);
if (status == MagickFalse)
break;
}
}
if (scale != (Quantum *) NULL)
scale=(Quantum *) RelinquishMagickMemory(scale);
break;
}
case '4':
{
/*
Convert PBM raw image to pixel packets.
*/
quantum_type=GrayQuantum;
if (image->storage_class == PseudoClass)
quantum_type=IndexQuantum;
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
SetQuantumMinIsWhite(quantum_info,MagickTrue);
extent=GetQuantumExtent(image,quantum_info,quantum_type);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,1) shared(row,status,quantum_type)
#endif
for (y=0; y < (long) image->rows; y++)
{
long
offset;
MagickBooleanType
sync;
register PixelPacket
*q;
ssize_t
count;
size_t
length;
unsigned char
*pixels;
if (status == MagickFalse)
continue;
pixels=GetQuantumPixels(quantum_info);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical
#endif
{
count=ReadBlob(image,extent,pixels);
if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
(image->previous == (Image *) NULL))
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,LoadImageTag,row,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
offset=row++;
}
if (count != (ssize_t) extent)
status=MagickFalse;
q=QueueCacheViewAuthenticPixels(image_view,0,offset,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
length=ImportQuantumPixels(image,image_view,quantum_info,quantum_type,
pixels,exception);
if (length != extent)
status=MagickFalse;
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
quantum_info=DestroyQuantumInfo(quantum_info);
if (status == MagickFalse)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
SetQuantumImageType(image,quantum_type);
break;
}
case '5':
{
QuantumAny
scale;
/*
Convert PGM raw image to pixel packets.
*/
scale=GetQuantumScale(image->depth);
quantum_type=GrayQuantum;
extent=(image->depth <= 8 ? 1 : 2)*image->columns;
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,1) shared(row,status,quantum_type)
#endif
for (y=0; y < (long) image->rows; y++)
{
long
offset;
MagickBooleanType
sync;
register const unsigned char
*p;
register long
x;
register PixelPacket
*q;
ssize_t
count;
unsigned char
*pixels;
if (status == MagickFalse)
continue;
pixels=GetQuantumPixels(quantum_info);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical
#endif
{
count=ReadBlob(image,extent,pixels);
if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
(image->previous == (Image *) NULL))
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,LoadImageTag,row,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
offset=row++;
}
if (count != (ssize_t) extent)
status=MagickFalse;
q=QueueCacheViewAuthenticPixels(image_view,0,offset,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
p=pixels;
if ((image->depth == 8) || (image->depth == 16))
(void) ImportQuantumPixels(image,image_view,quantum_info,
quantum_type,pixels,exception);
else
if (image->depth <= 8)
{
unsigned char
pixel;
for (x=0; x < (long) image->columns; x++)
{
p=PushCharPixel(p,&pixel);
q->red=ScaleAnyToQuantum(pixel,image->depth,scale);
q->green=q->red;
q->blue=q->red;
q++;
}
}
else
{
unsigned short
pixel;
for (x=0; x < (long) image->columns; x++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
q->red=ScaleAnyToQuantum(pixel,image->depth,scale);
q->green=q->red;
q->blue=q->red;
q++;
}
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
quantum_info=DestroyQuantumInfo(quantum_info);
if (status == MagickFalse)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
SetQuantumImageType(image,quantum_type);
break;
}
case '6':
{
ImageType
type;
QuantumAny
scale;
/*
Convert PNM raster image to pixel packets.
*/
type=BilevelType;
quantum_type=RGBQuantum;
extent=3*(image->depth <= 8 ? 1 : 2)*image->columns;
scale=GetQuantumScale(image->depth);
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,1) shared(row,status,type)
#endif
for (y=0; y < (long) image->rows; y++)
{
long
offset;
MagickBooleanType
sync;
register long
x;
register PixelPacket
*q;
ssize_t
count;
size_t
length;
unsigned char
*pixels;
if (status == MagickFalse)
continue;
pixels=GetQuantumPixels(quantum_info);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical
#endif
{
count=ReadBlob(image,extent,pixels);
if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
(image->previous == (Image *) NULL))
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,LoadImageTag,row,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
offset=row++;
}
if (count != (ssize_t) extent)
status=MagickFalse;
q=QueueCacheViewAuthenticPixels(image_view,0,offset,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
if ((image->depth == 8) || (image->depth == 16))
{
length=ImportQuantumPixels(image,image_view,quantum_info,
quantum_type,pixels,exception);
if (length != extent)
status=MagickFalse;
}
else
if (image->depth <= 8)
{
unsigned char
pixel;
register PixelPacket
*r;
r=q;
for (x=0; x < (long) image->columns; x++)
{
p=PushCharPixel(p,&pixel);
r->red=ScaleAnyToQuantum(pixel,image->depth,scale);
p=PushCharPixel(p,&pixel);
r->green=ScaleAnyToQuantum(pixel,image->depth,scale);
p=PushCharPixel(p,&pixel);
r->blue=ScaleAnyToQuantum(pixel,image->depth,scale);
r++;
}
}
else
{
unsigned short
pixel;
register PixelPacket
*r;
r=q;
for (x=0; x < (long) image->columns; x++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
r->red=ScaleAnyToQuantum(pixel,image->depth,scale);
p=PushShortPixel(MSBEndian,p,&pixel);
r->green=ScaleAnyToQuantum(pixel,image->depth,scale);
p=PushShortPixel(MSBEndian,p,&pixel);
r->blue=ScaleAnyToQuantum(pixel,image->depth,scale);
r++;
}
}
if ((type == BilevelType) || (type == GrayscaleType))
for (x=0; x < (long) image->columns; x++)
{
if ((type == BilevelType) &&
(IsMonochromePixel(q) == MagickFalse))
type=IsGrayPixel(q) == MagickFalse ? UndefinedType :
GrayscaleType;
if ((type == GrayscaleType) && (IsGrayPixel(q) == MagickFalse))
type=UndefinedType;
if ((type != BilevelType) && (type != GrayscaleType))
break;
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
quantum_info=DestroyQuantumInfo(quantum_info);
if (status == MagickFalse)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
if (type != UndefinedType)
image->type=type;
break;
}
case '7':
{
register IndexPacket
*indexes;
QuantumAny
scale;
/*
Convert PAM raster image to pixel packets.
*/
scale=GetQuantumScale(image->depth);
switch (quantum_type)
{
case GrayQuantum:
case GrayAlphaQuantum:
{
extent=(image->depth <= 8 ? 1 : 2)*image->columns;
break;
}
case CMYKQuantum:
case CMYKAQuantum:
{
extent=4*(image->depth <= 8 ? 1 : 2)*image->columns;
break;
}
default:
{
extent=3*(image->depth <= 8 ? 1 : 2)*image->columns;
break;
}
}
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,1) shared(row,status,quantum_type)
#endif
for (y=0; y < (long) image->rows; y++)
{
long
offset;
MagickBooleanType
sync;
register const unsigned char
*p;
register long
x;
register PixelPacket
*q;
ssize_t
count;
unsigned char
*pixels;
if (status == MagickFalse)
continue;
pixels=GetQuantumPixels(quantum_info);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical
#endif
{
count=ReadBlob(image,extent,pixels);
if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
(image->previous == (Image *) NULL))
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,LoadImageTag,row,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
offset=row++;
}
if (count != (ssize_t) extent)
status=MagickFalse;
q=QueueCacheViewAuthenticPixels(image_view,0,offset,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
p=pixels;
if ((image->depth == 8) || (image->depth == 16))
(void) ImportQuantumPixels(image,image_view,quantum_info,
quantum_type,pixels,exception);
else
switch (quantum_type)
{
case GrayQuantum:
case GrayAlphaQuantum:
{
if (image->depth <= 8)
{
unsigned char
pixel;
for (x=0; x < (long) image->columns; x++)
{
p=PushCharPixel(p,&pixel);
q->red=ScaleAnyToQuantum(pixel,image->depth,scale);
q->green=q->red;
q->blue=q->red;
if (image->matte != MagickFalse)
{
p=PushCharPixel(p,&pixel);
q->opacity=ScaleAnyToQuantum(pixel,image->depth,
scale);
}
q++;
}
}
else
{
unsigned short
pixel;
for (x=0; x < (long) image->columns; x++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
q->red=ScaleAnyToQuantum(pixel,image->depth,scale);
q->green=q->red;
q->blue=q->red;
if (image->matte != MagickFalse)
{
p=PushShortPixel(MSBEndian,p,&pixel);
q->opacity=ScaleAnyToQuantum(pixel,image->depth,
scale);
}
q++;
}
}
break;
}
case CMYKQuantum:
case CMYKAQuantum:
{
if (image->depth <= 8)
{
unsigned char
pixel;
for (x=0; x < (long) image->columns; x++)
{
p=PushCharPixel(p,&pixel);
q->red=ScaleAnyToQuantum(pixel,image->depth,scale);
p=PushCharPixel(p,&pixel);
q->green=ScaleAnyToQuantum(pixel,image->depth,scale);
p=PushCharPixel(p,&pixel);
q->blue=ScaleAnyToQuantum(pixel,image->depth,scale);
p=PushCharPixel(p,&pixel);
indexes[x]=ScaleAnyToQuantum(pixel,image->depth,scale);
if (image->matte != MagickFalse)
{
p=PushCharPixel(p,&pixel);
q->opacity=ScaleAnyToQuantum(pixel,image->depth,
scale);
}
q++;
}
}
else
{
unsigned short
pixel;
for (x=0; x < (long) image->columns; x++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
q->red=ScaleAnyToQuantum(pixel,image->depth,scale);
p=PushShortPixel(MSBEndian,p,&pixel);
q->green=ScaleAnyToQuantum(pixel,image->depth,scale);
p=PushShortPixel(MSBEndian,p,&pixel);
q->blue=ScaleAnyToQuantum(pixel,image->depth,scale);
p=PushShortPixel(MSBEndian,p,&pixel);
indexes[x]=ScaleAnyToQuantum(pixel,image->depth,scale);
if (image->matte != MagickFalse)
{
p=PushShortPixel(MSBEndian,p,&pixel);
q->opacity=ScaleAnyToQuantum(pixel,image->depth,
scale);
}
q++;
}
}
break;
}
default:
{
if (image->depth <= 8)
{
unsigned char
pixel;
for (x=0; x < (long) image->columns; x++)
{
p=PushCharPixel(p,&pixel);
q->red=ScaleAnyToQuantum(pixel,image->depth,scale);
p=PushCharPixel(p,&pixel);
q->green=ScaleAnyToQuantum(pixel,image->depth,scale);
p=PushCharPixel(p,&pixel);
q->blue=ScaleAnyToQuantum(pixel,image->depth,scale);
if (image->matte != MagickFalse)
{
p=PushCharPixel(p,&pixel);
q->opacity=ScaleAnyToQuantum(pixel,image->depth,
scale);
}
q++;
}
}
else
{
unsigned short
pixel;
for (x=0; x < (long) image->columns; x++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
q->red=ScaleAnyToQuantum(pixel,image->depth,scale);
p=PushShortPixel(MSBEndian,p,&pixel);
q->green=ScaleAnyToQuantum(pixel,image->depth,scale);
p=PushShortPixel(MSBEndian,p,&pixel);
q->blue=ScaleAnyToQuantum(pixel,image->depth,scale);
if (image->matte != MagickFalse)
{
p=PushShortPixel(MSBEndian,p,&pixel);
q->opacity=ScaleAnyToQuantum(pixel,image->depth,
scale);
}
q++;
}
}
break;
}
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
quantum_info=DestroyQuantumInfo(quantum_info);
if (status == MagickFalse)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
SetQuantumImageType(image,quantum_type);
break;
}
case 'F':
case 'f':
{
/*
Convert PFM raster image to pixel packets.
*/
quantum_type=format == 'f' ? GrayQuantum : RGBQuantum;
image->endian=quantum_scale < 0.0 ? LSBEndian : MSBEndian;
image->depth=32;
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
status=SetQuantumDepth(image,quantum_info,32);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
SetQuantumScale(quantum_info,(MagickRealType) QuantumRange*
fabs(quantum_scale));
extent=GetQuantumExtent(image,quantum_info,quantum_type);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,1) shared(row,status,quantum_type)
#endif
for (y=0; y < (long) image->rows; y++)
{
long
offset;
MagickBooleanType
sync;
register PixelPacket
*q;
ssize_t
count;
size_t
length;
unsigned char
*pixels;
if (status == MagickFalse)
continue;
pixels=GetQuantumPixels(quantum_info);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical
#endif
{
count=ReadBlob(image,extent,pixels);
if ((image->progress_monitor != (MagickProgressMonitor) NULL) &&
(image->previous == (Image *) NULL))
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,LoadImageTag,row,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
offset=row++;
}
if ((size_t) count != extent)
status=MagickFalse;
q=QueueCacheViewAuthenticPixels(image_view,0,(long) (image->rows-
offset-1),image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
length=ImportQuantumPixels(image,image_view,quantum_info,quantum_type,
pixels,exception);
if (length != extent)
status=MagickFalse;
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
quantum_info=DestroyQuantumInfo(quantum_info);
if (status == MagickFalse)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
SetQuantumImageType(image,quantum_type);
break;
}
default:
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
if (EOFBlob(image) != MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
"UnexpectedEndOfFile","`%s'",image->filename);
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if ((format == '1') || (format == '2') || (format == '3'))
do
{
/*
Skip to end of line.
*/
count=ReadBlob(image,1,(unsigned char *) &format);
if (count == 0)
break;
if ((count != 0) && (format == 'P'))
break;
} while (format != '\n');
count=ReadBlob(image,1,(unsigned char *) &format);
if ((count == 1) && (format == 'P'))
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while ((count == 1) && (format == 'P'));
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
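/*
Usage sketch (hedged: via the public MagickCore API rather than calling
ReadPNMImage() directly, which is reached through the coder registry):

ExceptionInfo exception;
ImageInfo *image_info;
Image *image;
GetExceptionInfo(&exception);
image_info=CloneImageInfo((ImageInfo *) NULL);
(void) CopyMagickString(image_info->filename,"image.pnm",MaxTextExtent);
image=ReadImage(image_info,&exception);
*/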
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P N M I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPNMImage() adds properties for the PNM image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPNMImage method is:
%
% unsigned long RegisterPNMImage(void)
%
*/
ModuleExport unsigned long RegisterPNMImage(void)
{
MagickInfo
*entry;
entry=SetMagickInfo("PAM");
entry->decoder=(DecodeImageHandler *) ReadPNMImage;
entry->encoder=(EncodeImageHandler *) WritePNMImage;
entry->description=ConstantString("Common 2-dimensional bitmap format");
entry->module=ConstantString("PNM");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("PBM");
entry->decoder=(DecodeImageHandler *) ReadPNMImage;
entry->encoder=(EncodeImageHandler *) WritePNMImage;
entry->description=ConstantString("Portable bitmap format (black and white)");
entry->module=ConstantString("PNM");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("PFM");
entry->decoder=(DecodeImageHandler *) ReadPNMImage;
entry->encoder=(EncodeImageHandler *) WritePNMImage;
entry->description=ConstantString("Portable float format");
entry->module=ConstantString("PFM");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("PGM");
entry->decoder=(DecodeImageHandler *) ReadPNMImage;
entry->encoder=(EncodeImageHandler *) WritePNMImage;
entry->description=ConstantString("Portable graymap format (gray scale)");
entry->module=ConstantString("PNM");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("PNM");
entry->decoder=(DecodeImageHandler *) ReadPNMImage;
entry->encoder=(EncodeImageHandler *) WritePNMImage;
entry->magick=(IsImageFormatHandler *) IsPNM;
entry->description=ConstantString("Portable anymap");
entry->module=ConstantString("PNM");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("PPM");
entry->decoder=(DecodeImageHandler *) ReadPNMImage;
entry->encoder=(EncodeImageHandler *) WritePNMImage;
entry->description=ConstantString("Portable pixmap format (color)");
entry->module=ConstantString("PNM");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P N M I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPNMImage() removes format registrations made by the
% PNM module from the list of supported formats.
%
% The format of the UnregisterPNMImage method is:
%
% UnregisterPNMImage(void)
%
*/
ModuleExport void UnregisterPNMImage(void)
{
(void) UnregisterMagickInfo("PAM");
(void) UnregisterMagickInfo("PBM");
(void) UnregisterMagickInfo("PGM");
(void) UnregisterMagickInfo("PNM");
(void) UnregisterMagickInfo("PPM");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P N M I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePNMImage() writes an image to a file in the PNM rasterfile
% format.
%
% The format of the WritePNMImage method is:
%
% MagickBooleanType WritePNMImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
static MagickBooleanType WritePNMImage(const ImageInfo *image_info,Image *image)
{
char
buffer[MaxTextExtent],
format,
magick[MaxTextExtent];
const char
*value;
IndexPacket
index;
long
y;
MagickBooleanType
status;
MagickOffsetType
scene;
QuantumAny
pixel;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register long
i,
x;
register unsigned char
*pixels,
*q;
ssize_t
count;
size_t
extent,
packet_size;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
scene=0;
do
{
/*
Write PNM file header.
*/
packet_size=3;
quantum_type=RGBQuantum;
(void) CopyMagickString(magick,image_info->magick,MaxTextExtent);
switch (magick[1])
{
case 'A':
case 'a':
{
format='7';
break;
}
case 'B':
case 'b':
{
format='4';
if (image_info->compression == NoCompression)
format='1';
break;
}
case 'F':
case 'f':
{
format='F';
if (IsGrayImage(image,&image->exception) != MagickFalse)
format='f';
break;
}
case 'G':
case 'g':
{
format='5';
if (image_info->compression == NoCompression)
format='2';
break;
}
case 'N':
case 'n':
{
if ((image_info->type != TrueColorType) &&
(IsGrayImage(image,&image->exception) != MagickFalse))
{
format='5';
if (image_info->compression == NoCompression)
format='2';
if (IsMonochromeImage(image,&image->exception) != MagickFalse)
{
format='4';
if (image_info->compression == NoCompression)
format='1';
}
break;
}
}
default:
{
format='6';
if (image_info->compression == NoCompression)
format='3';
break;
}
}
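/*
The switch above maps the requested magick to a PNM subformat: P1/P4 for
PBM (bitmap, plain/raw), P2/P5 for PGM (graymap), P3/P6 for PPM (pixmap),
P7 for PAM, and PF/Pf for color/gray PFM; the plain (ASCII) variants are
chosen when image_info->compression is NoCompression.
*/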
(void) FormatMagickString(buffer,MaxTextExtent,"P%c\n",format);
(void) WriteBlobString(image,buffer);
value=GetImageProperty(image,"comment");
if (value != (const char *) NULL)
{
register const char
*p;
/*
Write comments to file.
*/
(void) WriteBlobByte(image,'#');
for (p=value; *p != '\0'; p++)
{
(void) WriteBlobByte(image,(unsigned char) *p);
if ((*p == '\r') && (*(p+1) != '\0'))
(void) WriteBlobByte(image,'#');
if ((*p == '\n') && (*(p+1) != '\0'))
(void) WriteBlobByte(image,'#');
}
(void) WriteBlobByte(image,'\n');
}
if (format != '7')
{
if (image->colorspace != RGBColorspace)
(void) SetImageColorspace(image,RGBColorspace);
(void) FormatMagickString(buffer,MaxTextExtent,"%lu %lu\n",
image->columns,image->rows);
(void) WriteBlobString(image,buffer);
}
else
{
char
type[MaxTextExtent];
/*
PAM header.
*/
(void) FormatMagickString(buffer,MaxTextExtent,
"WIDTH %lu\nHEIGHT %lu\n",image->columns,image->rows);
(void) WriteBlobString(image,buffer);
quantum_type=GetQuantumType(image,&image->exception);
switch (quantum_type)
{
case CMYKQuantum:
case CMYKAQuantum:
{
packet_size=4;
(void) CopyMagickString(type,"CMYK",MaxTextExtent);
break;
}
case GrayQuantum:
case GrayAlphaQuantum:
{
packet_size=1;
(void) CopyMagickString(type,"GRAYSCALE",MaxTextExtent);
break;
}
default:
{
quantum_type=RGBQuantum;
packet_size=3;
(void) CopyMagickString(type,"RGB",MaxTextExtent);
break;
}
}
if (image->matte != MagickFalse)
{
packet_size++;
(void) ConcatenateMagickString(type,"_ALPHA",MaxTextExtent);
}
if (image->depth > 16)
image->depth=16;
(void) FormatMagickString(buffer,MaxTextExtent,
"DEPTH %lu\nMAXVAL %lu\n",(unsigned long) packet_size,(unsigned long)
GetQuantumRange(image->depth));
(void) WriteBlobString(image,buffer);
(void) FormatMagickString(buffer,MaxTextExtent,"TUPLTYPE %s\nENDHDR\n",
type);
(void) WriteBlobString(image,buffer);
}
/*
Convert runlength-encoded pixels to PNM raster pixels.
*/
switch (format)
{
case '1':
{
unsigned char
pixels[2048];
/*
Convert image to a PBM image.
*/
q=pixels;
for (y=0; y < (long) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (long) image->columns; x++)
{
pixel=PixelIntensityToQuantum(p);
*q++=(unsigned char) (pixel >= (Quantum) (QuantumRange/2) ?
'0' : '1');
*q++=' ';
if ((q-pixels+2) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
i=0;
}
p++;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,y,image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
break;
}
case '2':
{
unsigned char
pixels[2048];
/*
Convert image to a PGM image.
*/
if (image->depth <= 8)
(void) WriteBlobString(image,"255\n");
else
(void) WriteBlobString(image,"65535\n");
q=pixels;
for (y=0; y < (long) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (long) image->columns; x++)
{
index=PixelIntensityToQuantum(p);
if (image->depth <= 8)
count=(ssize_t) FormatMagickString(buffer,MaxTextExtent,"%u ",
ScaleQuantumToChar(index));
else
count=(ssize_t) FormatMagickString(buffer,MaxTextExtent,"%u ",
ScaleQuantumToShort(index));
extent=(size_t) count;
(void) strncpy((char *) q,buffer,extent);
q+=extent;
if ((q-pixels+extent) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
p++;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,y,image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
break;
}
case '3':
{
unsigned char
pixels[2048];
/*
Convert image to a PNM image.
*/
if (image->depth <= 8)
(void) WriteBlobString(image,"255\n");
else
(void) WriteBlobString(image,"65535\n");
q=pixels;
for (y=0; y < (long) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (long) image->columns; x++)
{
if (image->depth <= 8)
count=(ssize_t) FormatMagickString(buffer,MaxTextExtent,
"%u %u %u ",ScaleQuantumToChar(p->red),
ScaleQuantumToChar(p->green),ScaleQuantumToChar(p->blue));
else
count=(ssize_t) FormatMagickString(buffer,MaxTextExtent,
"%u %u %u ",ScaleQuantumToShort(p->red),
ScaleQuantumToShort(p->green),ScaleQuantumToShort(p->blue));
extent=(size_t) count;
(void) strncpy((char *) q,buffer,extent);
q+=extent;
if ((q-pixels+extent) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
p++;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,y,image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
break;
}
case '4':
{
/*
Convert image to a PBM image.
*/
image->depth=1;
quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
quantum_info->min_is_white=MagickTrue;
pixels=GetQuantumPixels(quantum_info);
for (y=0; y < (long) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
extent=ExportQuantumPixels(image,(const ViewInfo *) NULL,quantum_info,
GrayQuantum,pixels,&image->exception);
count=WriteBlob(image,extent,pixels);
if (count != (ssize_t) extent)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,y,image->rows);
if (status == MagickFalse)
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
break;
}
case '5':
{
QuantumAny
scale;
/*
Convert image to a PGM image.
*/
if (image->depth > 8)
image->depth=16;
(void) FormatMagickString(buffer,MaxTextExtent,"%lu\n",(unsigned long)
GetQuantumRange(image->depth));
(void) WriteBlobString(image,buffer);
quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
quantum_info->min_is_white=MagickTrue;
pixels=GetQuantumPixels(quantum_info);
extent=GetQuantumExtent(image,quantum_info,GrayQuantum);
scale=GetQuantumScale(image->depth);
for (y=0; y < (long) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
q=pixels;
if ((image->depth == 8) || (image->depth == 16))
extent=ExportQuantumPixels(image,(const ViewInfo *) NULL,
quantum_info,GrayQuantum,pixels,&image->exception);
else
{
if (image->depth <= 8)
for (x=0; x < (long) image->columns; x++)
{
if (IsGrayPixel(p) == MagickFalse)
pixel=ScaleQuantumToAny(PixelIntensityToQuantum(p),
image->depth,scale);
else
{
if (image->depth == 8)
pixel=ScaleQuantumToChar(p->red);
else
pixel=ScaleQuantumToAny(p->red,image->depth,scale);
}
q=PopCharPixel((unsigned char) pixel,q);
p++;
}
else
for (x=0; x < (long) image->columns; x++)
{
if (IsGrayPixel(p) == MagickFalse)
pixel=ScaleQuantumToAny(PixelIntensityToQuantum(p),
image->depth,scale);
else
{
if (image->depth == 16)
pixel=ScaleQuantumToShort(p->red);
else
pixel=ScaleQuantumToAny(p->red,image->depth,scale);
}
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
p++;
}
extent=(size_t) (q-pixels);
}
count=WriteBlob(image,extent,pixels);
if (count != (ssize_t) extent)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,y,image->rows);
if (status == MagickFalse)
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
break;
}
case '6':
{
QuantumAny
scale;
/*
Convert image to a PNM image.
*/
if (image->depth > 8)
image->depth=16;
(void) FormatMagickString(buffer,MaxTextExtent,"%lu\n",(unsigned long)
GetQuantumRange(image->depth));
(void) WriteBlobString(image,buffer);
quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
pixels=GetQuantumPixels(quantum_info);
extent=GetQuantumExtent(image,quantum_info,quantum_type);
scale=GetQuantumScale(image->depth);
for (y=0; y < (long) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
q=pixels;
if ((image->depth == 8) || (image->depth == 16))
extent=ExportQuantumPixels(image,(const ViewInfo *) NULL,
quantum_info,quantum_type,pixels,&image->exception);
else
{
if (image->depth <= 8)
for (x=0; x < (long) image->columns; x++)
{
pixel=ScaleQuantumToAny(p->red,image->depth,scale);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(p->green,image->depth,scale);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(p->blue,image->depth,scale);
q=PopCharPixel((unsigned char) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
p->opacity),image->depth,scale);
q=PopCharPixel((unsigned char) pixel,q);
}
p++;
}
else
for (x=0; x < (long) image->columns; x++)
{
pixel=ScaleQuantumToAny(p->red,image->depth,scale);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(p->green,image->depth,scale);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(p->blue,image->depth,scale);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
p->opacity),image->depth,scale);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
}
p++;
}
extent=(size_t) (q-pixels);
}
count=WriteBlob(image,extent,pixels);
if (count != (ssize_t) extent)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,y,image->rows);
if (status == MagickFalse)
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
break;
}
case '7':
{
QuantumAny
scale;
/*
Convert image to a PAM.
*/
if (image->depth > 16)
image->depth=16;
quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
if ((quantum_info->format == UndefinedQuantumFormat) &&
(IsHighDynamicRangeImage(image,&image->exception) != MagickFalse))
{
status=SetQuantumFormat(image,quantum_info,
FloatingPointQuantumFormat);
if (status == MagickFalse)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
pixels=GetQuantumPixels(quantum_info);
scale=GetQuantumScale(image->depth);
for (y=0; y < (long) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
q=pixels;
if ((image->depth == 8) || (image->depth == 16))
extent=ExportQuantumPixels(image,(const ViewInfo *) NULL,
quantum_info,quantum_type,pixels,&image->exception);
else
{
switch (quantum_type)
{
case GrayQuantum:
case GrayAlphaQuantum:
{
if (image->depth <= 8)
for (x=0; x < (long) image->columns; x++)
{
pixel=ScaleQuantumToAny(PixelIntensityToQuantum(p),
image->depth,scale);
q=PopCharPixel((unsigned char) pixel,q);
if (image->matte != MagickFalse)
{
pixel=(unsigned char) ScaleQuantumToAny(p->opacity,
image->depth,scale);
q=PopCharPixel((unsigned char) pixel,q);
}
p++;
}
else
for (x=0; x < (long) image->columns; x++)
{
pixel=ScaleQuantumToAny(PixelIntensityToQuantum(p),
image->depth,scale);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
if (image->matte != MagickFalse)
{
pixel=(unsigned char) ScaleQuantumToAny(p->opacity,
image->depth,scale);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
}
p++;
}
break;
}
case CMYKQuantum:
case CMYKAQuantum:
{
if (image->depth <= 8)
for (x=0; x < (long) image->columns; x++)
{
pixel=ScaleQuantumToAny(p->red,image->depth,scale);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(p->green,image->depth,scale);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(p->blue,image->depth,scale);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(indexes[x],image->depth,scale);
q=PopCharPixel((unsigned char) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
p->opacity),image->depth,scale);
q=PopCharPixel((unsigned char) pixel,q);
}
p++;
}
else
for (x=0; x < (long) image->columns; x++)
{
pixel=ScaleQuantumToAny(p->red,image->depth,scale);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(p->green,image->depth,scale);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(p->blue,image->depth,scale);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(indexes[x],image->depth,scale);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
p->opacity),image->depth,scale);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
}
p++;
}
break;
}
default:
{
if (image->depth <= 8)
for (x=0; x < (long) image->columns; x++)
{
pixel=ScaleQuantumToAny(p->red,image->depth,scale);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(p->green,image->depth,scale);
q=PopCharPixel((unsigned char) pixel,q);
pixel=ScaleQuantumToAny(p->blue,image->depth,scale);
q=PopCharPixel((unsigned char) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
p->opacity),image->depth,scale);
q=PopCharPixel((unsigned char) pixel,q);
}
p++;
}
else
for (x=0; x < (long) image->columns; x++)
{
pixel=ScaleQuantumToAny(p->red,image->depth,scale);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(p->green,image->depth,scale);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
pixel=ScaleQuantumToAny(p->blue,image->depth,scale);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
if (image->matte != MagickFalse)
{
pixel=ScaleQuantumToAny((Quantum) (QuantumRange-
p->opacity),image->depth,scale);
q=PopShortPixel(MSBEndian,(unsigned short) pixel,q);
}
p++;
}
break;
}
}
extent=(size_t) (q-pixels);
}
count=WriteBlob(image,extent,pixels);
if (count != (ssize_t) extent)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,y,image->rows);
if (status == MagickFalse)
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
break;
}
case 'F':
case 'f':
{
(void) WriteBlobString(image,image->endian != LSBEndian ? "1.0\n" :
"-1.0\n");
image->depth=32;
quantum_type=format == 'f' ? GrayQuantum : RGBQuantum;
quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat);
if (status == MagickFalse)
{
quantum_info=DestroyQuantumInfo(quantum_info);
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
pixels=GetQuantumPixels(quantum_info);
for (y=(long) image->rows-1; y >= 0; y--)
{
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
extent=ExportQuantumPixels(image,(const ViewInfo *) NULL,quantum_info,
quantum_type,pixels,&image->exception);
(void) WriteBlob(image,extent,pixels);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,y,image->rows);
if (status == MagickFalse)
break;
}
}
quantum_info=DestroyQuantumInfo(quantum_info);
break;
}
}
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene++,
GetImageListLength(image));
if (status == MagickFalse)
break;
} while (image_info->adjoin != MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
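/*
  Illustrative sketch (not part of the coder above): for depths other than
  8 and 16 the writer packs each sample with ScaleQuantumToAny(), which maps
  a Quantum from [0,QuantumRange] onto [0,2^depth-1] before the bytes are
  emitted MSB-first.  The helper below restates that arithmetic in plain C
  for exposition only; the function name and round-to-nearest choice are
  assumptions, not the library's implementation, and depth <= 16 is assumed
  (matching the cap applied by the writer above).
*/
static unsigned long ScaleQuantumToAnySketch(const unsigned long quantum,
  const unsigned long depth)
{
  const unsigned long
    range = (1UL << depth)-1;

  /* scale from [0,QuantumRange] onto [0,range] with round-to-nearest */
  return((unsigned long) (((double) quantum*range/QuantumRange)+0.5));
}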
|
DRB113-default-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two-dimensional array computation:
default(none) enforces explicitly listing all variables in data-sharing attribute clauses;
default(shared) covers the other option.
*/
#include <omp.h>
#include <stdio.h>
int a[100][100];
int b[100][100];
int main()
{
int i;
int j;
#pragma omp parallel for private (i,j)
for (i = 0; i <= 99; i += 1) {
#pragma omp parallel for private (j)
for (j = 0; j <= 99; j += 1) {
a[i][j] = i;
b[i][j] = i;
}
}
#pragma omp parallel for private (i,j)
for (i = 0; i <= 99; i += 1) {
#pragma omp parallel for private (j)
for (j = 0; j <= 99; j += 1) {
a[i][j] = a[i][j] + 1;
}
}
#pragma omp parallel for private (i,j)
for (i = 0; i <= 99; i += 1) {
#pragma omp parallel for private (j)
for (j = 0; j <= 99; j += 1) {
b[i][j] = b[i][j] + 1;
}
}
for (i = 0; i <= 99; i += 1) {
for (j = 0; j <= 99; j += 1) {
printf("%d %d\n",a[i][j],b[i][j]);
}
}
return 0;
}
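/*
  Illustrative variant (not part of the benchmark): the header comment refers
  to default(none), which forces every variable referenced in the region to
  appear in an explicit data-sharing clause.  The first loop nest written in
  that style would look like the sketch below; the function name is invented
  for exposition.
*/
void init_default_none()
{
  int i;
  int j;
#pragma omp parallel for default(none) shared(a,b) private(i,j)
  for (i = 0; i <= 99; i += 1) {
    for (j = 0; j <= 99; j += 1) {
      a[i][j] = i;
      b[i][j] = i;
    }
  }
}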
|
GB_unaryop__abs_uint32_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint32_int32
// op(A') function: GB_tran__abs_uint32_int32
// C type: uint32_t
// A type: int32_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_uint32_int32
(
uint32_t *restrict Cx,
const int32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
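// Illustrative expansion (exposition only, not generated code): for a single
// entry p, GB_CAST_OP(p,p) above unfolds through the macros into the three
// statements below, so the kernel body is just a typecasting copy loop:
//
//      int32_t aij = Ax [p] ;              // GB_GETA: read the input entry
//      uint32_t x = (uint32_t) aij ;       // GB_CASTING: typecast to uint32_t
//      Cx [p] = x ;                        // GB_OP via GB_CX: identity store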
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_uint32_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% John Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/annotate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
/*
Typedef declarations.
*/
typedef struct _EdgeInfo
{
SegmentInfo
bounds;
double
scanline;
PointInfo
*points;
size_t
number_points;
ssize_t
direction;
MagickBooleanType
ghostline;
size_t
highwater;
} EdgeInfo;
typedef struct _ElementInfo
{
double
cx,
cy,
major,
minor,
angle;
} ElementInfo;
typedef struct _PolygonInfo
{
EdgeInfo
*edges;
size_t
number_edges;
} PolygonInfo;
typedef enum
{
MoveToCode,
OpenCode,
GhostlineCode,
LineToCode,
EndCode
} PathInfoCode;
typedef struct _PathInfo
{
PointInfo
point;
PathInfoCode
code;
} PathInfo;
/*
Forward declarations.
*/
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *);
static PrimitiveInfo
*TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(PrimitiveInfo *,const char *);
static void
TraceArc(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(PrimitiveInfo *,const size_t),
TraceCircle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceEllipse(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(PrimitiveInfo *,const PointInfo,const PointInfo,
PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
DrawInfo
*draw_info;
draw_info=(DrawInfo *) AcquireMagickMemory(sizeof(*draw_info));
if (draw_info == (DrawInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
GetDrawInfo((ImageInfo *) NULL,draw_info);
return(draw_info);
}
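/*
  Illustrative usage sketch (not part of the library): a DrawInfo acquired
  with AcquireDrawInfo() carries library defaults and must be released with
  DestroyDrawInfo().  The function name and field value below are invented
  for exposition.
*/
static void AcquireDrawInfoSketch(void)
{
  DrawInfo
    *draw_info;

  draw_info=AcquireDrawInfo();
  draw_info->stroke_width=3.0;  /* defaults may be overridden field-by-field */
  draw_info=DestroyDrawInfo(draw_info);
}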
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
const DrawInfo *draw_info)
{
DrawInfo
*clone_info;
clone_info=(DrawInfo *) AcquireMagickMemory(sizeof(*clone_info));
if (clone_info == (DrawInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
GetDrawInfo(image_info,clone_info);
if (draw_info == (DrawInfo *) NULL)
return(clone_info);
if (draw_info->primitive != (char *) NULL)
(void) CloneString(&clone_info->primitive,draw_info->primitive);
if (draw_info->geometry != (char *) NULL)
(void) CloneString(&clone_info->geometry,draw_info->geometry);
clone_info->viewbox=draw_info->viewbox;
clone_info->affine=draw_info->affine;
clone_info->gravity=draw_info->gravity;
clone_info->fill=draw_info->fill;
clone_info->stroke=draw_info->stroke;
clone_info->stroke_width=draw_info->stroke_width;
if (draw_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
&draw_info->fill_pattern->exception);
else
if (draw_info->tile != (Image *) NULL)
clone_info->fill_pattern=CloneImage(draw_info->tile,0,0,MagickTrue,
&draw_info->tile->exception);
clone_info->tile=NewImageList(); /* tile is deprecated */
if (draw_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
MagickTrue,&draw_info->stroke_pattern->exception);
clone_info->stroke_antialias=draw_info->stroke_antialias;
clone_info->text_antialias=draw_info->text_antialias;
clone_info->fill_rule=draw_info->fill_rule;
clone_info->linecap=draw_info->linecap;
clone_info->linejoin=draw_info->linejoin;
clone_info->miterlimit=draw_info->miterlimit;
clone_info->dash_offset=draw_info->dash_offset;
clone_info->decorate=draw_info->decorate;
clone_info->compose=draw_info->compose;
if (draw_info->text != (char *) NULL)
(void) CloneString(&clone_info->text,draw_info->text);
if (draw_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,draw_info->font);
if (draw_info->metrics != (char *) NULL)
(void) CloneString(&clone_info->metrics,draw_info->metrics);
if (draw_info->family != (char *) NULL)
(void) CloneString(&clone_info->family,draw_info->family);
clone_info->style=draw_info->style;
clone_info->stretch=draw_info->stretch;
clone_info->weight=draw_info->weight;
if (draw_info->encoding != (char *) NULL)
(void) CloneString(&clone_info->encoding,draw_info->encoding);
clone_info->pointsize=draw_info->pointsize;
clone_info->kerning=draw_info->kerning;
clone_info->interline_spacing=draw_info->interline_spacing;
clone_info->interword_spacing=draw_info->interword_spacing;
clone_info->direction=draw_info->direction;
if (draw_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,draw_info->density);
clone_info->align=draw_info->align;
clone_info->undercolor=draw_info->undercolor;
clone_info->border_color=draw_info->border_color;
if (draw_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
{
register ssize_t
x;
for (x=0; draw_info->dash_pattern[x] != 0.0; x++) ;
clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) x+1UL,
sizeof(*clone_info->dash_pattern));
if (clone_info->dash_pattern == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) CopyMagickMemory(clone_info->dash_pattern,draw_info->dash_pattern,
(size_t) (x+1)*sizeof(*clone_info->dash_pattern));
}
clone_info->gradient=draw_info->gradient;
if (draw_info->gradient.stops != (StopInfo *) NULL)
{
size_t
number_stops;
number_stops=clone_info->gradient.number_stops;
clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
number_stops,sizeof(*clone_info->gradient.stops));
if (clone_info->gradient.stops == (StopInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) CopyMagickMemory(clone_info->gradient.stops,
draw_info->gradient.stops,(size_t) number_stops*
sizeof(*clone_info->gradient.stops));
}
if (draw_info->clip_mask != (char *) NULL)
(void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
clone_info->bounds=draw_info->bounds;
clone_info->clip_units=draw_info->clip_units;
clone_info->render=draw_info->render;
clone_info->opacity=draw_info->opacity;
clone_info->element_reference=draw_info->element_reference;
clone_info->debug=IsEventLogging();
return(clone_info);
}
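/*
  Illustrative sketch (not part of the library): CloneDrawInfo() deep-copies
  every string, pattern image, dash array, and gradient stop list, so
  mutating the clone leaves the original untouched.  The function name and
  font below are arbitrary example values.
*/
static void CloneDrawInfoSketch(const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->font,"Helvetica");  /* original unchanged */
  clone_info->stroke_width=2.5;
  clone_info=DestroyDrawInfo(clone_info);
}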
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const DrawInfo *draw_info,
% const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int CompareEdges(const void *x,const void *y)
{
register const EdgeInfo
*p,
*q;
/*
Compare two edges.
*/
p=(const EdgeInfo *) x;
q=(const EdgeInfo *) y;
if ((p->points[0].y-MagickEpsilon) > q->points[0].y)
return(1);
if ((p->points[0].y+MagickEpsilon) < q->points[0].y)
return(-1);
if ((p->points[0].x-MagickEpsilon) > q->points[0].x)
return(1);
if ((p->points[0].x+MagickEpsilon) < q->points[0].x)
return(-1);
if (((p->points[1].x-p->points[0].x)*(q->points[1].y-q->points[0].y)-
(p->points[1].y-p->points[0].y)*(q->points[1].x-q->points[0].x)) > 0.0)
return(1);
return(-1);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g %g - %g %g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g %g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
PointInfo
point;
register ssize_t
i;
for (i=0; i < (ssize_t) (number_points >> 1); i++)
{
point=points[i];
points[i]=points[number_points-(i+1)];
points[number_points-(i+1)]=point;
}
}
static PolygonInfo *ConvertPathToPolygon(
const DrawInfo *magick_unused(draw_info),const PathInfo *path_info)
{
long
direction,
next_direction;
PointInfo
point,
*points;
PolygonInfo
*polygon_info;
SegmentInfo
bounds;
register ssize_t
i,
n;
MagickBooleanType
ghostline;
size_t
edge,
number_edges,
number_points;
/*
Convert a path to the more efficient sorted rendering form.
*/
polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
if (polygon_info == (PolygonInfo *) NULL)
return((PolygonInfo *) NULL);
number_edges=16;
polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory((size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
direction=0;
edge=0;
ghostline=MagickFalse;
n=0;
number_points=0;
points=(PointInfo *) NULL;
(void) ResetMagickMemory(&point,0,sizeof(point));
(void) ResetMagickMemory(&bounds,0,sizeof(bounds));
for (i=0; path_info[i].code != EndCode; i++)
{
if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
(path_info[i].code == GhostlineCode))
{
/*
Move to.
*/
if ((points != (PointInfo *) NULL) && (n >= 2))
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
}
if (points == (PointInfo *) NULL)
{
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
point=path_info[i].point;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
direction=0;
n=1;
continue;
}
/*
Line to.
*/
next_direction=((path_info[i].point.y > point.y) ||
((path_info[i].point.y == point.y) &&
(path_info[i].point.x > point.x))) ? 1 : -1;
if ((direction != 0) && (direction != next_direction))
{
/*
New edge.
*/
point=points[n-1];
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
n=1;
ghostline=MagickFalse;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
edge++;
}
direction=next_direction;
if (points == (PointInfo *) NULL)
continue;
if (n == (ssize_t) number_points)
{
number_points<<=1;
points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
point=path_info[i].point;
points[n]=point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.x > bounds.x2)
bounds.x2=point.x;
n++;
}
if (points != (PointInfo *) NULL)
{
if (n < 2)
points=(PointInfo *) RelinquishMagickMemory(points);
else
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
ghostline=MagickFalse;
edge++;
}
}
polygon_info->number_edges=edge;
qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
sizeof(*polygon_info->edges),CompareEdges);
if (IsEventLogging() != MagickFalse)
LogPolygonInfo(polygon_info);
return(polygon_info);
}
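/*
  Illustrative sketch (not part of the library): a closed triangle expressed
  as a PathInfo sequence.  ConvertPathToPolygon() splits the path at its
  vertical turning points into y-monotone edges and sorts them for scanline
  rendering.  The function name and coordinates are invented for exposition.
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *);  /* defined below */

static void ConvertPathToPolygonSketch(void)
{
  PathInfo
    path_info[5];

  PolygonInfo
    *polygon_info;

  path_info[0].code=MoveToCode;
  path_info[0].point.x=10.0;
  path_info[0].point.y=10.0;
  path_info[1].code=LineToCode;
  path_info[1].point.x=90.0;
  path_info[1].point.y=10.0;
  path_info[2].code=LineToCode;
  path_info[2].point.x=50.0;
  path_info[2].point.y=80.0;
  path_info[3].code=LineToCode;
  path_info[3].point.x=10.0;
  path_info[3].point.y=10.0;
  path_info[4].code=EndCode;
  path_info[4].point.x=0.0;
  path_info[4].point.y=0.0;
  polygon_info=ConvertPathToPolygon((const DrawInfo *) NULL,path_info);
  if (polygon_info != (PolygonInfo *) NULL)
    polygon_info=DestroyPolygonInfo(polygon_info);
}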
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
register const PathInfo
*p;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
for (p=path_info; p->code != EndCode; p++)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %g %g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
"moveto ghostline" : p->code == OpenCode ? "moveto open" :
p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
"?");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(
const DrawInfo *magick_unused(draw_info),const PrimitiveInfo *primitive_info)
{
PathInfo
*path_info;
PathInfoCode
code;
PointInfo
p,
q;
register ssize_t
i,
n;
ssize_t
coordinates,
start;
/*
Converts a PrimitiveInfo structure into a vector path structure.
*/
switch (primitive_info->primitive)
{
case PointPrimitive:
case ColorPrimitive:
case MattePrimitive:
case TextPrimitive:
case ImagePrimitive:
return((PathInfo *) NULL);
default:
break;
}
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
path_info=(PathInfo *) AcquireQuantumMemory((size_t) (2UL*i+3UL),
sizeof(*path_info));
if (path_info == (PathInfo *) NULL)
return((PathInfo *) NULL);
coordinates=0;
n=0;
p.x=(-1.0);
p.y=(-1.0);
q.x=(-1.0);
q.y=(-1.0);
start=0;
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
code=LineToCode;
if (coordinates <= 0)
{
coordinates=(ssize_t) primitive_info[i].coordinates;
p=primitive_info[i].point;
start=n;
code=MoveToCode;
}
coordinates--;
/*
Eliminate duplicate points.
*/
if ((i == 0) || (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
(fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
{
path_info[n].code=code;
path_info[n].point=primitive_info[i].point;
q=primitive_info[i].point;
n++;
}
if (coordinates > 0)
continue;
if ((fabs(p.x-primitive_info[i].point.x) < MagickEpsilon) &&
(fabs(p.y-primitive_info[i].point.y) < MagickEpsilon))
continue;
/*
Mark the p point as open if it does not match the q.
*/
path_info[start].code=OpenCode;
path_info[n].code=GhostlineCode;
path_info[n].point=primitive_info[i].point;
n++;
path_info[n].code=LineToCode;
path_info[n].point=p;
n++;
}
path_info[n].code=EndCode;
path_info[n].point.x=0.0;
path_info[n].point.y=0.0;
if (IsEventLogging() != MagickFalse)
LogPathInfo(path_info);
return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo
% structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickSignature);
if (draw_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (draw_info->primitive != (char *) NULL)
draw_info->primitive=DestroyString(draw_info->primitive);
if (draw_info->text != (char *) NULL)
draw_info->text=DestroyString(draw_info->text);
if (draw_info->geometry != (char *) NULL)
draw_info->geometry=DestroyString(draw_info->geometry);
if (draw_info->tile != (Image *) NULL)
draw_info->tile=DestroyImage(draw_info->tile);
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
if (draw_info->stroke_pattern != (Image *) NULL)
draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
if (draw_info->font != (char *) NULL)
draw_info->font=DestroyString(draw_info->font);
if (draw_info->metrics != (char *) NULL)
draw_info->metrics=DestroyString(draw_info->metrics);
if (draw_info->family != (char *) NULL)
draw_info->family=DestroyString(draw_info->family);
if (draw_info->encoding != (char *) NULL)
draw_info->encoding=DestroyString(draw_info->encoding);
if (draw_info->density != (char *) NULL)
draw_info->density=DestroyString(draw_info->density);
if (draw_info->server_name != (char *) NULL)
draw_info->server_name=(char *)
RelinquishMagickMemory(draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
draw_info->dash_pattern=(double *) RelinquishMagickMemory(
draw_info->dash_pattern);
if (draw_info->gradient.stops != (StopInfo *) NULL)
draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
draw_info->gradient.stops);
if (draw_info->clip_mask != (char *) NULL)
draw_info->clip_mask=DestroyString(draw_info->clip_mask);
draw_info->signature=(~MagickSignature);
draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
const size_t edge)
{
assert(edge < polygon_info->number_edges);
polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
polygon_info->edges[edge].points);
polygon_info->number_edges--;
if (edge < polygon_info->number_edges)
(void) CopyMagickMemory(polygon_info->edges+edge,polygon_info->edges+edge+1,
(size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
register ssize_t
i;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
polygon_info->edges[i].points=(PointInfo *)
RelinquishMagickMemory(polygon_info->edges[i].points);
polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
const double y,const SegmentInfo *edge)
{
double
intercept,
z;
register double
x;
SegmentInfo
inverse_edge;
/*
Determine left and right edges.
*/
inverse_edge.x1=edge->x1;
inverse_edge.y1=edge->y1;
inverse_edge.x2=edge->x2;
inverse_edge.y2=edge->y2;
z=affine->ry*y+affine->tx;
if (affine->sx >= MagickEpsilon)
{
intercept=(-z/affine->sx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->sx < -MagickEpsilon)
{
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->sx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
{
inverse_edge.x2=edge->x1;
return(inverse_edge);
}
/*
Determine top and bottom edges.
*/
z=affine->sy*y+affine->ty;
if (affine->rx >= MagickEpsilon)
{
intercept=(-z/affine->rx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->rx < -MagickEpsilon)
{
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->rx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
{
inverse_edge.x2=edge->x2;
return(inverse_edge);
}
return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
AffineMatrix
inverse_affine;
double
determinant;
determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
affine->ry);
inverse_affine.sx=determinant*affine->sy;
inverse_affine.rx=determinant*(-affine->rx);
inverse_affine.ry=determinant*(-affine->ry);
inverse_affine.sy=determinant*affine->sx;
inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
inverse_affine.ry;
inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
inverse_affine.sy;
return(inverse_affine);
}
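/*
  Worked example (exposition only): composing the matrix with the inverse
  returned above recovers the original point.  The rotation-plus-translation
  values and the function name are invented for illustration.
*/
static void InverseAffineMatrixSketch(void)
{
  AffineMatrix
    affine,
    inverse_affine;

  PointInfo
    forward,
    roundtrip;

  GetAffineMatrix(&affine);  /* identity */
  affine.sx=cos(MagickPI/6.0);  /* rotate by 30 degrees, then translate */
  affine.rx=sin(MagickPI/6.0);
  affine.ry=(-sin(MagickPI/6.0));
  affine.sy=cos(MagickPI/6.0);
  affine.tx=5.0;
  affine.ty=(-3.0);
  inverse_affine=InverseAffineMatrix(&affine);
  /* forward map of (12,7): x'=x*sx+y*ry+tx, y'=x*rx+y*sy+ty */
  forward.x=12.0*affine.sx+7.0*affine.ry+affine.tx;
  forward.y=12.0*affine.rx+7.0*affine.sy+affine.ty;
  /* the inverse map returns (12,7) up to floating-point rounding */
  roundtrip.x=forward.x*inverse_affine.sx+forward.y*inverse_affine.ry+
    inverse_affine.tx;
  roundtrip.y=forward.x*inverse_affine.rx+forward.y*inverse_affine.sy+
    inverse_affine.ty;
  (void) roundtrip;
}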
static inline ssize_t MagickAbsoluteValue(const ssize_t x)
{
if (x < 0)
return(-x);
return(x);
}
static inline double MagickMax(const double x,const double y)
{
if (x > y)
return(x);
return(y);
}
static inline double MagickMin(const double x,const double y)
{
if (x < y)
return(x);
return(y);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
const Image *source,const AffineMatrix *affine)
{
AffineMatrix
inverse_affine;
CacheView
*image_view,
*source_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickPixelPacket
zero;
PointInfo
extent[4],
min,
max,
point;
register ssize_t
i;
SegmentInfo
edge;
ssize_t
start,
stop,
y;
/*
Determine bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(source != (const Image *) NULL);
assert(source->signature == MagickSignature);
assert(affine != (AffineMatrix *) NULL);
extent[0].x=0.0;
extent[0].y=0.0;
extent[1].x=(double) source->columns-1.0;
extent[1].y=0.0;
extent[2].x=(double) source->columns-1.0;
extent[2].y=(double) source->rows-1.0;
extent[3].x=0.0;
extent[3].y=(double) source->rows-1.0;
for (i=0; i < 4; i++)
{
point=extent[i];
extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
/*
Affine transform image.
*/
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
edge.x1=MagickMax(min.x,0.0);
edge.y1=MagickMax(min.y,0.0);
edge.x2=MagickMin(max.x,(double) image->columns-1.0);
edge.y2=MagickMin(max.y,(double) image->rows-1.0);
inverse_affine=InverseAffineMatrix(affine);
GetMagickPixelPacket(image,&zero);
exception=(&image->exception);
start=(ssize_t) ceil(edge.y1-0.5);
stop=(ssize_t) floor(edge.y2+0.5);
source_view=AcquireVirtualCacheView(source,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(source,image,1,1)
#endif
for (y=start; y <= stop; y++)
{
MagickPixelPacket
composite,
pixel;
PointInfo
point;
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
SegmentInfo
inverse_edge;
ssize_t
x_offset;
inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
if (inverse_edge.x2 < inverse_edge.x1)
continue;
q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
1,exception);
if (q == (PixelPacket *) NULL)
continue;
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
composite=zero;
x_offset=0;
for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(
inverse_edge.x2+0.5); x++)
{
point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
inverse_affine.tx;
point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
inverse_affine.ty;
(void) InterpolateMagickPixelPacket(source,source_view,
UndefinedInterpolatePixel,point.x,point.y,&pixel,exception);
SetMagickPixelPacket(image,q,indexes+x_offset,&composite);
MagickPixelCompositeOver(&pixel,pixel.opacity,&composite,
composite.opacity,&composite);
SetPixelPacket(image,&composite,q,indexes+x_offset);
x_offset++;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
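/*
  Illustrative usage sketch (not part of the library): composite a source
  image onto a destination, doubled in size and translated to (10,20).  The
  affine convention matches the transform above: x'=x*sx+y*ry+tx and
  y'=x*rx+y*sy+ty.  The function name is invented for exposition.
*/
static MagickBooleanType DrawAffineImageSketch(Image *image,
  const Image *source)
{
  AffineMatrix
    affine;

  GetAffineMatrix(&affine);  /* identity: sx=sy=1, rx=ry=tx=ty=0 */
  affine.sx=2.0;
  affine.sy=2.0;
  affine.tx=10.0;
  affine.ty=20.0;
  return(DrawAffineImage(image,source,&affine));
}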
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
% PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
const PolygonInfo *polygon_info)
{
double
mid;
DrawInfo
*clone_info;
PointInfo
end,
resolution,
start;
PrimitiveInfo
primitive_info[6];
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
coordinates;
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) QueryColorDatabase("#0000",&clone_info->fill,&image->exception);
resolution.x=DefaultResolution;
resolution.y=DefaultResolution;
if (clone_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(clone_info->density,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == MagickFalse)
resolution.y=resolution.x;
}
mid=(resolution.x/72.0)*ExpandAffine(&clone_info->affine)*
clone_info->stroke_width/2.0;
bounds.x1=0.0;
bounds.y1=0.0;
bounds.x2=0.0;
bounds.y2=0.0;
if (polygon_info != (PolygonInfo *) NULL)
{
bounds=polygon_info->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
bounds.x1=polygon_info->edges[i].bounds.x1;
if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
bounds.y1=polygon_info->edges[i].bounds.y1;
if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
bounds.x2=polygon_info->edges[i].bounds.x2;
if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
bounds.y2=polygon_info->edges[i].bounds.y2;
}
bounds.x1-=mid;
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
image->columns ? (double) image->columns-1 : bounds.x1;
bounds.y1-=mid;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
image->rows ? (double) image->rows-1 : bounds.y1;
bounds.x2+=mid;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
image->columns ? (double) image->columns-1 : bounds.x2;
bounds.y2+=mid;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
image->rows ? (double) image->rows-1 : bounds.y2;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].direction != 0)
(void) QueryColorDatabase("red",&clone_info->stroke,
&image->exception);
else
(void) QueryColorDatabase("green",&clone_info->stroke,
&image->exception);
start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
(void) DrawPrimitive(image,clone_info,primitive_info);
}
}
(void) QueryColorDatabase("blue",&clone_info->stroke,&image->exception);
start.x=(double) (bounds.x1-mid);
start.y=(double) (bounds.y1-mid);
end.x=(double) (bounds.x2+mid);
end.y=(double) (bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
(void) DrawPrimitive(image,clone_info,primitive_info);
clone_info=DestroyDrawInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the name of the clip path.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
const DrawInfo *draw_info,const char *name)
{
char
clip_mask[MaxTextExtent];
const char
*value;
DrawInfo
*clone_info;
MagickStatusType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
(void) FormatLocaleString(clip_mask,MaxTextExtent,"%s",name);
value=GetImageArtifact(image,clip_mask);
if (value == (const char *) NULL)
return(MagickFalse);
if (image->clip_mask == (Image *) NULL)
{
Image
*clip_mask;
clip_mask=CloneImage(image,image->columns,image->rows,MagickTrue,
&image->exception);
if (clip_mask == (Image *) NULL)
return(MagickFalse);
(void) SetImageClipMask(image,clip_mask);
clip_mask=DestroyImage(clip_mask);
}
(void) QueryColorDatabase("#00000000",&image->clip_mask->background_color,
&image->exception);
image->clip_mask->background_color.opacity=(Quantum) TransparentOpacity;
(void) SetImageBackgroundColor(image->clip_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
draw_info->clip_mask);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,value);
(void) QueryColorDatabase("#ffffff",&clone_info->fill,&image->exception);
clone_info->clip_mask=(char *) NULL;
status=DrawImage(image->clip_mask,clone_info);
status&=NegateImage(image->clip_mask,MagickFalse);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
return(status != 0 ? MagickTrue : MagickFalse);
}
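/*
  Illustrative usage sketch (not part of the library): DrawClipPath() looks
  the named path up as an image artifact, so the MVG definition must be
  registered under that name first.  The artifact name and primitive string
  below are arbitrary example values.
*/
static MagickBooleanType DrawClipPathSketch(Image *image,
  const DrawInfo *draw_info)
{
  (void) SetImageArtifact(image,"example-clip","rectangle 10,10 90,90");
  return(DrawClipPath(image,draw_info,"example-clip"));
}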
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,Image *image)
{
double
length,
maximum_length,
offset,
scale,
total_length;
DrawInfo
*clone_info;
MagickStatusType
status;
PrimitiveInfo
*dash_polygon;
register double
dx,
dy;
register ssize_t
i;
size_t
number_vertices;
ssize_t
j,
n;
assert(draw_info != (const DrawInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->miterlimit=0;
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
number_vertices=(size_t) i;
dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(2UL*number_vertices+1UL),sizeof(*dash_polygon));
if (dash_polygon == (PrimitiveInfo *) NULL)
return(MagickFalse);
dash_polygon[0]=primitive_info[0];
scale=ExpandAffine(&draw_info->affine);
length=scale*(draw_info->dash_pattern[0]-0.5);
offset=draw_info->dash_offset != 0.0 ? scale*draw_info->dash_offset : 0.0;
j=1;
for (n=0; offset > 0.0; j=0)
{
if (draw_info->dash_pattern[n] <= 0.0)
break;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
if (offset > length)
{
offset-=length;
n++;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
continue;
}
if (offset < length)
{
length-=offset;
offset=0.0;
break;
}
offset=0.0;
n++;
}
status=MagickTrue;
maximum_length=0.0;
total_length=0.0;
for (i=1; i < (ssize_t) number_vertices; i++)
{
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot((double) dx,dy);
if (length == 0.0)
{
n++;
if (draw_info->dash_pattern[n] == 0.0)
n=0;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
}
for (total_length=0.0; (total_length+length) <= maximum_length; )
{
total_length+=length;
if ((n & 0x01) != 0)
{
dash_polygon[0]=primitive_info[0];
dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length/maximum_length);
dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length/maximum_length);
j=1;
}
else
{
if ((j+1) > (ssize_t) (2*number_vertices))
break;
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length/maximum_length);
dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length/maximum_length);
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon);
}
n++;
if (draw_info->dash_pattern[n] == 0.0)
n=0;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
}
length-=(maximum_length-total_length);
if ((n & 0x01) != 0)
continue;
dash_polygon[j]=primitive_info[i];
dash_polygon[j].coordinates=1;
j++;
}
if ((total_length <= maximum_length) && ((n & 0x01) == 0) && (j > 1))
{
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x+=MagickEpsilon;
dash_polygon[j].point.y+=MagickEpsilon;
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon);
}
dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
return(status != 0 ? MagickTrue : MagickFalse);
}
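/*
  Worked example (exposition only): tracing the offset prologue above with
  dash_pattern={12,4,0}, scale=1, and dash_offset=14.  The first pass
  consumes the opening dash (length 12-0.5=11.5 after the half-pixel cap
  adjustment), leaving offset=2.5 with n=1; the second pass finds the offset
  smaller than the gap length 4+0.5=4.5 and stops with length=2.0.  Since n
  is odd, the stroke begins inside a gap with 2.0 units of it remaining,
  which is why the main loop tests (n & 0x01) to decide between opening a
  new dash and emitting one.
*/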
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
*/
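/*
  Illustrative usage sketch (not part of the library): render an MVG
  primitive string onto an image.  The primitive text is an arbitrary
  example; an "@" prefix would read the MVG from a file instead.  The
  function name is invented for exposition.
*/
static MagickBooleanType DrawImageSketch(Image *image)
{
  DrawInfo
    *draw_info;

  MagickBooleanType
    status;

  draw_info=AcquireDrawInfo();
  (void) CloneString(&draw_info->primitive,
    "stroke blue fill none rectangle 20,20 120,80");
  status=DrawImage(image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}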
static inline MagickBooleanType IsPoint(const char *point)
{
char
*p;
double
value;
value=StringToDouble(point,&p);
return((value == 0.0) && (p == point) ? MagickFalse : MagickTrue);
}
static inline void TracePoint(PrimitiveInfo *primitive_info,
const PointInfo point)
{
primitive_info->coordinates=1;
primitive_info->point=point;
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
key[2*MaxTextExtent],
keyword[MaxTextExtent],
geometry[MaxTextExtent],
name[MaxTextExtent],
pattern[MaxTextExtent],
*primitive,
*token;
const char
*q;
double
angle,
factor,
primitive_extent;
DrawInfo
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
PointInfo
point;
PixelPacket
start_color;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
length,
number_points;
ssize_t
j,
k,
n;
/*
Ensure the annotation info is valid.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (*draw_info->primitive != '@')
primitive=AcquireString(draw_info->primitive);
else
primitive=FileToString(draw_info->primitive+1,~0,&image->exception);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"MVG",primitive);
n=0;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(
sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=6553;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
(void) QueryColorDatabase("#000000",&start_color,&image->exception);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
GetMagickToken(q,&q,keyword);
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
GetMagickToken(q,&q,token);
affine.sx=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.rx=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.ry=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.sy=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.tx=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.ty=StringToDouble(token,(char **) NULL);
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) QueryColorDatabase(token,&graphic_context[n]->border_color,
&image->exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("clip-path",keyword) == 0)
{
/*
Create clip mask.
*/
GetMagickToken(q,&q,token);
(void) CloneString(&graphic_context[n]->clip_mask,token);
(void) DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask);
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetMagickToken(q,&q,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
GetMagickToken(q,&q,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(&current);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
GetMagickToken(q,&q,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) FormatLocaleString(pattern,MaxTextExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern);
else
{
double
opacity;
opacity=graphic_context[n]->fill.opacity;
status&=QueryColorDatabase(token,&graphic_context[n]->fill,
&image->exception);
graphic_context[n]->fill.opacity=opacity;
if (status == MagickFalse)
{
ImageInfo
*pattern_info;
pattern_info=AcquireImageInfo();
(void) CopyMagickString(pattern_info->filename,token,
MaxTextExtent);
graphic_context[n]->fill_pattern=
ReadImage(pattern_info,&image->exception);
CatchException(&image->exception);
pattern_info=DestroyImageInfo(pattern_info);
}
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
GetMagickToken(q,&q,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
graphic_context[n]->fill.opacity=ClampToQuantum((MagickRealType)
QuantumRange*(1.0-factor*StringToDouble(token,(char **) NULL)));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetMagickToken(q,&q,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *)
RelinquishMagickMemory(graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->pointsize=StringToDouble(token,(char **) NULL);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
GetMagickToken(q,&q,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
GetMagickToken(q,&q,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->weight=StringToUnsignedLong(token);
if (LocaleCompare(token,"all") == 0)
graphic_context[n]->weight=0;
if (LocaleCompare(token,"bold") == 0)
graphic_context[n]->weight=700;
if (LocaleCompare(token,"bolder") == 0)
if (graphic_context[n]->weight <= 800)
graphic_context[n]->weight+=100;
if (LocaleCompare(token,"lighter") == 0)
if (graphic_context[n]->weight >= 100)
graphic_context[n]->weight-=100;
if (LocaleCompare(token,"normal") == 0)
graphic_context[n]->weight=400;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
GetMagickToken(q,&q,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
GetMagickToken(q,&q,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
(char **) NULL);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
(char **) NULL);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->kerning=StringToDouble(token,(char **) NULL);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("matte",keyword) == 0)
{
primitive_type=MattePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
GetMagickToken(q,&q,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
graphic_context[n]->opacity=ClampToQuantum((MagickRealType)
QuantumRange*(1.0-((1.0-QuantumScale*graphic_context[n]->opacity)*
factor*StringToDouble(token,(char **) NULL))));
graphic_context[n]->fill.opacity=graphic_context[n]->opacity;
graphic_context[n]->stroke.opacity=graphic_context[n]->opacity;
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
GetMagickToken(q,&q,token);
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
break;
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),DrawError,
"UnbalancedGraphicContextPushPop","`%s'",token);
n=0;
break;
}
if (graphic_context[n]->clip_mask != (char *) NULL)
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
(void) SetImageClipMask(image,(Image *) NULL);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("pattern",token) == 0)
break;
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
GetMagickToken(q,&q,token);
if (LocaleCompare("clip-path",token) == 0)
{
char
name[MaxTextExtent];
GetMagickToken(q,&q,token);
(void) FormatLocaleString(name,MaxTextExtent,"%s",token);
for (p=q; *q != '\0'; )
{
GetMagickToken(q,&q,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetMagickToken(q,(const char **) NULL,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
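/*
Copy the clip-path body verbatim (everything up to, but excluding, the
matching "pop" keyword) and register it as an image artifact.
*/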
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) SetImageArtifact(image,name,token);
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MaxTextExtent],
name[MaxTextExtent],
type[MaxTextExtent];
SegmentInfo
segment;
GetMagickToken(q,&q,token);
(void) CopyMagickString(name,token,MaxTextExtent);
GetMagickToken(q,&q,token);
(void) CopyMagickString(type,token,MaxTextExtent);
GetMagickToken(q,&q,token);
segment.x1=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
segment.y1=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
segment.x2=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
segment.y2=StringToDouble(token,(char **) NULL);
if (LocaleCompare(type,"radial") == 0)
{
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
}
for (p=q; *q != '\0'; )
{
GetMagickToken(q,&q,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetMagickToken(q,(const char **) NULL,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
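/*
Map the gradient segment through the current transformation matrix to
get the gradient geometry in device space.
*/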
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MaxTextExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
(void) FormatLocaleString(geometry,MaxTextExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MaxTextExtent],
name[MaxTextExtent];
RectangleInfo
bounds;
GetMagickToken(q,&q,token);
(void) CopyMagickString(name,token,MaxTextExtent);
GetMagickToken(q,&q,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,(char **) NULL)-
0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,(char **) NULL)-
0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
bounds.width=(size_t) floor(StringToDouble(token,
(char **) NULL)+0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
bounds.height=(size_t) floor(StringToDouble(token,
(char **) NULL)+0.5);
for (p=q; *q != '\0'; )
{
GetMagickToken(q,&q,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetMagickToken(q,(const char **) NULL,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MaxTextExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
(void) FormatLocaleString(geometry,MaxTextExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),ResourceLimitError,
"MemoryAllocationFailed","`%s'",image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
break;
}
if (LocaleCompare("defs",token) == 0)
break;
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
GetMagickToken(q,&q,token);
angle=StringToDouble(token,(char **) NULL);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
GetMagickToken(q,&q,token);
affine.sx=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.sy=StringToDouble(token,(char **) NULL);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
GetMagickToken(q,&q,token);
angle=StringToDouble(token,(char **) NULL);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
GetMagickToken(q,&q,token);
angle=StringToDouble(token,(char **) NULL);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelPacket
stop_color;
GetMagickToken(q,&q,token);
(void) QueryColorDatabase(token,&stop_color,&image->exception);
(void) GradientImage(image,LinearGradient,ReflectSpread,
&start_color,&stop_color);
start_color=stop_color;
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) FormatLocaleString(pattern,MaxTextExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern);
else
{
double
opacity;
opacity=graphic_context[n]->stroke.opacity;
status&=QueryColorDatabase(token,&graphic_context[n]->stroke,
&image->exception);
graphic_context[n]->stroke.opacity=opacity;
if (status == MagickFalse)
{
ImageInfo
*pattern_info;
pattern_info=AcquireImageInfo();
(void) CopyMagickString(pattern_info->filename,token,
MaxTextExtent);
graphic_context[n]->stroke_pattern=
ReadImage(pattern_info,&image->exception);
CatchException(&image->exception);
pattern_info=DestroyImageInfo(pattern_info);
}
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->stroke_antialias=
StringToLong(token) != 0 ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*p;
p=q;
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2UL*x+1UL),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),ResourceLimitError,
"MemoryAllocationFailed","`%s'",image->filename);
break;
}
for (j=0; j < x; j++)
{
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
(char **) NULL);
}
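/*
As in SVG, an odd number of dash lengths is repeated once so the on/off
pattern has even length; a 0.0 sentinel terminates the list.
*/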
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->dash_offset=StringToDouble(token,
(char **) NULL);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
GetMagickToken(q,&q,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
GetMagickToken(q,&q,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
GetMagickToken(q,&q,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
graphic_context[n]->stroke.opacity=ClampToQuantum((MagickRealType)
QuantumRange*(1.0-factor*StringToDouble(token,(char **) NULL)));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->stroke_width=StringToDouble(token,
(char **) NULL);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
GetMagickToken(q,&q,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
GetMagickToken(q,&q,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->text_antialias=
StringToLong(token) != 0 ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) QueryColorDatabase(token,&graphic_context[n]->undercolor,
&image->exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
GetMagickToken(q,&q,token);
affine.tx=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.ty=StringToDouble(token,(char **) NULL);
break;
}
status=MagickFalse;
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
(char **) NULL)-0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
(char **) NULL)-0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,(char **) NULL)+0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,(char **) NULL)+0.5);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((affine.sx != 1.0) || (affine.rx != 0.0) || (affine.ry != 0.0) ||
(affine.sy != 1.0) || (affine.tx != 0.0) || (affine.ty != 0.0))
{
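/*
Compose the parsed transform with the current matrix (new = current x
affine), so the new transform applies in the local coordinate space.
*/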
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",
(int) (q-p),p);
continue;
}
/*
Parse the primitive attributes.
*/
i=0;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
GetMagickToken(q,&q,token);
point.x=StringToDouble(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
point.y=StringToDouble(token,(char **) NULL);
GetMagickToken(q,(const char **) NULL,token);
if (*token == ',')
GetMagickToken(q,&q,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
i++;
if (i < (ssize_t) number_points)
continue;
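/*
Out of space: double the point buffer for amortized O(1) growth.
*/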
number_points<<=1;
primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info,
(size_t) number_points,sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
break;
}
}
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].text=(char *) NULL;
/*
Compute the bounding box of the primitive (used below to size circles,
arcs, and ellipses).
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
length=primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
length*=5;
break;
}
case RoundRectanglePrimitive:
{
length*=5+8*BezierQuantum;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates > 107)
(void) ThrowMagickException(&image->exception,GetMagickModule(),
DrawError,"TooManyBezierCoordinates","`%s'",token);
length=BezierQuantum*primitive_info[j].coordinates;
break;
}
case PathPrimitive:
{
char
*s,
*t;
GetMagickToken(q,&q,token);
length=1;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
length++;
}
length=length*BezierQuantum/2;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
length=2*((size_t) ceil((double) MagickPI*radius))+6*BezierQuantum+360;
break;
}
default:
break;
}
if ((size_t) (i+length) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=length+1;
primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info,
(size_t) number_points,sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
}
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
TraceRoundRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
TraceArc(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
TraceEllipse(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceCircle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
break;
case PolygonPrimitive:
{
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
TraceBezier(primitive_info+j,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
i=(ssize_t) (j+TracePath(primitive_info+j,token));
break;
}
case ColorPrimitive:
case MattePrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
GetMagickToken(q,&q,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
GetMagickToken(q,&q,token);
primitive_info[j].text=AcquireString(token);
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
GetMagickToken(q,&q,token);
primitive_info[j].text=AcquireString(token);
break;
}
}
if (primitive_info == (PrimitiveInfo *) NULL)
break;
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask);
status&=DrawPrimitive(image,graphic_context[n],primitive_info);
}
if (primitive_info->text != (char *) NULL)
primitive_info->text=(char *) RelinquishMagickMemory(
primitive_info->text);
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear or radial gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
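%  A minimal usage sketch (assumes the caller has already populated
%  draw_info->gradient with its type, stops, and bounding box, much as
%  DrawImage() does while parsing a "push gradient" MVG block):
%
%      DrawInfo *draw_info=CloneDrawInfo((ImageInfo *) NULL,(DrawInfo *) NULL);
%      ...populate draw_info->gradient (type, stops, bounding box)...
%      if (DrawGradientImage(image,draw_info) == MagickFalse)
%        CatchException(&image->exception);
%      draw_info=DestroyDrawInfo(draw_info);
%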
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
const ssize_t x,const ssize_t y)
{
switch (gradient->type)
{
case UndefinedGradient:
case LinearGradient:
{
double
gamma,
length,
offset,
scale;
PointInfo
p,
q;
const SegmentInfo
*gradient_vector;
gradient_vector=(&gradient->gradient_vector);
p.x=gradient_vector->x2-gradient_vector->x1;
p.y=gradient_vector->y2-gradient_vector->y1;
q.x=(double) x-gradient_vector->x1;
q.y=(double) y-gradient_vector->y1;
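/*
Scalar projection of q onto the gradient vector p: offset = (p.q)/|p|;
the caller divides by |p| again to normalize to [0,1].
*/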
length=sqrt(q.x*q.x+q.y*q.y);
gamma=sqrt(p.x*p.x+p.y*p.y)*length;
gamma=PerceptibleReciprocal(gamma);
scale=p.x*q.x+p.y*q.y;
offset=gamma*scale*length;
return(offset);
}
case RadialGradient:
{
double
length,
offset;
PointInfo
v;
v.x=(double) x-gradient->center.x;
v.y=(double) y-gradient->center.y;
length=sqrt(v.x*v.x+v.y*v.y);
if (gradient->spread == RepeatSpread)
return(length);
offset=length/gradient->radius;
return(offset);
}
}
return(0.0);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
const DrawInfo *draw_info)
{
CacheView
*image_view;
const GradientInfo
*gradient;
const SegmentInfo
*gradient_vector;
double
length;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickPixelPacket
zero;
PointInfo
point;
RectangleInfo
bounding_box;
ssize_t
y;
/*
Draw linear or radial gradient on image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
gradient=(&draw_info->gradient);
gradient_vector=(&gradient->gradient_vector);
point.x=gradient_vector->x2-gradient_vector->x1;
point.y=gradient_vector->y2-gradient_vector->y1;
length=sqrt(point.x*point.x+point.y*point.y);
bounding_box=gradient->bounding_box;
status=MagickTrue;
exception=(&image->exception);
GetMagickPixelPacket(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,1,1)
#endif
for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
{
double
alpha,
offset;
MagickPixelPacket
composite,
pixel;
register IndexPacket
*restrict indexes;
register ssize_t
i,
x;
register PixelPacket
*restrict q;
ssize_t
j;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
composite=zero;
offset=GetStopColorOffset(gradient,0,y);
if (gradient->type != RadialGradient)
offset/=length;
for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
switch (gradient->spread)
{
case UndefinedSpread:
case PadSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset/=length;
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if ((offset < 0.0) || (i == 0))
composite=gradient->stops[0].color;
else
if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case ReflectSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset/=length;
}
if (offset < 0.0)
offset=(-offset);
if ((ssize_t) fmod(offset,2.0) == 0)
offset=fmod(offset,1.0);
else
offset=1.0-fmod(offset,1.0);
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case RepeatSpread:
{
double
repeat;
MagickBooleanType
antialias;
antialias=MagickFalse;
repeat=0.0;
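/*
Fold the offset into one repeat cycle and flag pixels that straddle the
seam between cycles for antialiasing.
*/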
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type == LinearGradient)
{
repeat=fmod(offset,length);
if (repeat < 0.0)
repeat=length-fmod(-repeat,length);
else
repeat=fmod(offset,length);
antialias=(repeat < length) && ((repeat+1.0) > length) ?
MagickTrue : MagickFalse;
offset=repeat/length;
}
else
{
repeat=fmod(offset,gradient->radius);
if (repeat < 0.0)
repeat=gradient->radius-fmod(-repeat,gradient->radius);
else
repeat=fmod(offset,gradient->radius);
antialias=repeat+1.0 > gradient->radius ?
MagickTrue : MagickFalse;
offset=repeat/gradient->radius;
}
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
if (antialias != MagickFalse)
{
if (gradient->type == LinearGradient)
alpha=length-repeat;
else
alpha=gradient->radius-repeat;
i=0;
j=(ssize_t) gradient->number_stops-1L;
}
MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
}
MagickPixelCompositeOver(&composite,composite.opacity,&pixel,
pixel.opacity,&pixel);
SetPixelPacket(image,&pixel,q,indexes+x);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: the rendered pattern image.
%
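%  A typical call site (a sketch; "hatch" is a hypothetical pattern name that
%  must have been registered earlier as an image artifact, e.g. by a
%  "push pattern" MVG block):
%
%      Image *pattern=NewImageList();
%      status=DrawPatternPath(image,draw_info,"hatch",&pattern);
%      ...on success, pattern holds the rendered tile...
%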
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
const DrawInfo *draw_info,const char *name,Image **pattern)
{
char
property[MaxTextExtent];
const char
*geometry,
*path;
DrawInfo
*clone_info;
ImageInfo
*image_info;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
assert(name != (const char *) NULL);
(void) FormatLocaleString(property,MaxTextExtent,"%s",name);
path=GetImageArtifact(image,property);
if (path == (const char *) NULL)
return(MagickFalse);
(void) FormatLocaleString(property,MaxTextExtent,"%s-geometry",name);
geometry=GetImageArtifact(image,property);
if (geometry == (const char *) NULL)
return(MagickFalse);
if ((*pattern) != (Image *) NULL)
*pattern=DestroyImage(*pattern);
image_info=AcquireImageInfo();
image_info->size=AcquireString(geometry);
*pattern=AcquireImage(image_info);
image_info=DestroyImageInfo(image_info);
(void) QueryColorDatabase("#00000000",&(*pattern)->background_color,
&image->exception);
(void) SetImageBackgroundColor(*pattern);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"begin pattern-path %s %s",name,geometry);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill_pattern=NewImageList();
clone_info->stroke_pattern=NewImageList();
(void) CloneString(&clone_info->primitive,path);
status=DrawImage(*pattern,clone_info);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
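%  Rasterization is scanline-parallel: AcquirePolygonThreadSet() gives each
%  OpenMP thread its own PolygonInfo edge list so the per-edge scanline and
%  highwater caches can be updated without locking.
%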
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
register ssize_t
i;
assert(polygon_info != (PolygonInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (polygon_info[i] != (PolygonInfo *) NULL)
polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info)
{
PathInfo
*restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) ResetMagickMemory(polygon_info,0,(size_t)
GetMagickResourceLimit(ThreadResource)*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(draw_info,primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(draw_info,path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
static double GetOpacityPixel(PolygonInfo *polygon_info,const double mid,
const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
const ssize_t y,double *stroke_opacity)
{
double
alpha,
beta,
distance,
subpath_opacity;
PointInfo
delta;
register EdgeInfo
*p;
register const PointInfo
*q;
register ssize_t
i;
ssize_t
j,
winding_number;
/*
Compute fill & stroke opacity for this (x,y) point.
*/
*stroke_opacity=0.0;
subpath_opacity=0.0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= (p->bounds.y1-mid-0.5))
break;
if ((double) y > (p->bounds.y2+mid+0.5))
{
(void) DestroyEdge(polygon_info,(size_t) j);
continue;
}
if (((double) x <= (p->bounds.x1-mid-0.5)) ||
((double) x > (p->bounds.x2+mid+0.5)))
continue;
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) p->number_points; i++)
{
if ((double) y <= (p->points[i-1].y-mid-0.5))
break;
if ((double) y > (p->points[i].y+mid+0.5))
continue;
if (p->scanline != (double) y)
{
p->scanline=(double) y;
p->highwater=(size_t) i;
}
/*
Compute distance between a point and an edge.
*/
q=p->points+i-1;
delta.x=(q+1)->x-q->x;
delta.y=(q+1)->y-q->y;
beta=delta.x*(x-q->x)+delta.y*(y-q->y);
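/*
beta is the unnormalized projection of the point onto the edge segment:
beta < 0 selects endpoint q, beta > |delta|^2 selects endpoint q+1,
otherwise use the perpendicular distance.
*/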
if (beta < 0.0)
{
delta.x=(double) x-q->x;
delta.y=(double) y-q->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=delta.x*delta.x+delta.y*delta.y;
if (beta > alpha)
{
delta.x=(double) x-(q+1)->x;
delta.y=(double) y-(q+1)->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=1.0/alpha;
beta=delta.x*(y-q->y)-delta.y*(x-q->x);
distance=alpha*beta*beta;
}
}
/*
Compute stroke & subpath opacity.
*/
beta=0.0;
if (p->ghostline == MagickFalse)
{
alpha=mid+0.5;
if ((*stroke_opacity < 1.0) &&
(distance <= ((alpha+0.25)*(alpha+0.25))))
{
alpha=mid-0.5;
if (distance <= ((alpha+0.25)*(alpha+0.25)))
*stroke_opacity=1.0;
else
{
beta=1.0;
if (distance != 1.0)
beta=sqrt((double) distance);
alpha=beta-mid-0.5;
if (*stroke_opacity < ((alpha-0.25)*(alpha-0.25)))
*stroke_opacity=(alpha-0.25)*(alpha-0.25);
}
}
}
if ((fill == MagickFalse) || (distance > 1.0) || (subpath_opacity >= 1.0))
continue;
if (distance <= 0.0)
{
subpath_opacity=1.0;
continue;
}
if (distance > 1.0)
continue;
if (beta == 0.0)
{
beta=1.0;
if (distance != 1.0)
beta=sqrt(distance);
}
alpha=beta-1.0;
if (subpath_opacity < (alpha*alpha))
subpath_opacity=alpha*alpha;
}
}
/*
Compute fill opacity.
*/
if (fill == MagickFalse)
return(0.0);
if (subpath_opacity >= 1.0)
return(1.0);
/*
Determine winding number.
*/
winding_number=0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= p->bounds.y1)
break;
if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
continue;
if ((double) x > p->bounds.x2)
{
winding_number+=p->direction ? 1 : -1;
continue;
}
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) p->number_points; i++)
if ((double) y <= p->points[i].y)
break;
q=p->points+i-1;
if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
winding_number+=p->direction ? 1 : -1;
}
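/*
Even-odd rule: inside when the winding parity is odd; nonzero rule:
inside when the net winding number is nonzero.
*/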
if (fill_rule != NonZeroRule)
{
if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
return(1.0);
}
else
if (MagickAbsoluteValue(winding_number) != 0)
return(1.0);
return(subpath_opacity);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
CacheView
*image_view;
double
mid;
ExceptionInfo
*exception;
MagickBooleanType
fill,
status;
PolygonInfo
**restrict polygon_info;
register EdgeInfo
*p;
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
start,
stop,
y;
/*
Compute bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickSignature);
assert(primitive_info != (PrimitiveInfo *) NULL);
if (primitive_info->coordinates == 0)
return(MagickTrue);
polygon_info=AcquirePolygonThreadSet(draw_info,primitive_info);
if (polygon_info == (PolygonInfo **) NULL)
return(MagickFalse);
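/*
Debug aid: change the 0 to 1 to outline the polygon edge bounds.
*/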
if (0)
DrawBoundingRectangles(image,draw_info,polygon_info[0]);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
fill=(primitive_info->method == FillToBorderMethod) ||
(primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
bounds=polygon_info[0]->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
{
p=polygon_info[0]->edges+i;
if (p->bounds.x1 < bounds.x1)
bounds.x1=p->bounds.x1;
if (p->bounds.y1 < bounds.y1)
bounds.y1=p->bounds.y1;
if (p->bounds.x2 > bounds.x2)
bounds.x2=p->bounds.x2;
if (p->bounds.y2 > bounds.y2)
bounds.y2=p->bounds.y2;
}
bounds.x1-=(mid+1.0);
bounds.x1=bounds.x1 < 0.0 ? 0.0 : (size_t) ceil(bounds.x1-0.5) >=
image->columns ? (double) image->columns-1 : bounds.x1;
bounds.y1-=(mid+1.0);
bounds.y1=bounds.y1 < 0.0 ? 0.0 : (size_t) ceil(bounds.y1-0.5) >=
image->rows ? (double) image->rows-1 : bounds.y1;
bounds.x2+=(mid+1.0);
bounds.x2=bounds.x2 < 0.0 ? 0.0 : (size_t) floor(bounds.x2+0.5) >=
image->columns ? (double) image->columns-1 : bounds.x2;
bounds.y2+=(mid+1.0);
bounds.y2=bounds.y2 < 0.0 ? 0.0 : (size_t) floor(bounds.y2+0.5) >=
image->rows ? (double) image->rows-1 : bounds.y2;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
if (primitive_info->coordinates == 1)
{
/*
Draw point.
*/
start=(ssize_t) ceil(bounds.y1-0.5);
stop=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,1,1)
#endif
for (y=start; y <= stop; y++)
{
MagickBooleanType
sync;
register PixelPacket
*restrict q;
register ssize_t
x;
ssize_t
start,
stop;
if (status == MagickFalse)
continue;
start=(ssize_t) ceil(bounds.x1-0.5);
stop=(ssize_t) floor(bounds.x2+0.5);
x=start;
q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop-x+1),1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for ( ; x <= stop; x++)
{
if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
(y == (ssize_t) ceil(primitive_info->point.y-0.5)))
(void) GetStrokeColor(draw_info,x,y,q);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-polygon");
return(status);
}
/*
Draw polygon or line.
*/
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
start=(ssize_t) ceil(bounds.y1-0.5);
stop=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,1,1)
#endif
for (y=start; y <= stop; y++)
{
const int
id = GetOpenMPThreadId();
double
fill_opacity,
stroke_opacity;
PixelPacket
fill_color,
stroke_color;
register PixelPacket
*restrict q;
register ssize_t
x;
ssize_t
start,
stop;
if (status == MagickFalse)
continue;
start=(ssize_t) ceil(bounds.x1-0.5);
stop=(ssize_t) floor(bounds.x2+0.5);
q=GetCacheViewAuthenticPixels(image_view,start,y,(size_t) (stop-start+1),1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=start; x <= stop; x++)
{
/*
Fill and/or stroke.
*/
fill_opacity=GetOpacityPixel(polygon_info[id],mid,fill,
draw_info->fill_rule,x,y,&stroke_opacity);
if (draw_info->stroke_antialias == MagickFalse)
{
fill_opacity=fill_opacity > 0.25 ? 1.0 : 0.0;
stroke_opacity=stroke_opacity > 0.25 ? 1.0 : 0.0;
}
(void) GetFillColor(draw_info,x,y,&fill_color);
fill_opacity=(double) (QuantumRange-fill_opacity*(QuantumRange-
fill_color.opacity));
MagickCompositeOver(&fill_color,(MagickRealType) fill_opacity,q,
(MagickRealType) q->opacity,q);
(void) GetStrokeColor(draw_info,x,y,&stroke_color);
stroke_opacity=(double) (QuantumRange-stroke_opacity*(QuantumRange-
stroke_color.opacity));
MagickCompositeOver(&stroke_color,(MagickRealType) stroke_opacity,q,
(MagickRealType) q->opacity,q);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
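%  A minimal sketch (hypothetical coordinates) that renders a single point:
%
%      PrimitiveInfo primitive_info[2];
%      (void) ResetMagickMemory(primitive_info,0,sizeof(primitive_info));
%      primitive_info[0].primitive=PointPrimitive;
%      primitive_info[0].point.x=10.0;
%      primitive_info[0].point.y=10.0;
%      primitive_info[0].coordinates=1;
%      primitive_info[1].primitive=UndefinedPrimitive;
%      (void) DrawPrimitive(image,draw_info,primitive_info);
%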
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
const char
*methods[] =
{
"point",
"replace",
"floodfill",
"filltoborder",
"reset",
"?"
};
PointInfo
p,
q,
point;
register ssize_t
i,
x;
ssize_t
coordinates,
y;
x=(ssize_t) ceil(primitive_info->point.x-0.5);
y=(ssize_t) ceil(primitive_info->point.y-0.5);
switch (primitive_info->primitive)
{
case PointPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ColorPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case MattePrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"MattePrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case TextPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"TextPrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
case ImagePrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ImagePrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
default:
break;
}
coordinates=0;
p=primitive_info[0].point;
q.x=(-1.0);
q.y=(-1.0);
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
if (coordinates <= 0)
{
coordinates=(ssize_t) primitive_info[i].coordinates;
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin open (%.20g)",(double) coordinates);
p=point;
}
point=primitive_info[i].point;
if ((fabs(q.x-point.x) >= MagickEpsilon) ||
(fabs(q.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
q=point;
coordinates--;
if (coordinates > 0)
continue;
if ((fabs(p.x-point.x) >= MagickEpsilon) ||
(fabs(p.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
(double) coordinates);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
(double) coordinates);
}
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickStatusType
status;
register ssize_t
i,
x;
ssize_t
y;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-primitive");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" affine: %g %g %g %g %g %g",draw_info->affine.sx,
draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
draw_info->affine.tx,draw_info->affine.ty);
}
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsPixelGray(&draw_info->fill) == MagickFalse) ||
(IsPixelGray(&draw_info->stroke) == MagickFalse)))
(void) SetImageColorspace(image,sRGBColorspace);
status=MagickTrue;
exception=(&image->exception);
x=(ssize_t) ceil(primitive_info->point.x-0.5);
y=(ssize_t) ceil(primitive_info->point.y-0.5);
image_view=AcquireAuthenticCacheView(image,exception);
switch (primitive_info->primitive)
{
case PointPrimitive:
{
PixelPacket
fill_color;
PixelPacket
*q;
if ((y < 0) || (y >= (ssize_t) image->rows))
break;
if ((x < 0) || (x >= (ssize_t) image->columns))
break;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (PixelPacket *) NULL)
break;
(void) GetFillColor(draw_info,x,y,&fill_color);
MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,q,
(MagickRealType) q->opacity,q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ColorPrimitive:
{
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelPacket
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (PixelPacket *) NULL)
break;
(void) GetFillColor(draw_info,x,y,q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
MagickBooleanType
sync;
PixelPacket
target;
status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsColorSimilar(image,q,&target) == MagickFalse)
{
q++;
continue;
}
(void) GetFillColor(draw_info,x,y,q);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
MagickPixelPacket
target;
(void) GetOneVirtualMagickPixel(image,x,y,&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(MagickRealType) draw_info->border_color.red;
target.green=(MagickRealType) draw_info->border_color.green;
target.blue=(MagickRealType) draw_info->border_color.blue;
}
status&=FloodfillPaintImage(image,DefaultChannels,draw_info,&target,x,
y,primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue);
break;
}
case ResetMethod:
{
MagickBooleanType
sync;
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*restrict q;
register ssize_t
x;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) GetFillColor(draw_info,x,y,q);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
}
break;
}
case MattePrimitive:
{
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelPacket
pixel;
PixelPacket
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (PixelPacket *) NULL)
break;
(void) GetFillColor(draw_info,x,y,&pixel);
SetPixelOpacity(q,pixel.opacity);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
MagickBooleanType
sync;
PixelPacket
pixel,
target;
status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*restrict q;
register ssize_t
x;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsColorSimilar(image,q,&target) == MagickFalse)
{
q++;
continue;
}
(void) GetFillColor(draw_info,x,y,&pixel);
SetPixelOpacity(q,pixel.opacity);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
MagickPixelPacket
target;
(void) GetOneVirtualMagickPixel(image,x,y,&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(MagickRealType) draw_info->border_color.red;
target.green=(MagickRealType) draw_info->border_color.green;
target.blue=(MagickRealType) draw_info->border_color.blue;
}
status&=FloodfillPaintImage(image,OpacityChannel,draw_info,&target,x,
y,primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue);
break;
}
case ResetMethod:
{
MagickBooleanType
sync;
PixelPacket
pixel;
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*restrict q;
register ssize_t
x;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) GetFillColor(draw_info,x,y,&pixel);
SetPixelOpacity(q,pixel.opacity);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
}
break;
}
case TextPrimitive:
{
char
geometry[MaxTextExtent];
DrawInfo
*clone_info;
if (primitive_info->text == (char *) NULL)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->text,primitive_info->text);
(void) FormatLocaleString(geometry,MaxTextExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
(void) CloneString(&clone_info->geometry,geometry);
status&=AnnotateImage(image,clone_info);
clone_info=DestroyDrawInfo(clone_info);
break;
}
case ImagePrimitive:
{
AffineMatrix
affine;
char
composite_geometry[MaxTextExtent];
Image
*composite_image;
ImageInfo
*clone_info;
RectangleInfo
geometry;
ssize_t
x1,
y1;
if (primitive_info->text == (char *) NULL)
break;
clone_info=AcquireImageInfo();
if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
composite_image=ReadInlineImage(clone_info,primitive_info->text,
&image->exception);
else
{
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MaxTextExtent);
composite_image=ReadImage(clone_info,&image->exception);
}
clone_info=DestroyImageInfo(clone_info);
if (composite_image == (Image *) NULL)
break;
(void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
NULL,(void *) NULL);
x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
{
char
geometry[MaxTextExtent];
/*
Resize image.
*/
(void) FormatLocaleString(geometry,MaxTextExtent,"%gx%g!",
primitive_info[1].point.x,primitive_info[1].point.y);
composite_image->filter=image->filter;
(void) TransformImage(&composite_image,(char *) NULL,geometry);
}
if (composite_image->matte == MagickFalse)
(void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel);
if (draw_info->opacity != OpaqueOpacity)
(void) SetImageOpacity(composite_image,draw_info->opacity);
SetGeometry(image,&geometry);
image->gravity=draw_info->gravity;
geometry.x=x;
geometry.y=y;
(void) FormatLocaleString(composite_geometry,MaxTextExtent,
"%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
composite_image->rows,(double) geometry.x,(double) geometry.y);
(void) ParseGravityGeometry(image,composite_geometry,&geometry,
&image->exception);
affine=draw_info->affine;
affine.tx=(double) geometry.x;
affine.ty=(double) geometry.y;
composite_image->interpolate=image->interpolate;
if (draw_info->compose == OverCompositeOp)
(void) DrawAffineImage(image,composite_image,&affine);
else
(void) CompositeImage(image,draw_info->compose,composite_image,
geometry.x,geometry.y);
composite_image=DestroyImage(composite_image);
break;
}
default:
{
double
mid,
scale;
DrawInfo
*clone_info;
if (IsEventLogging() != MagickFalse)
LogPrimitiveInfo(primitive_info);
scale=ExpandAffine(&draw_info->affine);
if ((draw_info->dash_pattern != (double *) NULL) &&
(draw_info->dash_pattern[0] != 0.0) &&
((scale*draw_info->stroke_width) >= MagickEpsilon) &&
(draw_info->stroke.opacity != (Quantum) TransparentOpacity))
{
/*
Draw dash polygon.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.opacity=(Quantum) TransparentOpacity;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info);
clone_info=DestroyDrawInfo(clone_info);
(void) DrawDashPolygon(draw_info,primitive_info,image);
break;
}
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
if ((mid > 1.0) &&
(draw_info->stroke.opacity != (Quantum) TransparentOpacity))
{
MagickBooleanType
closed_path;
/*
Draw strokes while respecting line cap/join attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
closed_path=
(primitive_info[i-1].point.x == primitive_info[0].point.x) &&
(primitive_info[i-1].point.y == primitive_info[0].point.y) ?
MagickTrue : MagickFalse;
i=(ssize_t) primitive_info[0].coordinates;
if ((((draw_info->linecap == RoundCap) ||
(closed_path != MagickFalse)) &&
(draw_info->linejoin == RoundJoin)) ||
(primitive_info[i].primitive != UndefinedPrimitive))
{
(void) DrawPolygonPrimitive(image,draw_info,primitive_info);
break;
}
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.opacity=(Quantum) TransparentOpacity;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info);
clone_info=DestroyDrawInfo(clone_info);
status&=DrawStrokePolygon(image,draw_info,primitive_info);
break;
}
status&=DrawPolygonPrimitive(image,draw_info,primitive_info);
break;
}
}
image_view=DestroyCacheView(image_view);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static void DrawRoundLinecap(Image *image,const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info)
{
PrimitiveInfo
linecap[5];
register ssize_t
i;
for (i=0; i < 4; i++)
linecap[i]=(*primitive_info);
linecap[0].coordinates=4;
linecap[1].point.x+=(double) (10.0*MagickEpsilon);
linecap[2].point.x+=(double) (10.0*MagickEpsilon);
linecap[2].point.y+=(double) (10.0*MagickEpsilon);
linecap[3].point.y+=(double) (10.0*MagickEpsilon);
linecap[4].primitive=UndefinedPrimitive;
(void) DrawPolygonPrimitive(image,draw_info,linecap);
}
static MagickBooleanType DrawStrokePolygon(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
DrawInfo
*clone_info;
MagickBooleanType
closed_path;
MagickStatusType
status;
PrimitiveInfo
*stroke_polygon;
register const PrimitiveInfo
*p,
*q;
/*
Draw stroked polygon.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-stroke-polygon");
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill=draw_info->stroke;
if (clone_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
if (clone_info->stroke_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
MagickTrue,&clone_info->stroke_pattern->exception);
clone_info->stroke.opacity=(Quantum) TransparentOpacity;
clone_info->stroke_width=0.0;
clone_info->fill_rule=NonZeroRule;
status=MagickTrue;
for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
{
stroke_polygon=TraceStrokePolygon(draw_info,p);
status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon);
if (status == 0)
break;
stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
q=p+p->coordinates-1;
closed_path=(q->point.x == p->point.x) && (q->point.y == p->point.y) ?
MagickTrue : MagickFalse;
if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
{
DrawRoundLinecap(image,draw_info,p);
DrawRoundLinecap(image,draw_info,q);
}
}
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-stroke-polygon");
return(status != 0 ? MagickTrue : MagickFalse);
}
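/*
Design note: TraceStrokePolygon() returns the outline of the stroked path,
so DrawStrokePolygon() fills that outline with the stroke color; that is why
clone_info->fill is set to draw_info->stroke and the clone's own stroke is
made transparent above.
*/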
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(affine_matrix != (AffineMatrix *) NULL);
(void) ResetMagickMemory(affine_matrix,0,sizeof(*affine_matrix));
affine_matrix->sx=1.0;
affine_matrix->sy=1.0;
}
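/*
Illustrative sketch (not library code): with the identity matrix a point maps
to itself, since x' = sx*x + ry*y + tx and y' = rx*x + sy*y + ty.
*/
#if 0
static void ExampleIdentityAffine(void)
{
AffineMatrix
affine;
GetAffineMatrix(&affine);
/* affine is now { sx=1, rx=0, ry=0, sy=1, tx=0, ty=0 }: a no-op transform. */
}
#endif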
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
const char
*option;
ExceptionInfo
*exception;
ImageInfo
*clone_info;
/*
Initialize draw attributes.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info != (DrawInfo *) NULL);
(void) ResetMagickMemory(draw_info,0,sizeof(*draw_info));
clone_info=CloneImageInfo(image_info);
GetAffineMatrix(&draw_info->affine);
exception=AcquireExceptionInfo();
(void) QueryColorDatabase("#000F",&draw_info->fill,exception);
(void) QueryColorDatabase("#FFF0",&draw_info->stroke,exception);
draw_info->stroke_antialias=clone_info->antialias;
draw_info->stroke_width=1.0;
draw_info->opacity=OpaqueOpacity;
draw_info->fill_rule=EvenOddRule;
draw_info->linecap=ButtCap;
draw_info->linejoin=MiterJoin;
draw_info->miterlimit=10;
draw_info->decorate=NoDecoration;
if (clone_info->font != (char *) NULL)
draw_info->font=AcquireString(clone_info->font);
if (clone_info->density != (char *) NULL)
draw_info->density=AcquireString(clone_info->density);
draw_info->text_antialias=clone_info->antialias;
draw_info->pointsize=12.0;
if (clone_info->pointsize != 0.0)
draw_info->pointsize=clone_info->pointsize;
draw_info->undercolor.opacity=(Quantum) TransparentOpacity;
draw_info->border_color=clone_info->border_color;
draw_info->compose=OverCompositeOp;
if (clone_info->server_name != (char *) NULL)
draw_info->server_name=AcquireString(clone_info->server_name);
draw_info->render=MagickTrue;
draw_info->debug=IsEventLogging();
option=GetImageOption(clone_info,"encoding");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->encoding,option);
option=GetImageOption(clone_info,"kerning");
if (option != (const char *) NULL)
draw_info->kerning=StringToDouble(option,(char **) NULL);
option=GetImageOption(clone_info,"interline-spacing");
if (option != (const char *) NULL)
draw_info->interline_spacing=StringToDouble(option,(char **) NULL);
draw_info->direction=UndefinedDirection;
option=GetImageOption(clone_info,"interword-spacing");
if (option != (const char *) NULL)
draw_info->interword_spacing=StringToDouble(option,(char **) NULL);
option=GetImageOption(clone_info,"direction");
if (option != (const char *) NULL)
draw_info->direction=(DirectionType) ParseCommandOption(
MagickDirectionOptions,MagickFalse,option);
option=GetImageOption(clone_info,"fill");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&draw_info->fill,exception);
option=GetImageOption(clone_info,"stroke");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&draw_info->stroke,exception);
option=GetImageOption(clone_info,"strokewidth");
if (option != (const char *) NULL)
draw_info->stroke_width=StringToDouble(option,(char **) NULL);
option=GetImageOption(clone_info,"undercolor");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&draw_info->undercolor,exception);
option=GetImageOption(clone_info,"gravity");
if (option != (const char *) NULL)
draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
exception=DestroyExceptionInfo(exception);
draw_info->signature=MagickSignature;
clone_info=DestroyImageInfo(clone_info);
}
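/*
Usage sketch (illustrative only; CloneDrawInfo() is the usual entry point and
performs this initialization when given a NULL source draw_info):
*/
#if 0
static void ExampleGetDrawInfo(void)
{
DrawInfo
*draw_info;
ImageInfo
*image_info;
image_info=AcquireImageInfo();
draw_info=(DrawInfo *) AcquireMagickMemory(sizeof(*draw_info));
GetDrawInfo(image_info,draw_info);
/* ... render with draw_info ... */
draw_info=DestroyDrawInfo(draw_info);
image_info=DestroyImageInfo(image_info);
}
#endif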
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the binomial coefficient C(n,k), the number of ways
% to choose k items from n.
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n: the total number of items.
%
% o k: the number of items chosen.
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
double
r;
register ssize_t
i;
r=1.0;
for (i=k+1; i <= n; i++)
r*=i;
for (i=1; i <= (n-k); i++)
r/=i;
return(r);
}
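/*
Worked example: Permutate(4,2) evaluates (3*4)/(1*2)=6, which is C(4,2); the
results serve as the binomial (Bernstein) weights in TraceBezier() below.
*/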
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static void TraceArc(PrimitiveInfo *primitive_info,const PointInfo start,
const PointInfo end,const PointInfo degrees)
{
PointInfo
center,
radii;
center.x=0.5*(end.x+start.x);
center.y=0.5*(end.y+start.y);
radii.x=fabs(center.x-start.x);
radii.y=fabs(center.y-start.y);
TraceEllipse(primitive_info,center,radii,degrees);
}
static void TraceArcPath(PrimitiveInfo *primitive_info,const PointInfo start,
const PointInfo end,const PointInfo arc,const double angle,
const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
double
alpha,
beta,
delta,
factor,
gamma,
theta;
PointInfo
center,
points[3],
radii;
register double
cosine,
sine;
register PrimitiveInfo
*p;
register ssize_t
i;
size_t
arc_segments;
if ((start.x == end.x) && (start.y == end.y))
{
TracePoint(primitive_info,end);
return;
}
radii.x=fabs(arc.x);
radii.y=fabs(arc.y);
if ((radii.x == 0.0) || (radii.y == 0.0))
{
TraceLine(primitive_info,start,end);
return;
}
cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
(radii.y*radii.y);
if (delta < MagickEpsilon)
{
TraceLine(primitive_info,start,end);
return;
}
if (delta > 1.0)
{
radii.x*=sqrt((double) delta);
radii.y*=sqrt((double) delta);
}
points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
alpha=points[1].x-points[0].x;
beta=points[1].y-points[0].y;
factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
if (factor <= 0.0)
factor=0.0;
else
{
factor=sqrt((double) factor);
if (sweep == large_arc)
factor=(-factor);
}
center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
alpha=atan2(points[0].y-center.y,points[0].x-center.x);
theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
if ((theta < 0.0) && (sweep != MagickFalse))
theta+=2.0*MagickPI;
else
if ((theta > 0.0) && (sweep == MagickFalse))
theta-=2.0*MagickPI;
arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
MagickEpsilon))));
p=primitive_info;
for (i=0; i < (ssize_t) arc_segments; i++)
{
beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
sin(fmod((double) beta,DegreesToRadians(360.0)));
points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
(p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
points[0].y);
(p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
points[0].y);
(p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
points[1].y);
(p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
points[1].y);
(p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
points[2].y);
(p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
points[2].y);
if (i == (ssize_t) (arc_segments-1))
(p+3)->point=end;
TraceBezier(p,4);
p+=p->coordinates;
}
primitive_info->coordinates=(size_t) (p-primitive_info);
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
}
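/*
Note on the method above: the SVG-style endpoint parameterization (start,
end, radii, rotation, flags) is converted to a center-and-angles form, the
sweep is split into segments of at most a quarter turn, and each segment is
approximated by one cubic Bezier whose control-point distance is
gamma=(8/3)*sin(beta/2)^2/sin(beta) for a segment spanning angle beta.
*/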
static void TraceBezier(PrimitiveInfo *primitive_info,
const size_t number_coordinates)
{
double
alpha,
*coefficients,
weight;
PointInfo
end,
point,
*points;
register PrimitiveInfo
*p;
register ssize_t
i,
j;
size_t
control_points,
quantum;
/*
Allocate coefficients.
*/
quantum=number_coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
for (j=i+1; j < (ssize_t) number_coordinates; j++)
{
alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
if (alpha > (double) quantum)
quantum=(size_t) alpha;
alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
if (alpha > (double) quantum)
quantum=(size_t) alpha;
}
}
quantum=(size_t) MagickMin((double) quantum/number_coordinates,
(double) BezierQuantum);
control_points=quantum*number_coordinates;
coefficients=(double *) AcquireQuantumMemory((size_t)
number_coordinates,sizeof(*coefficients));
points=(PointInfo *) AcquireQuantumMemory((size_t) control_points,
sizeof(*points));
if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
/*
Compute bezier points.
*/
end=primitive_info[number_coordinates-1].point;
for (i=0; i < (ssize_t) number_coordinates; i++)
coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
weight=0.0;
for (i=0; i < (ssize_t) control_points; i++)
{
p=primitive_info;
point.x=0.0;
point.y=0.0;
alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
for (j=0; j < (ssize_t) number_coordinates; j++)
{
point.x+=alpha*coefficients[j]*p->point.x;
point.y+=alpha*coefficients[j]*p->point.y;
alpha*=weight/(1.0-weight);
p++;
}
points[i]=point;
weight+=1.0/control_points;
}
/*
Bezier curves are just short segmented polys.
*/
p=primitive_info;
for (i=0; i < (ssize_t) control_points; i++)
{
TracePoint(p,points[i]);
p+=p->coordinates;
}
TracePoint(p,end);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
}
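/*
Note on the evaluation above: for parameter t (weight), alpha starts at
(1-t)^(n-1) and is multiplied by t/(1-t) each step, so term j contributes
C(n-1,j)*t^j*(1-t)^(n-1-j)*P[j], the Bernstein form of a degree n-1 Bezier
curve sampled at evenly spaced values of t.
*/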
static void TraceCircle(PrimitiveInfo *primitive_info,const PointInfo start,
const PointInfo end)
{
double
alpha,
beta,
radius;
PointInfo
offset,
degrees;
alpha=end.x-start.x;
beta=end.y-start.y;
radius=hypot((double) alpha,(double) beta);
offset.x=(double) radius;
offset.y=(double) radius;
degrees.x=0.0;
degrees.y=360.0;
TraceEllipse(primitive_info,start,offset,degrees);
}
static void TraceEllipse(PrimitiveInfo *primitive_info,const PointInfo start,
const PointInfo stop,const PointInfo degrees)
{
double
delta,
step,
y;
PointInfo
angle,
point;
register PrimitiveInfo
*p;
register ssize_t
i;
/*
Ellipses are just short segmented polys.
*/
if ((stop.x == 0.0) && (stop.y == 0.0))
{
TracePoint(primitive_info,start);
return;
}
delta=2.0/MagickMax(stop.x,stop.y);
step=MagickPI/8.0;
if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
step=MagickPI/(4*(MagickPI/delta/2+0.5));
angle.x=DegreesToRadians(degrees.x);
y=degrees.y;
while (y < degrees.x)
y+=360.0;
angle.y=(double) (DegreesToRadians(y)-MagickEpsilon);
for (p=primitive_info; angle.x < angle.y; angle.x+=step)
{
point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*stop.x+start.x;
point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*stop.y+start.y;
TracePoint(p,point);
p+=p->coordinates;
}
point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*stop.x+start.x;
point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*stop.y+start.y;
TracePoint(p,point);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
}
static void TraceLine(PrimitiveInfo *primitive_info,const PointInfo start,
const PointInfo end)
{
TracePoint(primitive_info,start);
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
{
primitive_info->primitive=PointPrimitive;
primitive_info->coordinates=1;
return;
}
TracePoint(primitive_info+1,end);
(primitive_info+1)->primitive=primitive_info->primitive;
primitive_info->coordinates=2;
}
static size_t TracePath(PrimitiveInfo *primitive_info,const char *path)
{
char
token[MaxTextExtent];
const char
*p;
double
x,
y;
int
attribute,
last_attribute;
PointInfo
end,
points[4],
point,
start;
PrimitiveType
primitive_type;
register PrimitiveInfo
*q;
register ssize_t
i;
size_t
number_coordinates,
z_count;
attribute=0;
point.x=0.0;
point.y=0.0;
start.x=0.0;
start.y=0.0;
number_coordinates=0;
z_count=0;
primitive_type=primitive_info->primitive;
q=primitive_info;
for (p=path; *p != '\0'; )
{
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == '\0')
break;
last_attribute=attribute;
attribute=(int) (*p++);
switch (attribute)
{
case 'a':
case 'A':
{
double
angle;
MagickBooleanType
large_arc,
sweep;
PointInfo
arc;
/*
Compute arc points.
*/
do
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
arc.x=StringToDouble(token,(char **) NULL);
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
arc.y=StringToDouble(token,(char **) NULL);
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
angle=StringToDouble(token,(char **) NULL);
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
x=StringToDouble(token,(char **) NULL);
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
y=StringToDouble(token,(char **) NULL);
end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
TraceArcPath(q,point,end,arc,angle,large_arc,sweep);
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'c':
case 'C':
{
/*
Compute bezier points.
*/
do
{
points[0]=point;
for (i=1; i < 4; i++)
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
x=StringToDouble(token,(char **) NULL);
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
y=StringToDouble(token,(char **) NULL);
end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
TraceBezier(q,4);
q+=q->coordinates;
point=end;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'H':
case 'h':
{
do
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
x=StringToDouble(token,(char **) NULL);
point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
TracePoint(q,point);
q+=q->coordinates;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'l':
case 'L':
{
do
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
x=StringToDouble(token,(char **) NULL);
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
y=StringToDouble(token,(char **) NULL);
point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
TracePoint(q,point);
q+=q->coordinates;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'M':
case 'm':
{
if (q != primitive_info)
{
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
}
i=0;
do
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
x=StringToDouble(token,(char **) NULL);
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
y=StringToDouble(token,(char **) NULL);
point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
if (i == 0)
start=point;
i++;
TracePoint(q,point);
q+=q->coordinates;
if ((i != 0) && (attribute == (int) 'M'))
{
TracePoint(q,point);
q+=q->coordinates;
}
} while (IsPoint(p) != MagickFalse);
break;
}
case 'q':
case 'Q':
{
/*
Compute bezier points.
*/
do
{
points[0]=point;
for (i=1; i < 3; i++)
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
x=StringToDouble(token,(char **) NULL);
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
y=StringToDouble(token,(char **) NULL);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
TraceBezier(q,3);
q+=q->coordinates;
point=end;
} while (IsPoint(p) != MagickFalse);
break;
}
case 's':
case 'S':
{
/*
Compute bezier points.
*/
do
{
points[0]=points[3];
points[1].x=2.0*points[3].x-points[2].x;
points[1].y=2.0*points[3].y-points[2].y;
for (i=2; i < 4; i++)
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
x=StringToDouble(token,(char **) NULL);
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
y=StringToDouble(token,(char **) NULL);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
points[i]=end;
}
if (strchr("CcSs",last_attribute) == (char *) NULL)
{
points[0]=points[2];
points[1]=points[3];
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
TraceBezier(q,4);
q+=q->coordinates;
point=end;
} while (IsPoint(p) != MagickFalse);
break;
}
case 't':
case 'T':
{
/*
Compute bezier points.
*/
do
{
points[0]=points[2];
points[1].x=2.0*points[2].x-points[1].x;
points[1].y=2.0*points[2].y-points[1].y;
for (i=2; i < 3; i++)
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
x=StringToDouble(token,(char **) NULL);
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
y=StringToDouble(token,(char **) NULL);
end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
points[i]=end;
}
if (strchr("QqTt",last_attribute) == (char *) NULL)
{
points[0]=points[2];
points[1]=points[3];
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
TraceBezier(q,3);
q+=q->coordinates;
point=end;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'v':
case 'V':
{
do
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
y=StringToDouble(token,(char **) NULL);
point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
TracePoint(q,point);
q+=q->coordinates;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'z':
case 'Z':
{
point=start;
TracePoint(q,point);
q+=q->coordinates;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
z_count++;
break;
}
default:
{
if (isalpha((int) ((unsigned char) attribute)) != 0)
(void) FormatLocaleFile(stderr,"attribute not recognized: %c\n",
attribute);
break;
}
}
}
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
q--;
q->primitive=primitive_type;
if (z_count > 1)
q->method=FillToBorderMethod;
}
q=primitive_info;
return(number_coordinates);
}
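/*
TracePath() above parses an SVG-style path string: A/a (elliptical arcs),
C/c and Q/q (cubic and quadratic Beziers), S/s and T/t (their smooth
variants), H/h, V/v, and L/l (lines), M/m (moveto), and Z/z (closepath);
uppercase commands take absolute coordinates, lowercase ones relative.
*/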
static void TraceRectangle(PrimitiveInfo *primitive_info,const PointInfo start,
const PointInfo end)
{
PointInfo
point;
register PrimitiveInfo
*p;
register ssize_t
i;
p=primitive_info;
TracePoint(p,start);
p+=p->coordinates;
point.x=start.x;
point.y=end.y;
TracePoint(p,point);
p+=p->coordinates;
TracePoint(p,end);
p+=p->coordinates;
point.x=end.x;
point.y=start.y;
TracePoint(p,point);
p+=p->coordinates;
TracePoint(p,start);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
}
static void TraceRoundRectangle(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end,PointInfo arc)
{
PointInfo
degrees,
offset,
point;
register PrimitiveInfo
*p;
register ssize_t
i;
p=primitive_info;
offset.x=fabs(end.x-start.x);
offset.y=fabs(end.y-start.y);
if (arc.x > (0.5*offset.x))
arc.x=0.5*offset.x;
if (arc.y > (0.5*offset.y))
arc.y=0.5*offset.y;
point.x=start.x+offset.x-arc.x;
point.y=start.y+arc.y;
degrees.x=270.0;
degrees.y=360.0;
TraceEllipse(p,point,arc,degrees);
p+=p->coordinates;
point.x=start.x+offset.x-arc.x;
point.y=start.y+offset.y-arc.y;
degrees.x=0.0;
degrees.y=90.0;
TraceEllipse(p,point,arc,degrees);
p+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+offset.y-arc.y;
degrees.x=90.0;
degrees.y=180.0;
TraceEllipse(p,point,arc,degrees);
p+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+arc.y;
degrees.x=180.0;
degrees.y=270.0;
TraceEllipse(p,point,arc,degrees);
p+=p->coordinates;
TracePoint(p,primitive_info->point);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
}
static void TraceSquareLinecap(PrimitiveInfo *primitive_info,
const size_t number_vertices,const double offset)
{
double
distance;
register double
dx,
dy;
register ssize_t
i;
ssize_t
j;
dx=0.0;
dy=0.0;
for (i=1; i < (ssize_t) number_vertices; i++)
{
dx=primitive_info[0].point.x-primitive_info[i].point.x;
dy=primitive_info[0].point.y-primitive_info[i].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
if (i == (ssize_t) number_vertices)
i=(ssize_t) number_vertices-1L;
distance=hypot((double) dx,(double) dy);
primitive_info[0].point.x=(double) (primitive_info[i].point.x+
dx*(distance+offset)/distance);
primitive_info[0].point.y=(double) (primitive_info[i].point.y+
dy*(distance+offset)/distance);
for (j=(ssize_t) number_vertices-2; j >= 0; j--)
{
dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
distance=hypot((double) dx,(double) dy);
primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
dx*(distance+offset)/distance);
primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
dy*(distance+offset)/distance);
}
static inline double DrawEpsilonReciprocal(const double x)
{
#define DrawEpsilon (1.0e-10)
double sign = x < 0.0 ? -1.0 : 1.0;
return((sign*x) >= DrawEpsilon ? 1.0/x : sign*(1.0/DrawEpsilon));
}
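/*
DrawEpsilonReciprocal() returns a saturated reciprocal: 1/x when
|x| >= 1.0e-10, otherwise +/-1.0e10 with the sign of x, so the slope
computations below never divide by zero on near-vertical or near-horizontal
segments.
*/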
static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info)
{
typedef struct _LineSegment
{
double
p,
q;
} LineSegment;
double
delta_theta,
dot_product,
mid,
miterlimit;
LineSegment
dx,
dy,
inverse_slope,
slope,
theta;
MagickBooleanType
closed_path;
PointInfo
box_p[5],
box_q[5],
center,
offset,
*path_p,
*path_q;
PrimitiveInfo
*polygon_primitive,
*stroke_polygon;
register ssize_t
i;
size_t
arc_segments,
max_strokes,
number_vertices;
ssize_t
j,
n,
p,
q;
/*
Allocate paths.
*/
number_vertices=primitive_info->coordinates;
max_strokes=2*number_vertices+6*BezierQuantum+360;
path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
sizeof(*path_p));
path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
sizeof(*path_q));
polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
number_vertices+2UL,sizeof(*polygon_primitive));
if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL) ||
(polygon_primitive == (PrimitiveInfo *) NULL))
return((PrimitiveInfo *) NULL);
(void) CopyMagickMemory(polygon_primitive,primitive_info,(size_t)
number_vertices*sizeof(*polygon_primitive));
closed_path=
(primitive_info[number_vertices-1].point.x == primitive_info[0].point.x) &&
(primitive_info[number_vertices-1].point.y == primitive_info[0].point.y) ?
MagickTrue : MagickFalse;
if ((draw_info->linejoin == RoundJoin) ||
((draw_info->linejoin == MiterJoin) && (closed_path != MagickFalse)))
{
polygon_primitive[number_vertices]=primitive_info[1];
number_vertices++;
}
polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
/*
Compute the slope for the first line segment, p.
*/
dx.p=0.0;
dy.p=0.0;
for (n=1; n < (ssize_t) number_vertices; n++)
{
dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
break;
}
if (n == (ssize_t) number_vertices)
n=(ssize_t) number_vertices-1L;
slope.p=DrawEpsilonReciprocal(dx.p)*dy.p;
inverse_slope.p=(-1.0*DrawEpsilonReciprocal(slope.p));
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
TraceSquareLinecap(polygon_primitive,number_vertices,mid);
offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
offset.y=(double) (offset.x*inverse_slope.p);
if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
{
box_p[0].x=polygon_primitive[0].point.x-offset.x;
box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
box_p[1].x=polygon_primitive[n].point.x-offset.x;
box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
box_q[0].x=polygon_primitive[0].point.x+offset.x;
box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
box_q[1].x=polygon_primitive[n].point.x+offset.x;
box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
}
else
{
box_p[0].x=polygon_primitive[0].point.x+offset.x;
box_p[0].y=polygon_primitive[0].point.y+offset.y;
box_p[1].x=polygon_primitive[n].point.x+offset.x;
box_p[1].y=polygon_primitive[n].point.y+offset.y;
box_q[0].x=polygon_primitive[0].point.x-offset.x;
box_q[0].y=polygon_primitive[0].point.y-offset.y;
box_q[1].x=polygon_primitive[n].point.x-offset.x;
box_q[1].y=polygon_primitive[n].point.y-offset.y;
}
/*
Create strokes for the line join attribute: bevel, miter, round.
*/
p=0;
q=0;
path_q[p++]=box_q[0];
path_p[q++]=box_p[0];
for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
{
/*
Compute the slope for this line segment, q.
*/
dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
dot_product=dx.q*dx.q+dy.q*dy.q;
if (dot_product < 0.25)
continue;
slope.q=DrawEpsilonReciprocal(dx.q)*dy.q;
inverse_slope.q=(-1.0*DrawEpsilonReciprocal(slope.q));
offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
offset.y=(double) (offset.x*inverse_slope.q);
dot_product=dy.q*offset.x-dx.q*offset.y;
if (dot_product > 0.0)
{
box_p[2].x=polygon_primitive[n].point.x-offset.x;
box_p[2].y=polygon_primitive[n].point.y-offset.y;
box_p[3].x=polygon_primitive[i].point.x-offset.x;
box_p[3].y=polygon_primitive[i].point.y-offset.y;
box_q[2].x=polygon_primitive[n].point.x+offset.x;
box_q[2].y=polygon_primitive[n].point.y+offset.y;
box_q[3].x=polygon_primitive[i].point.x+offset.x;
box_q[3].y=polygon_primitive[i].point.y+offset.y;
}
else
{
box_p[2].x=polygon_primitive[n].point.x+offset.x;
box_p[2].y=polygon_primitive[n].point.y+offset.y;
box_p[3].x=polygon_primitive[i].point.x+offset.x;
box_p[3].y=polygon_primitive[i].point.y+offset.y;
box_q[2].x=polygon_primitive[n].point.x-offset.x;
box_q[2].y=polygon_primitive[n].point.y-offset.y;
box_q[3].x=polygon_primitive[i].point.x-offset.x;
box_q[3].y=polygon_primitive[i].point.y-offset.y;
}
if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
{
box_p[4]=box_p[1];
box_q[4]=box_q[1];
}
else
{
box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
box_p[3].y)/(slope.p-slope.q));
box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
box_q[3].y)/(slope.p-slope.q));
box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
}
if (q >= (ssize_t) (max_strokes-6*BezierQuantum-360))
{
max_strokes+=6*BezierQuantum+360;
path_p=(PointInfo *) ResizeQuantumMemory(path_p,(size_t) max_strokes,
sizeof(*path_p));
path_q=(PointInfo *) ResizeQuantumMemory(path_q,(size_t) max_strokes,
sizeof(*path_q));
if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL))
{
polygon_primitive=(PrimitiveInfo *)
RelinquishMagickMemory(polygon_primitive);
return((PrimitiveInfo *) NULL);
}
}
dot_product=dx.q*dy.p-dx.p*dy.q;
if (dot_product <= 0.0)
switch (draw_info->linejoin)
{
case BevelJoin:
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_p[p++]=box_p[4];
else
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
path_q[q++]=box_q[4];
path_p[p++]=box_p[4];
}
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_p[p++]=box_p[4];
else
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
if (theta.q < theta.p)
theta.q+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
(2.0*sqrt((double) (1.0/mid)))));
path_q[q].x=box_q[1].x;
path_q[q].y=box_q[1].y;
q++;
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(j*(theta.q-theta.p)/arc_segments);
path_q[q].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
path_q[q].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
q++;
}
path_q[q++]=box_q[2];
break;
}
default:
break;
}
else
switch (draw_info->linejoin)
{
case BevelJoin:
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_q[q++]=box_q[4];
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
path_q[q++]=box_q[4];
path_p[p++]=box_p[4];
}
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_q[q++]=box_q[4];
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
if (theta.p < theta.q)
theta.p+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
(2.0*sqrt((double) (1.0/mid)))));
path_p[p++]=box_p[1];
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(j*(theta.q-theta.p)/arc_segments);
path_p[p].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
path_p[p].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
p++;
}
path_p[p++]=box_p[2];
break;
}
default:
break;
}
slope.p=slope.q;
inverse_slope.p=inverse_slope.q;
box_p[0]=box_p[2];
box_p[1]=box_p[3];
box_q[0]=box_q[2];
box_q[1]=box_q[3];
dx.p=dx.q;
dy.p=dy.q;
n=i;
}
path_p[p++]=box_p[1];
path_q[q++]=box_q[1];
/*
Trace stroked polygon.
*/
stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
if (stroke_polygon != (PrimitiveInfo *) NULL)
{
for (i=0; i < (ssize_t) p; i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=path_p[i];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
}
for ( ; i < (ssize_t) (p+q+closed_path); i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
i++;
}
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
stroke_polygon[i].primitive=UndefinedPrimitive;
stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
}
path_p=(PointInfo *) RelinquishMagickMemory(path_p);
path_q=(PointInfo *) RelinquishMagickMemory(path_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
return(stroke_polygon);
}
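/*
Note on the construction above: path_p and path_q accumulate the two offset
curves running parallel to the input polyline at distance mid (half the
stroke width); joins insert bevel, miter, or round segments as requested,
and the final stroke polygon is path_p followed by path_q in reverse.
*/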
|
parallelmerge.c | void partition(int arr[],int arr1[],int low,int high,int thread_count)
{
int mid;
if(low<high)
{
if(thread_count==1)
{
mid=(low+high)/2;
partition(arr,arr1,low,mid,thread_count);
partition(arr,arr1,mid+1,high,thread_count);
sort(arr,arr1,low,mid,high);
}
else
{
/* compute mid once before the parallel region; assigning it from every
thread inside the region would be a shared-write data race */
mid=(low+high)/2;
#pragma omp parallel num_threads(thread_count)
{
#pragma omp sections
{
#pragma omp section
{
partition(arr,arr1,low,mid,thread_count/2);
}
#pragma omp section
{
partition(arr,arr1,mid+1,high,thread_count/2);
}
}
}
sort(arr,arr1,low,mid,high);
}
}
}
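/*
Sketch of the recursion (illustrative): each level halves thread_count and
splits [low,high] at mid across two OpenMP sections; once thread_count
reaches 1 the halves recurse serially, and sort() is assumed to merge the
sorted subranges [low,mid] and [mid+1,high].
*/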
|
false_sharing.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
int main(int argc, char* argv[])
{
if(argc < 2)
{
printf("usage: %s [num_threads] <stride>\n",argv[0]);
return 1;
}
int num_threads = atoi(argv[1]);
int stride = 1;
if(argc >= 3)
stride = atoi(argv[2]);
int* array = memalign(1024,num_threads*stride*sizeof(int));
for(int i = 0; i < num_threads*stride; i+=stride) /* one slot per thread */
array[i]=0;
double start_time = omp_get_wtime();
#pragma omp parallel num_threads(num_threads)
{
int index = omp_get_thread_num()*stride;
for(int i = 0; i < 1e9; i++)
array[index]++;
}
double end_time = omp_get_wtime();
printf("Took: %f\n",end_time-start_time);
return 0;
}
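/*
What this benchmark shows (illustrative numbers, assuming 64-byte cache
lines): with stride=1 all counters share one cache line, so every increment
invalidates the other cores' copies; with stride >= 64/sizeof(int) = 16 each
counter occupies its own line and the false-sharing penalty disappears.
*/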
|
wrtieee.c | #include <stdio.h>
#include <stddef.h>
#include "wgrib2.h"
/* wesley ebisuzaki v1.3
*
* write ieee file -- big endian format
*
* input float *array data to be written
* int n size of array
* int header 1 for f77 style header, 0 for none
* (the header is a 4-byte big-endian record length)
* struct seq_file *out output file
*
* v1.2 7/97 buffered, faster
* v1.3 2/99 fixed (typo) error in wrtieee_header found by
* Bob Farquhar
* v1.4 3/2008 w. ebisuzaki added little-endian output
* v1.5 11/2013 w. ebisuzaki remove h4[] to clean up an uninitialized-variable warning
* v1.6 7/2015 w. ebisuzaki OpenMP support, write to fwrite_file, bigger buffer
* v1.7 12/2017 w. ebisuzaki: size(float) -> 4
*/
/* BSIZ has to be a multiple of 4 */
#define BSIZ (64u*1024u*4u)
extern int ieee_little_endian;
int wrtieee(float *array, unsigned int n, int header, struct seq_file *out) {
unsigned int i, j, l, nbuf, loop;
unsigned char buff[BSIZ];
nbuf = 0;
if (header) {
if (n >= 4294967295U / 4) // size(ieee) == 4
fatal_error("wrtieee: grid too large for 4-byte header","");
l = n * 4;
buff[nbuf ] = (l >> 24) & 255;
buff[nbuf+1] = (l >> 16) & 255;
buff[nbuf+2] = (l >> 8) & 255;
buff[nbuf+3] = l & 255;
nbuf += 4;
}
i = 0;
while (i < n) {
loop = (BSIZ - nbuf)/4;
loop = (n-i) > loop ? loop : (n-i);
#pragma omp parallel for private(j) schedule(static)
for (j = 0 ; j < loop; j++) {
flt2ieee(array[i+j], buff + nbuf + j*4);
}
i += loop;
nbuf += 4*loop;
if (nbuf >= BSIZ) { // nbuf should never be > BSIZ
if (ieee_little_endian) swap_buffer(buff, BSIZ);
fwrite_file(buff, 1, BSIZ, out);
nbuf = 0;
}
}
if (header) {
l = n * 4;
buff[nbuf ] = (l >> 24) & 255;
buff[nbuf+1] = (l >> 16) & 255;
buff[nbuf+2] = (l >> 8) & 255;
buff[nbuf+3] = l & 255;
nbuf += 4;
}
if (nbuf) {
if (ieee_little_endian) swap_buffer(buff, nbuf);
fwrite_file(buff, 1, nbuf, out);
}
return 0;
}
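/*
Worked example of the f77 framing: for n=3 floats the record length is l=12,
so the big-endian bytes 00 00 00 0c are written before and after the 12 data
bytes, letting a reader skip whole records from the length prefix alone.
*/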
|
tensor_cpu-inl.h | /*!
* Copyright (c) 2014 by Contributors
* \file tensor_cpu-inl.h
* \brief implementation of CPU host code
* \author Bing Xu, Tianqi Chen
*/
#ifndef MSHADOW_TENSOR_CPU_INL_H_
#define MSHADOW_TENSOR_CPU_INL_H_
#include <cstring>
#include <functional>
#include <utility>
#include <vector>
#include "./base.h"
#include "./tensor.h"
#include "./packet-inl.h"
#include "./dot_engine-inl.h"
namespace mshadow {
template<>
inline void InitTensorEngine<cpu>(int dev_id) {
}
template<>
inline void ShutdownTensorEngine<cpu>(void) {
}
template<>
inline void SetDevice<cpu>(int devid) {
}
template<>
inline Stream<cpu> *NewStream<cpu>(bool create_blas_handle,
bool create_dnn_handle) {
return new Stream<cpu>();
}
template<>
inline void DeleteStream<cpu>(Stream<cpu> *stream) {
delete stream;
}
template<int ndim>
inline std::ostream &operator<<(std::ostream &os, const Shape<ndim> &shape) { // NOLINT(*)
os << '(';
for (int i = 0; i < ndim; ++i) {
if (i != 0) os << ',';
os << shape[i];
}
// python style tuple
if (ndim == 1) os << ',';
os << ')';
return os;
}
template<typename xpu>
inline void *AllocHost_(size_t size);
template<typename xpu>
inline void FreeHost_(void * dptr);
#ifdef __CUDACC__
template<>
inline void *AllocHost_<gpu>(size_t size) {
void *dptr;
MSHADOW_CUDA_CALL(cudaMallocHost(&dptr, size, cudaHostAllocPortable));
return dptr;
}
template<>
inline void FreeHost_<gpu>(void *dptr) {
MSHADOW_CUDA_CALL(cudaFreeHost(dptr));
}
#endif
template<>
inline void *AllocHost_<cpu>(size_t size) {
size_t pitch;
return packet::AlignedMallocPitch(&pitch, size, 1);
}
template<>
inline void FreeHost_<cpu>(void *dptr) {
packet::AlignedFree(dptr);
}
template<typename xpu, int dim, typename DType>
inline void AllocHost(Tensor<cpu, dim, DType> *obj) {
obj->stride_ = obj->size(dim - 1);
CHECK_EQ(obj->CheckContiguous(), true) << "AllocHost";
void *dptr = AllocHost_<xpu>(obj->MSize() * sizeof(DType));
obj->dptr_ = reinterpret_cast<DType*>(dptr);
}
template<typename xpu, int dim, typename DType>
inline void FreeHost(Tensor<cpu, dim, DType> *obj) {
if (obj->dptr_ == NULL) {
LOG(FATAL) << "FreeHost:: double free";
}
FreeHost_<xpu>(obj->dptr_);
obj->dptr_ = NULL;
}
template<int dim, typename DType>
inline void AllocSpace(Tensor<cpu, dim, DType> *obj, bool pad) {
size_t pitch;
void *dptr;
if (pad) {
dptr = packet::AlignedMallocPitch
(&pitch, obj->size(dim - 1) * sizeof(DType), obj->shape_.FlatTo2D()[0]);
obj->stride_ = static_cast<index_t>(pitch / sizeof(DType));
} else {
obj->stride_ = obj->size(dim - 1);
dptr = packet::AlignedMallocPitch
(&pitch, obj->shape_.Size() * sizeof(DType), 1);
}
obj->dptr_ = reinterpret_cast<DType*>(dptr);
}
template<typename Device, typename DType, int dim>
inline Tensor<Device, dim, DType>
NewTensor(const Shape<dim> &shape, DType initv, bool pad, Stream<Device> *stream_) {
Tensor<Device, dim, DType> obj(shape);
obj.stream_ = stream_;
AllocSpace(&obj, pad);
MapExp<sv::saveto>(&obj, expr::ScalarExp<DType>(initv));
return obj;
}
template<int dim, typename DType>
inline void FreeSpace(Tensor<cpu, dim, DType> *obj) {
packet::AlignedFree(obj->dptr_);
obj->dptr_ = NULL;
}
template<int dim, typename DType>
inline void Copy(Tensor<cpu, dim, DType> _dst,
const Tensor<cpu, dim, DType> &_src,
Stream<cpu> *stream) {
CHECK_EQ(_dst.shape_, _src.shape_)
<< "Copy:shape mismatch:" << _dst.shape_ << " vs " << _src.shape_;
if (_dst.CheckContiguous() && _src.CheckContiguous()) {
memcpy(_dst.dptr_, _src.dptr_, sizeof(DType) * _dst.shape_.Size());
} else {
Tensor<cpu, 2, DType> dst = _dst.FlatTo2D();
Tensor<cpu, 2, DType> src = _src.FlatTo2D();
for (index_t y = 0; y < dst.size(0); ++y) {
memcpy(dst[y].dptr_, src[y].dptr_, sizeof(DType) * dst.size(1));
}
}
}
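// Note on Copy() above: contiguous tensors are copied with a single memcpy;
// otherwise both are flattened to 2-d and copied row by row, since each row
// stays contiguous even when stride_ exceeds the innermost dimension.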
template<typename Saver, typename R, int dim,
typename DType, typename E>
inline void MapPlan(TRValue<R, cpu, dim, DType> *dst,
const expr::Plan<E, DType> &plan) {
Shape<2> shape = expr::ShapeCheck<dim, R>::Check(dst->self()).FlatTo2D();
expr::Plan<R, DType> dplan = expr::MakePlan(dst->self());
// #pragma omp parallel for
// temp remove openmp, as default setting throttles CPU
for (index_t y = 0; y < shape[0]; ++y) {
for (index_t x = 0; x < shape[1]; ++x) {
// trust your compiler! -_- they will optimize it
Saver::template Save<DType>(dplan.REval(y, x), plan.Eval(y, x));
}
}
}
// code to handle SSE optimization
template<bool pass_check, typename Saver,
typename R, int dim,
typename DType, typename E, int etype>
struct MapExpCPUEngine {
inline static void Map(TRValue<R, cpu, dim, DType> *dst,
const expr::Exp<E, DType, etype> &exp) {
MapPlan<Saver>(dst, MakePlan(exp.self()));
}
};
template<typename SV, int dim, typename DType, typename E, int etype>
struct MapExpCPUEngine<true, SV, Tensor<cpu, dim, DType>,
dim, DType, E, etype> {
inline static void Map(Tensor<cpu, dim, DType> *dst,
const expr::Exp<E, DType, etype> &exp) {
if (expr::PacketAlignCheck<dim, E, MSHADOW_DEFAULT_PACKET>::Check(exp.self()) &&
expr::PacketAlignCheck<dim, Tensor<cpu, dim, DType>, MSHADOW_DEFAULT_PACKET>::Check(*dst)) {
expr::MapPacketPlan<SV>(dst->self(),
expr::MakePacketPlan<MSHADOW_DEFAULT_PACKET>(exp.self()));
} else {
MapPlan<SV>(dst, MakePlan(exp.self()));
}
}
};
template<typename Saver, typename R, int dim,
typename DType, typename E, int etype>
inline void MapExp(TRValue<R, cpu, dim, DType> *dst,
const expr::Exp<E, DType, etype> &exp) {
expr::TypeCheckPass<expr::TypeCheck<cpu, dim, DType, E>::kMapPass>
::Error_All_Tensor_in_Exp_Must_Have_Same_Type();
Shape<dim> eshape = expr::ShapeCheck<dim, E>::Check(exp.self());
Shape<dim> dshape = expr::ShapeCheck<dim, R>::Check(dst->self());
CHECK(eshape[0] == 0 || eshape == dshape)
<< "Assignment: Shape of Tensors are not consistent with target, "
<< "eshape: " << eshape << " dshape:" << dshape;
MapExpCPUEngine<expr::PacketCheck<E, MSHADOW_DEFAULT_PACKET>::kPass,
Saver, R, dim, DType, E, etype>
::Map(dst->ptrself(), exp);
}
template<typename Saver, typename Reducer,
typename R, typename DType, typename E, int etype>
inline void MapReduceKeepLowest(TRValue<R, cpu, 1, DType> *dst,
const expr::Exp<E, DType, etype> &exp,
DType scale) {
expr::TypeCheckPass<expr::TypeCheck<cpu, 1, DType, E>::kRedPass>
::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
Shape<2> eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
::Check(exp.self()).FlatTo2D();
Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
CHECK_EQ(eshape[1], dshape[0]) << "MapReduceKeepLowest::reduction dimension does not match";
CHECK_NE(eshape[0], 0) << "can not reduce over empty tensor";
// execution
expr::Plan<R, DType> dplan = MakePlan(dst->self());
expr::Plan<E, DType> splan = MakePlan(exp.self());
for (index_t x = 0; x < eshape[1]; ++x) {
DType res = splan.Eval(0, x);
for (index_t y = 1; y < eshape[0]; ++y) {
Reducer::Reduce(res, splan.Eval(y, x));
}
Saver::template Save<DType>(dplan.REval(0, x), res * scale);
}
}
template<typename Saver, typename Reducer, int dimkeep,
typename R, typename DType, typename E, int etype>
inline void MapReduceKeepHighDim(TRValue<R, cpu, 1, DType> *dst,
const expr::Exp<E, DType, etype> &exp,
DType scale) {
expr::TypeCheckPass<expr::TypeCheck<cpu, dimkeep, DType, E>::kRedPass>
::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
typedef Shape<expr::ExpInfo<E>::kDim> EShape;
EShape eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
::Check(exp.self());
Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
CHECK_EQ(eshape[dimkeep], dshape[0])
<< "MapReduceKeepHighDim::reduction dimension do not match";
// use the equivalent 4-d form
Shape<4> pshape = Shape4(eshape.ProdShape(0, dimkeep),
eshape[dimkeep],
eshape.ProdShape(dimkeep + 1, EShape::kSubdim),
eshape[EShape::kSubdim]);
// execution
expr::Plan<R, DType> dplan = MakePlan(dst->self());
expr::Plan<E, DType> splan = MakePlan(exp.self());
for (index_t c = 0; c < pshape[1]; ++c) {
DType res; Reducer::SetInitValue(res);
for (index_t n = 0; n < pshape[0]; ++n) {
DType tres; Reducer::SetInitValue(tres);
for (index_t y = 0; y < pshape[2]; ++y) {
for (index_t x = 0; x < pshape[3]; ++x) {
Reducer::Reduce(tres,
splan.Eval((n * pshape[1] + c) * pshape[2] + y, x));
}
}
Reducer::Reduce(res, tres);
}
Saver::template Save<DType>(dplan.REval(0, c), DType(res * scale));
}
}
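// Note on the reshape above: Shape4 arranges the expression as (axes before
// dimkeep, dimkeep itself, axes between dimkeep and the last, last axis), so
// the kept axis is always axis 1 and the reduction loops are rank-agnostic.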
template<typename DType>
inline void Softmax(Tensor<cpu, 1, DType> dst,
const Tensor<cpu, 1, DType> &energy) {
DType mmax = energy[0];
for (index_t x = 1; x < dst.size(0); ++x) {
if (mmax < energy[x]) mmax = energy[x];
}
DType sum = DType(0.0f);
for (index_t x = 0; x < dst.size(0); ++x) {
dst[x] = std::exp(energy[x] - mmax);
sum += dst[x];
}
for (index_t x = 0; x < dst.size(0); ++x) {
dst[x] /= sum;
}
}
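// The max subtraction above is the standard stability trick: for any constant
// m, softmax(x)_i = exp(x_i-m)/sum_j exp(x_j-m); choosing m = max(x) keeps
// every exponent <= 0, so std::exp() cannot overflow.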
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &src,
const Tensor<cpu, 1, DType> &label) {
for (index_t y = 0; y < dst.size(0); ++y) {
const index_t k = static_cast<int>(label[y]);
for (index_t x = 0; x < dst.size(1); ++x) {
if (x == k) {
dst[y][k] = src[y][k] - 1.0f;
} else {
dst[y][x] = src[y][x];
}
}
}
}
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &src,
const Tensor<cpu, 1, DType> &label,
const DType &ignore_label) {
for (index_t y = 0; y < dst.size(0); ++y) {
const index_t k = static_cast<int>(label[y]);
for (index_t x = 0; x < dst.size(1); ++x) {
if (static_cast<int>(ignore_label) == k) {
dst[y][x] = 0.0f;
} else {
if (x == k) {
dst[y][k] = src[y][k] - 1.0f;
} else {
dst[y][x] = src[y][x];
}
}
}
}
}
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &src,
const Tensor<cpu, 2, DType> &label) {
for (index_t n = 0; n < dst.size(2); ++n) {
for (index_t y = 0; y < dst.size(0); ++y) {
const index_t k = static_cast<int>(label[y][n]);
for (index_t x = 0; x < dst.size(1); ++x) {
if (x == k) {
dst[y][k][n] = src[y][k][n] - 1.0f;
} else {
dst[y][x][n] = src[y][x][n];
}
}
}
}
}
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &src,
const Tensor<cpu, 2, DType> &label,
const DType &ignore_label) {
for (index_t n = 0; n < dst.size(2); ++n) {
for (index_t y = 0; y < dst.size(0); ++y) {
const index_t k = static_cast<int>(label[y][n]);
if (k == static_cast<int>(ignore_label)) {
for (index_t x = 0; x < dst.size(1); ++x) {
dst[y][x][n] = DType(0.0f);
}
} else {
for (index_t x = 0; x < dst.size(1); ++x) {
if (x == k) {
dst[y][k][n] = src[y][k][n] - 1.0f;
} else {
dst[y][x][n] = src[y][x][n];
}
}
}
}
}
}
template<typename DType>
inline void Softmax(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &energy) {
CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
for (index_t y = 0; y < dst.size(0); ++y) {
Softmax(dst[y], energy[y]);
}
}
template<typename DType>
inline void Softmax(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &energy) {
CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
for (index_t y = 0; y < dst.size(0); ++y) {
for (index_t n = 0; n < dst.size(2); ++n) {
DType mmax = energy[y][0][n];
for (index_t x = 1; x < dst.size(1); ++x) {
if (mmax < energy[y][x][n]) mmax = energy[y][x][n];
}
DType sum = DType(0.0f);
for (index_t x = 0; x < dst.size(1); ++x) {
dst[y][x][n] = std::exp(energy[y][x][n] - mmax);
sum += dst[y][x][n];
}
for (index_t x = 0; x < dst.size(1); ++x) {
dst[y][x][n] /= sum;
}
}
}
}
template<typename IndexType, typename DType>
inline void AddTakeGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 1, IndexType>& index,
const Tensor<cpu, 2, DType> &src) {
for (index_t y = 0; y < index.size(0); ++y) {
dst[index[y]] += src[y];
}
}
template<typename IndexType, typename DType>
inline void AddTakeGradLargeBatch(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 1, IndexType>& sorted,
const Tensor<cpu, 1, IndexType>& index,
const Tensor<cpu, 2, DType> &src) {
for (index_t y = 0; y < sorted.size(0); ++y) {
dst[sorted[y]] += src[index[y]];
}
}
template<typename KDType, typename VDType>
inline void SortByKey(Tensor<cpu, 1, KDType> keys, Tensor<cpu, 1, VDType> values,
bool is_ascend) {
CHECK_EQ(keys.CheckContiguous(), true);
CHECK_EQ(values.CheckContiguous(), true);
CHECK_EQ(keys.size(0), values.size(0))
<< "The sizes of key/value are not equal! keys_size: " << keys.size(0)
<< "values_size: " << values.size(0);
std::vector<std::pair<KDType, VDType> > V;
for (index_t i = 0; i < values.size(0); ++i) {
std::pair<KDType, VDType> P = std::make_pair(keys[i], values[i]);
V.push_back(P);
}
if (is_ascend) {
std::stable_sort(V.begin(), V.end());
} else {
std::stable_sort(V.begin(), V.end(), std::greater<std::pair<KDType, VDType> >());
}
for (index_t i = 0; i < values.size(0); i++) {
keys[i] = V[i].first;
values[i] = V[i].second;
}
}
template<typename Device, typename VDType, typename SDType>
inline void VectorizedSort(Tensor<Device, 1, VDType> values, Tensor<Device, 1, SDType> segments) {
// We can sort each segment using two stable sorts
SortByKey(values, segments, true);
SortByKey(segments, values, true);
}
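// Why two stable sorts give a segmented sort: the first pass orders values
// globally and drags each value's segment id along; the second pass regroups
// equal segment ids, and stability preserves the ascending value order the
// first pass established inside every segment. A standalone sketch using
// std::stable_sort directly (illustrative names):
struct SketchByFirst {
  bool operator()(const std::pair<int, int> &a,
                  const std::pair<int, int> &b) const {
    return a.first < b.first;
  }
};
struct SketchBySecond {
  bool operator()(const std::pair<int, int> &a,
                  const std::pair<int, int> &b) const {
    return a.second < b.second;
  }
};
inline void sketch_vectorized_sort() {
  // {value, segment} pairs; the segments are interleaved on purpose
  std::pair<int, int> kv[6] = {
    std::make_pair(5, 0), std::make_pair(1, 1), std::make_pair(4, 0),
    std::make_pair(2, 1), std::make_pair(3, 0), std::make_pair(0, 1)};
  std::stable_sort(kv, kv + 6, SketchByFirst());   // values: 0,1,2,3,4,5
  std::stable_sort(kv, kv + 6, SketchBySecond());  // seg 0: {3,4,5}, seg 1: {0,1,2}
}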
// blas related
template<typename Device, typename DType>
inline void VectorDot(Tensor<Device, 1, DType> dst,
const Tensor<Device, 1, DType> &lhs,
const Tensor<Device, 1, DType> &rhs) {
CHECK_EQ(lhs.size(0), rhs.size(0))
<< "VectorDot: Shape mismatch";
CHECK_EQ(dst.size(0), 1)
<< "VectorDot: expect dst to be scalar";
expr::BLASEngine<Device, DType>::SetStream(lhs.stream_);
mshadow::expr::BLASEngine<Device, DType>::dot(
lhs.stream_, lhs.size(0), lhs.dptr_, 1, rhs.dptr_, 1, dst.dptr_);
}
template<bool transpose_left, bool transpose_right, typename Device, typename DType>
inline void BatchGEMM(Tensor<Device, 3, DType> dst,
const Tensor<Device, 3, DType> &lhs,
const Tensor<Device, 3, DType> &rhs,
DType alpha,
DType beta,
Tensor<Device, 1, DType*> workspace) {
int batch_size = dst.shape_[0];
expr::BLASEngine<Device, DType>::SetStream(dst.stream_);
Shape<3> sleft = transpose_left ? Shape3(lhs.shape_[0], lhs.shape_[2], lhs.shape_[1])
: lhs.shape_;
Shape<3> sright = transpose_right ? Shape3(rhs.shape_[0], rhs.shape_[2], rhs.shape_[1])
: rhs.shape_;
CHECK_EQ(dst.CheckContiguous(), true);
CHECK_EQ(lhs.CheckContiguous(), true);
CHECK_EQ(rhs.CheckContiguous(), true);
CHECK(sleft[0] == batch_size && sright[0] == batch_size)
<< "BatchGEMM: batchsize must be equal."
<< "dst: " << dst.shape_ << "\n"
<< "lhs: " << sleft << "\n"
<< "rhs: " << sright << "\n";
CHECK(dst.size(1) == sleft[1] && dst.size(2) == sright[2] && sleft[2] == sright[1])
<< "BatchGEMM: matrix shape mismatch"
<< "dst: " << dst.shape_ << "\n"
<< "lhs: " << sleft << "\n"
<< "rhs: " << sright << "\n";
CHECK(workspace.size(0) >= 3 * batch_size)
<< "Workspace Size must be bigger than " << 3 * batch_size;
CHECK_EQ(workspace.CheckContiguous(), true);
// use column-major arguments to be compatible with most BLAS implementations
expr::BLASEngine<Device, DType>::batched_gemm
(dst.stream_,
transpose_right, transpose_left,
transpose_right ? rhs.size(1) : rhs.size(2),
transpose_left ? lhs.size(2) : lhs.size(1),
transpose_right ? rhs.size(2) : rhs.size(1),
alpha,
rhs.dptr_, rhs.stride_,
lhs.dptr_, lhs.stride_,
beta,
dst.dptr_, dst.stride_, batch_size,
workspace.dptr_);
}
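// The operand swap above (rhs passed before lhs) is the standard trick for
// driving a column-major BLAS with row-major data: a row-major C (m x n) is
// bit-identical to a column-major C^T (n x m), and C^T = B^T * A^T, so
// calling gemm with the operands and dimensions exchanged produces the
// row-major product with no copies. A sketch with a naive kernel standing in
// for the BLAS call (illustrative names):
//
// column-major C(m x n) = A(m x k) * B(k x n), leading dimension = row count
inline void sketch_gemm_colmajor(int m, int n, int k,
                                 const double *A, const double *B, double *C) {
  for (int j = 0; j < n; ++j)
    for (int i = 0; i < m; ++i) {
      double acc = 0.0;
      for (int p = 0; p < k; ++p) acc += A[i + p * m] * B[p + j * k];
      C[i + j * m] = acc;
    }
}
inline void sketch_rowmajor_via_colmajor() {
  const double A[6] = {1, 2, 3, 4, 5, 6};     // row-major 2x3
  const double B[6] = {7, 8, 9, 10, 11, 12};  // row-major 3x2
  double C[4];                                // row-major 2x2 result
  // row-major A is column-major A^T, so compute C^T = B^T * A^T:
  sketch_gemm_colmajor(2, 2, 3, B, A, C);     // C = {58, 64, 139, 154}
}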
} // namespace mshadow
#endif // MSHADOW_TENSOR_CPU_INL_H_
|
DRB015-outofbounds-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The outermost loop is parallelized.
But the inner loop has an out-of-bounds access on b[i][j-1] when j equals 0.
This causes a memory access to the previous row's last element.
For example, for a 4x4 array:
   j=0 1 2 3
i=0  x x x x
  1  x x x x
  2  x x x x
  3  x x x x
outer loop: i=2,
inner loop: j=0
the array element accessed, b[i][j-1], becomes b[2][-1], which in turn is b[1][3]
due to the linearized row-major storage of the 2-D array.
This causes a loop-carried data dependence between i=2 and i=1.
Data race pair: b[i][j]@80:7 vs. b[i][j-1]@80:15
*/
#include <stdlib.h>
int main(int argc, char* argv[])
{
int i,j;
int len=100;
if (argc>1)
len = atoi(argv[1]);
int n=len, m=len;
double b[n][m];
#pragma omp parallel for private(j)
for (i=1;i<n;i++)
for (j=0;j<m;j++) // Note: there is an out-of-bounds access when j==0
b[i][j]=b[i][j-1];
return 0;
}
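/* For contrast, a race-free variant of the same loop nest (an illustrative
   sketch, not part of the benchmark): starting the inner loop at j=1 removes
   the out-of-bounds read, so every b[i][j-1] stays inside row i, the
   loop-carried dependence across i disappears, and the omp parallel for
   becomes safe. */
void race_free_variant(int n, int m, double b[n][m])
{
  int i, j;
#pragma omp parallel for private(j)
  for (i=1;i<n;i++)
    for (j=1;j<m;j++) /* j starts at 1: no read of b[i][-1] */
      b[i][j]=b[i][j-1];
}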
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
int ResInnerStride>
struct general_matrix_matrix_product<Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, RowMajor, ResInnerStride> {
typedef gebp_traits <RhsScalar, LhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar *lhs, Index lhsStride,
const RhsScalar *rhs, Index rhsStride,
ResScalar *res, Index resIncr, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar, LhsScalar> &blocking,
GemmParallelInfo <Index> *info = 0) {
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder == RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder == RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor, ResInnerStride>
::run(cols, rows, depth, rhs, rhsStride, lhs, lhsStride, res, resIncr, resStride, alpha, blocking,
info);
}
};
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
int ResInnerStride>
struct general_matrix_matrix_product<Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, ColMajor, ResInnerStride> {
typedef gebp_traits <LhsScalar, RhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
const LhsScalar *_lhs, Index lhsStride,
const RhsScalar *_rhs, Index rhsStride,
ResScalar *_res, Index resIncr, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar, RhsScalar> &blocking,
GemmParallelInfo <Index> *info = 0) {
typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor, Unaligned, ResInnerStride> ResMapper;
LhsMapper lhs(_lhs, lhsStride);
RhsMapper rhs(_rhs, rhsStride);
ResMapper res(_res, resStride, resIncr);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows, blocking.mc()); // cache block size along the M direction
Index nc = (std::min)(cols, blocking.nc()); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
int tid = omp_get_thread_num();
int threads = omp_get_num_threads();
LhsScalar* blockA = blocking.blockA();
eigen_internal_assert(blockA!=0);
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing B'.
pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);
// Pack A_k to A' in a parallel fashion:
// each thread packs the sub block A_k,i to A'_i where i is the thread id.
// However, before copying to A'_i, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
while(info[tid].users!=0) {}
info[tid].users += threads;
pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);
// Notify the other threads that the part A'_i is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per A'_i
for(int shift=0; shift<threads; ++shift)
{
int i = (tid+shift)%threads;
// At this point we have to make sure that A'_i has been updated by the thread i,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if (shift>0) {
while(info[i].sync!=k) {
}
}
gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
}
// Then keep going as usual with the remaining B'
for(Index j=nc; j<cols; j+=nc)
{
const Index actual_nc = (std::min)(j+nc,cols)-j;
// pack B_k,j to B'
pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);
// C_j += A' * B'
gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
}
// Release all the sub blocks A'_i of A' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index i=0; i<threads; ++i)
#pragma omp atomic
info[i].users -= 1;
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc * mc;
std::size_t sizeB = kc * nc;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
const bool pack_rhs_once = mc != rows && kc == depth && nc == cols;
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
for (Index i2 = 0; i2 < rows; i2 += mc) {
const Index actual_mc = (std::min)(i2 + mc, rows) - i2;
for (Index k2 = 0; k2 < depth; k2 += kc) {
const Index actual_kc = (std::min)(k2 + kc, depth) - k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
// Note that this panel will be read as many times as the number of blocks in the rhs's
// horizontal panel which is, in practice, a very low number.
pack_lhs(blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);
// For each kc x nc block of the rhs's horizontal panel...
for (Index j2 = 0; j2 < cols; j2 += nc) {
const Index actual_nc = (std::min)(j2 + nc, cols) - j2;
// We pack the rhs's block into a sequential chunk of memory (L2 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
if ((!pack_rhs_once) || i2 == 0)
pack_rhs(blockB, rhs.getSubMapper(k2, j2), actual_kc, actual_nc);
// Everything is packed, we can now call the panel * block kernel:
gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
}
}
}
}
}
};
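// Stripped of packing, parallelism and vectorization, the i2/k2/j2 nest above
// is the classic three-level cache blocking from Goto's paper: keep one
// mc x kc panel of the lhs hot in cache and stream kc x nc blocks of the rhs
// through it. A minimal sketch on plain row-major arrays, assuming C is
// zero-initialized beforehand (illustrative names and block sizes):
inline void sketch_blocked_gemm(int rows, int cols, int depth,
                                const double* A, const double* B, double* C,
                                int mc, int kc, int nc) {
  for (int i2 = 0; i2 < rows; i2 += mc)
    for (int k2 = 0; k2 < depth; k2 += kc)      // the A panel stays resident
      for (int j2 = 0; j2 < cols; j2 += nc) {   // B blocks stream through it
        const int im = (std::min)(i2 + mc, rows);
        const int km = (std::min)(k2 + kc, depth);
        const int jm = (std::min)(j2 + nc, cols);
        for (int i = i2; i < im; ++i)
          for (int k = k2; k < km; ++k) {
            const double a = A[i * depth + k];
            for (int j = j2; j < jm; ++j)
              C[i * cols + j] += a * B[k * cols + j];
          }
      }
}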
/*********************************************************************************
* Specialization of generic_product_impl for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor {
gemm_functor(const Lhs &lhs, const Rhs &rhs, Dest &dest, const Scalar &actualAlpha, BlockingType &blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking) {}
void initParallelSession(Index num_threads) const {
m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
m_blocking.allocateA();
}
void operator()(Index row, Index rows, Index col = 0, Index cols = -1,
GemmParallelInfo <Index> *info = 0) const {
if (cols == -1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
&m_lhs.coeffRef(row, 0), m_lhs.outerStride(),
&m_rhs.coeffRef(0, col), m_rhs.outerStride(),
(Scalar *) &(m_dest.coeffRef(row, col)), m_dest.innerStride(), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
typedef typename Gemm::Traits Traits;
protected:
const Lhs &m_lhs;
const Rhs &m_rhs;
Dest &m_dest;
Scalar m_actualAlpha;
BlockingType &m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor = 1,
bool FiniteAtCompileTime = MaxRows != Dynamic && MaxCols != Dynamic && MaxDepth != Dynamic>
class gemm_blocking_space;
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking {
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar *m_blockA;
RhsScalar *m_blockB;
Index m_mc;
Index m_nc;
Index m_kc;
public:
level3_blocking()
: m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0) {}
inline Index mc() const { return m_mc; }
inline Index nc() const { return m_nc; }
inline Index kc() const { return m_kc; }
inline LhsScalar *blockA() { return m_blockA; }
inline RhsScalar *blockB() { return m_blockB; }
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder, _LhsScalar, _RhsScalar, MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
: public level3_blocking<
typename conditional<StorageOrder == RowMajor, _RhsScalar, _LhsScalar>::type,
typename conditional<StorageOrder == RowMajor, _LhsScalar, _RhsScalar>::type> {
enum {
Transpose = StorageOrder == RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose, _RhsScalar, _LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose, _LhsScalar, _RhsScalar>::type RhsScalar;
typedef gebp_traits <LhsScalar, RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth
};
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif
public:
gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/,
bool /*full_rows = false*/) {
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
#else
this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
}
void initParallel(Index, Index, Index, Index) {}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateAll() {}
};
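// The reinterpret_cast in the #else branch above is the usual manual
// round-up to an alignment boundary: add (align - 1) to the address and mask
// away the low bits, valid whenever align is a power of two. A standalone
// sketch (illustrative name):
inline void* sketch_align_up(void* p, std::size_t align) {
  return reinterpret_cast<void*>(
      (UIntPtr(p) + (align - 1)) & ~std::size_t(align - 1));
}
// A char buffer of Size + align - 1 bytes therefore always contains an
// aligned region of Size bytes starting at sketch_align_up(buffer, align),
// which is what the oversized m_staticA/m_staticB arrays rely on.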
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder, _LhsScalar, _RhsScalar, MaxRows, MaxCols, MaxDepth, KcFactor, false>
: public level3_blocking<
typename conditional<StorageOrder == RowMajor, _RhsScalar, _LhsScalar>::type,
typename conditional<StorageOrder == RowMajor, _LhsScalar, _RhsScalar>::type> {
enum {
Transpose = StorageOrder == RowMajor
};
typedef typename conditional<Transpose, _RhsScalar, _LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose, _LhsScalar, _RhsScalar>::type RhsScalar;
typedef gebp_traits <LhsScalar, RhsScalar> Traits;
Index m_sizeA;
Index m_sizeB;
public:
gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking) {
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
if (l3_blocking) {
computeProductBlockingSizes<LhsScalar, RhsScalar, KcFactor>(this->m_kc, this->m_mc, this->m_nc,
num_threads);
} else // no l3 blocking
{
Index n = this->m_nc;
computeProductBlockingSizes<LhsScalar, RhsScalar, KcFactor>(this->m_kc, this->m_mc, n, num_threads);
}
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
void initParallel(Index rows, Index cols, Index depth, Index num_threads) {
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
eigen_internal_assert(this->m_blockA == 0 && this->m_blockB == 0);
Index m = this->m_mc;
computeProductBlockingSizes<LhsScalar, RhsScalar, KcFactor>(this->m_kc, m, this->m_nc, num_threads);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
void allocateA() {
if (this->m_blockA == 0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB() {
if (this->m_blockB == 0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateAll() {
allocateA();
allocateB();
}
~gemm_blocking_space() {
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
}
};
} // end namespace internal
namespace internal {
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs, Rhs, DenseShape, DenseShape, GemmProduct>
: generic_product_impl_base<Lhs, Rhs, generic_product_impl<Lhs, Rhs, DenseShape, DenseShape, GemmProduct> > {
typedef typename Product<Lhs, Rhs>::Scalar Scalar;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,
Rhs::MaxRowsAtCompileTime)
};
typedef generic_product_impl<Lhs, Rhs, DenseShape, DenseShape, CoeffBasedProductMode> lazyproduct;
template<typename Dst>
static void evalTo(Dst &dst, const Lhs &lhs, const Rhs &rhs) {
if ((rhs.rows() + dst.rows() + dst.cols()) < 20 && rhs.rows() > 0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar, Scalar>());
else {
dst.setZero();
scaleAndAddTo(dst, lhs, rhs, Scalar(1));
}
}
template<typename Dst>
static void addTo(Dst &dst, const Lhs &lhs, const Rhs &rhs) {
if ((rhs.rows() + dst.rows() + dst.cols()) < 20 && rhs.rows() > 0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar, Scalar>());
else
scaleAndAddTo(dst, lhs, rhs, Scalar(1));
}
template<typename Dst>
static void subTo(Dst &dst, const Lhs &lhs, const Rhs &rhs) {
if ((rhs.rows() + dst.rows() + dst.cols()) < 20 && rhs.rows() > 0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar, Scalar>());
else
scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
}
template<typename Dest>
static void scaleAndAddTo(Dest &dst, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar &alpha) {
eigen_assert(dst.rows() == a_lhs.rows() && dst.cols() == a_rhs.cols());
if (a_lhs.cols() == 0 || a_lhs.rows() == 0 || a_rhs.cols() == 0)
return;
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
* RhsBlasTraits::extractScalarFactor(a_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags & RowMajorBit) ? RowMajor
: ColMajor, LhsScalar, RhsScalar,
Dest::MaxRowsAtCompileTime, Dest::MaxColsAtCompileTime, MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor<
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (ActualLhsTypeCleaned::Flags & RowMajorBit) ? RowMajor : ColMajor, bool(
LhsBlasTraits::NeedToConjugate),
RhsScalar, (ActualRhsTypeCleaned::Flags & RowMajorBit) ? RowMajor : ColMajor, bool(
RhsBlasTraits::NeedToConjugate),
(Dest::Flags & RowMajorBit) ? RowMajor : ColMajor,
Dest::InnerStrideAtCompileTime>,
ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime > 32 || Dest::MaxRowsAtCompileTime == Dynamic)>
(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(),
Dest::Flags & RowMajorBit);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
mixed_tentusscher_myo_epi_2004.c | #include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004.h"
GET_CELL_MODEL_DATA(init_cell_model_data)
{
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
static bool first_call = true;
if(first_call)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
first_call = false;
}
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
// Initial conditions for TenTusscher myocardium
if (mapping[sv_id] == 0)
{
/// initial condition
real sv11[]={86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
sv[0] = sv11[0]; // V; millivolt
sv[1] = sv11[1]; //M
sv[2] = sv11[2]; //H
sv[3] = sv11[3]; //J
sv[4] = sv11[4]; //Xr1
sv[5] = sv11[5]; //Xr2
sv[6] = sv11[6]; //Xs
sv[7] = sv11[7]; //S
sv[8] = sv11[8]; //R
sv[9] = sv11[9]; //D
sv[10] = sv11[10]; //F
sv[11] = sv11[11]; //FCa
sv[12] = sv11[12]; //G
sv[13] = sv11[13]; //Cai
sv[14] = sv11[14]; //CaSR
sv[15] = sv11[15]; //Nai
sv[16] = sv11[16]; //Ki
// sv[0] = INITIAL_V; // V; millivolt
// sv[1] = 0.f; //M
// sv[2] = 0.75; //H
// sv[3] = 0.75f; //J
// sv[4] = 0.f; //Xr1
// sv[5] = 1.f; //Xr2
// sv[6] = 0.f; //Xs
// sv[7] = 1.f; //S
// sv[8] = 0.f; //R
// sv[9] = 0.f; //D
// sv[10] = 1.f; //F
// sv[11] = 1.f; //FCa
// sv[12] = 1.f; //G
// sv[13] = 0.0002; //Cai
// sv[14] = 0.2f; //CaSR
// sv[15] = 11.6f; //Nai
// sv[16] = 138.3f; //Ki
}
// Initial conditions for TenTusscher epicardium
else
{
// sv[0] = INITIAL_V; // V; millivolt
// sv[1] = 0.f; //M
// sv[2] = 0.75; //H
// sv[3] = 0.75f; //J
// sv[4] = 0.f; //Xr1
// sv[5] = 1.f; //Xr2
// sv[6] = 0.f; //Xs
// sv[7] = 1.f; //S
// sv[8] = 0.f; //R
// sv[9] = 0.f; //D
// sv[10] = 1.f; //F
// sv[11] = 1.f; //FCa
// sv[12] = 1.f; //G
// sv[13] = 0.0002; //Cai
// sv[14] = 0.2f; //CaSR
// sv[15] = 11.6f; //Nai
// sv[16] = 138.3f; //Ki
/// initial condition
real sv11[]={-86.4172552153702,0.00133233093318418,0.775980725003160,0.775871451583533,0.000178484465968596,0.483518904573916,0.00297208335439809,0.999998297825169,1.98274727808946e-08,1.92952362196655e-05,0.999768268008847,1.00667048889468,0.999984854519288,5.50424977684767e-05,0.352485262813812,10.8673127043200,138.860197273148};
sv[0] = sv11[0]; // V; millivolt
sv[1] = sv11[1]; //M
sv[2] = sv11[2]; //H
sv[3] = sv11[3]; //J
sv[4] = sv11[4]; //Xr1
sv[5] = sv11[5]; //Xr2
sv[6] = sv11[6]; //Xs
sv[7] = sv11[7]; //S
sv[8] = sv11[8]; //R
sv[9] = sv11[9]; //D
sv[10] = sv11[10]; //F
sv[11] = sv11[11]; //FCa
sv[12] = sv11[12]; //G
sv[13] = sv11[13]; //Cai
sv[14] = sv11[14]; //CaSR
sv[15] = sv11[15]; //Nai
sv[16] = sv11[16]; //Ki
}
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++)
{
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = (uint32_t )i;
for (int j = 0; j < num_steps; ++j)
{
if (mapping[i] == 0)
solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
else
solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu_myo(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
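/* The gate updates above (rDY_[1]..rDY_[10], and the sfca/sg updates) use
   the Rush-Larsen scheme: each gate obeys dy/dt = (y_inf - y)/tau with y_inf
   and tau frozen over the step, which integrates exactly to
   y(t+dt) = y_inf - (y_inf - y)*exp(-dt/tau). A one-gate sketch with an
   illustrative name: */
static real rush_larsen_step(real y, real y_inf, real tau, real dt)
{
    /* exact solution of the linear gate ODE over one step of size dt;
       unlike forward Euler, the result stays between y and y_inf for any
       dt > 0, which is why the scheme suits stiff gating variables */
    return y_inf - (y_inf - y)*exp(-dt/tau);
}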
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu_epi(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
GB_unop__lnot_int32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__lnot_int32_int32)
// op(A') function: GB (_unop_tran__lnot_int32_int32)
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
int32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__lnot_int32_int32)
(
int32_t *Cx, // Cx and Ax may be aliased
const int32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
int32_t z = aij ;
Cx [p] = !(z != 0) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int32_t aij = Ax [p] ;
int32_t z = aij ;
Cx [p] = !(z != 0) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__lnot_int32_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
gaussprod.c | // Copyright (c) 2018-2019 Osamu Hirose
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<assert.h>
#include<time.h>
#include"misc.h"
#include"kdtree.h"
#include"lapack.h"
#include"kernel.h"
#include"gaussprod.h"
void gaussprod(
double *f, /* O | I = M or N | resulting values */
double *wd, /* W | * | memory */
int *wi, /* W | * | memory */
const double *Y, /* I | D x M | input matrix Y */
const double *X, /* I | D x N | input matrix X */
const double *q, /* I | J = N or M | weight vector q */
int *T, /* I/W | 3 x J +1 | kdtree */
int D, /* I | const. | dimension */
int M, /* I | const. | #points in Y */
int N, /* I | const. | #points in X */
int P, /* I | const. | #nystrom samples */
double h, /* I | const. | band width */
double dlt, /* I | const. | neighbor width rate for h */
double lim, /* I | const. | maximum radius for kdtree */
int flg /* I | const. | flag:local+reuse+trans */
){
int d,m,n,p,i,j,I,J,L=M+N; const double *A,*B; int info,one=1; char uplo='U'; int *U=NULL;
double *Z=NULL,*K=NULL,*v=NULL; double reg=1e-8,rad=fmin(h*dlt,lim); int si=0,sd=0;
int mtd=MAXTREEDEPTH; int *a,*u,*S,*bi; double *bd;
/* memory: always use the same allocation if P>0 */
if(P){U=wi+si;si+=L; Z=wd+sd;sd+=D*L; v=wd+sd;sd+=P; K=wd+sd;sd+=P*P;}
/* switch */
if(flg&GRAM_FLAG_TRANS) {I=N;J=M;A=X;B=Y;} else {I=M;J=N;A=Y;B=X;}
if(flg&GRAM_FLAG_LOCAL) {goto neighbor; } if(!P) {goto direct; }
if(flg&GRAM_FLAG_REUSE) {goto nystrom_r; } else {goto nystrom; }
direct:
#pragma omp parallel for private (j)
for(i=0;i<I;i++){f[i]=0;for(j=0;j<J;j++)f[i]+=q[j]*gauss(A+D*i,B+D*j,D,h);} return;
neighbor: assert(T);
bi=wi+si;wi+=6*J; a=wi+si;si+=I; S=wi+si;si+=mtd*I;
bd=wd+sd;wd+=2*J; u=wi+si;si+=I;
if(flg&GRAM_FLAG_BUILD) kdtree(T,bi,bd,B,D,J);
#pragma omp parallel for private (j)
for(i=0;i<I;i++){a[i]=u[i]=f[i]=0;
do{
eballsearch_next(a+i,S+mtd*i,u+i,A+D*i,rad,B,T,D,J);
j=a[i]; if(j>=0) f[i]+=q[j]*gauss(A+D*i,B+D*j,D,h);
} while(u[i]);
}
return;
nystrom: randperm(U,L);
for(d=0;d<D;d++)for(m=0;m<M;m++) Z[d+D*( m )]=Y[d+D*m];
for(d=0;d<D;d++)for(n=0;n<N;n++) Z[d+D*(M+n)]=X[d+D*n];
for(i=0;i<P;i++)for(j=0;j<P;j++) K[i+P*j]=gauss(Z+D*U[i],Z+D*U[j],D,h)+(i==j?reg:0);
dpotrf_(&uplo,&P,K,&P,&info); if(info!=0){goto err01;}
nystrom_r:
#pragma omp parallel for private (j)
for(p=0;p<P;p++){v[p]=0;for(j=0;j<J;j++)v[p]+=gauss(Z+D*U[p],B+D*j,D,h)*q[j];}
dpotrs_(&uplo,&P,&one,K,&P,v,&P,&info); if(info!=0){goto err02;}
#pragma omp parallel for private (p)
for(i=0;i<I;i++){f[i]=0;for(p=0;p<P;p++)f[i]+=gauss(Z+D*U[p],A+D*i,D,h)*v[p];}
return;
err01: printf("ERROR: The Cholesky factorization failed at gaussprod.\n"); exit(EXIT_FAILURE);
err02: printf("ERROR: Solving linear equations failed at gaussprod. \n"); exit(EXIT_FAILURE);
}
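/* The nystrom branch above approximates f = K_AB q by
   f ~= K_AU (K_UU + reg*I)^{-1} K_UB q for P randomly sampled landmark
   points U, factorizing the small P x P Gram matrix with Cholesky (dpotrf_)
   and back-substituting (dpotrs_). A self-contained sketch with a
   hand-rolled Cholesky standing in for LAPACK (illustrative names): */
static void nystrom_sketch_chol_solve(double *K, double *v, int P){
  int i,j,k;
  /* in-place lower Cholesky K = L L^T */
  for(j=0;j<P;j++){
    for(k=0;k<j;k++) K[j*P+j]-=K[j*P+k]*K[j*P+k];
    K[j*P+j]=sqrt(K[j*P+j]);
    for(i=j+1;i<P;i++){
      for(k=0;k<j;k++) K[i*P+j]-=K[i*P+k]*K[j*P+k];
      K[i*P+j]/=K[j*P+j];
    }
  }
  /* forward solve L y = v, then back solve L^T x = y (x overwrites v) */
  for(i=0;i<P;i++){for(k=0;k<i;k++) v[i]-=K[i*P+k]*v[k]; v[i]/=K[i*P+i];}
  for(i=P-1;i>=0;i--){for(k=i+1;k<P;k++) v[i]-=K[k*P+i]*v[k]; v[i]/=K[i*P+i];}
}
static void nystrom_sketch(void){
  /* toy sizes: D=1, P=3 landmarks U, J=4 source points B with weights q */
  enum {P=3,J=4}; double h=1.0,reg=1e-8;
  double U[3]={-1,0,1}, B[4]={-2,-1,1,2}, q[4]={1,1,1,1};
  double K[9], v[3]={0,0,0}, a=0.5, f=0; int i,j,p;
  for(i=0;i<P;i++)for(j=0;j<P;j++) K[i*P+j]=gauss(U+i,U+j,1,h)+(i==j?reg:0);
  for(p=0;p<P;p++)for(j=0;j<J;j++) v[p]+=gauss(U+p,B+j,1,h)*q[j];
  nystrom_sketch_chol_solve(K,v,P);  /* v now holds (K_UU)^{-1} K_UB q */
  for(p=0;p<P;p++) f+=gauss(&a,U+p,1,h)*v[p]; /* f ~= sum_j q[j]*gauss(a,B_j) */
  (void)f;
}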
/* faster implementation with kdtree, specialized for w and PX */
void gaussprod_kdbatch(
double *w, /* O | M | required for w=P1 */
double *PX, /* O | M | required for x=inv(w)*PX */
int *wi, /* W | * | memory */
const double *Y, /* I | D x M | input matrix Y */
const double *X, /* I | D x N | input matrix X */
const double *q, /* I | N | weight vector q */
int *T, /* I/W | 3 x N +1 | kdtree */
int D, /* I | const. | dimension */
int M, /* I | const. | #points in Y */
int N, /* I | const. | #points in X */
double h, /* I | const. | band width */
double dlt, /* I | const. | neighbor width rate for h */
double lim /* I | const. | maximum radius for kdtree */
){
int d,m,n; double rad=fmin(h*dlt,lim); int si=0; int mtd=MAXTREEDEPTH;
double val; int *a,*u,*S;
assert(T); a=wi+si;si+=M; u=wi+si;si+=M; S=wi+si;si+=mtd*M;
#pragma omp parallel for private (d) private (n) private(val)
for(m=0;m<M;m++){a[m]=u[m]=w[m]=0;for(d=0;d<D;d++)PX[m+M*d]=0;
do{
eballsearch_next(a+m,S+mtd*m,u+m,Y+D*m,rad,X,T,D,N); n=a[m];
if(n>=0){val=q[n]*gauss(Y+D*m,X+D*n,D,h);w[m]+=val;for(d=0;d<D;d++)PX[m+M*d]+=X[d+D*n]*val;}
} while(u[m]);
}
return;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
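/* Convenience wrapper showing the intended use of timeval_subtract: elapsed
 * wall-clock seconds between two gettimeofday() samples (an illustrative
 * helper mirroring the timing done in main below).
 */
double elapsed_seconds(struct timeval *start, struct timeval *end)
{
  struct timeval result;
  timeval_subtract(&result, end, start);
  return result.tv_sec + result.tv_usec * 1.0e-6;
}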
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx = 0, Ny = 0, Nz = 0, Nt = 0; /* stay defined if argv sizes are missing */
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2; /* allocated below */
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 32;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
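/* Reading aid for the machine-generated loop nest below: at each interior
   point it performs the 25-point wave-equation update
     A[(t+1)%2][i][j][k] = 2*A[t%2][i][j][k] - A[(t+1)%2][i][j][k]
       + roc2[i][j][k] * ( coef0 * A[t%2][i][j][k]
           + coef1 * (sum of the 6 axis neighbors at distance 1)
           + coef2 * (sum of the 6 axis neighbors at distance 2)
           + coef3 * (sum of the 6 axis neighbors at distance 3)
           + coef4 * (sum of the 6 axis neighbors at distance 4) );
   with the time dimension folded into two alternating planes of A. */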
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
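/* The loop nest below is polyhedral (PLUTO/CLooG-style) time-tiled code:
 * t5 is the original time step, double-buffered via A[t5%2]/A[(t5+1)%2],
 * and t6..t8 recover the original z/y/x indices as -4*t5+t6 etc., a skew
 * of 4 (the stencil radius) per time step. The tile extents visible in the
 * bounds (16 for t2, 32 for t3, 128 for t4) match the tile_size list set
 * up above; the omp parallel for over t2 runs the independent tiles of a
 * wavefront concurrently, and ivdep/vector always request vectorization of
 * the innermost x loop. */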
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-19,32));t3<=min(min(min(floord(4*Nt+Ny-9,32),floord(8*t1+Ny+7,32)),floord(16*t2+Ny+3,32)),floord(16*t1-16*t2+Nz+Ny+5,32));t3++) {
for (t4=max(max(max(0,ceild(t1-15,16)),ceild(16*t2-Nz-115,128)),ceild(32*t3-Ny-115,128));t4<=min(min(min(min(floord(4*Nt+Nx-9,128),floord(8*t1+Nx+7,128)),floord(16*t2+Nx+3,128)),floord(32*t3+Nx+19,128)),floord(16*t1-16*t2+Nz+Nx+5,128));t4++) {
for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),8*t3+6),32*t4+30);t5++) {
for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) {
lbv=max(128*t4,4*t5+4);
ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[(t5+1)%2][-4*t5+t6][-4*t5+t7][-4*t5+t8] =
  2.0 * A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8]
  - A[(t5+1)%2][-4*t5+t6][-4*t5+t7][-4*t5+t8]
  + roc2[-4*t5+t6][-4*t5+t7][-4*t5+t8] *
    (coef0 * A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8]
     + coef1 * (A[t5%2][-4*t5+t6-1][-4*t5+t7][-4*t5+t8] + A[t5%2][-4*t5+t6+1][-4*t5+t7][-4*t5+t8]
              + A[t5%2][-4*t5+t6][-4*t5+t7-1][-4*t5+t8] + A[t5%2][-4*t5+t6][-4*t5+t7+1][-4*t5+t8]
              + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8-1] + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8+1])
     + coef2 * (A[t5%2][-4*t5+t6-2][-4*t5+t7][-4*t5+t8] + A[t5%2][-4*t5+t6+2][-4*t5+t7][-4*t5+t8]
              + A[t5%2][-4*t5+t6][-4*t5+t7-2][-4*t5+t8] + A[t5%2][-4*t5+t6][-4*t5+t7+2][-4*t5+t8]
              + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8-2] + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8+2])
     + coef3 * (A[t5%2][-4*t5+t6-3][-4*t5+t7][-4*t5+t8] + A[t5%2][-4*t5+t6+3][-4*t5+t7][-4*t5+t8]
              + A[t5%2][-4*t5+t6][-4*t5+t7-3][-4*t5+t8] + A[t5%2][-4*t5+t6][-4*t5+t7+3][-4*t5+t8]
              + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8-3] + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8+3])
     + coef4 * (A[t5%2][-4*t5+t6-4][-4*t5+t7][-4*t5+t8] + A[t5%2][-4*t5+t6+4][-4*t5+t7][-4*t5+t8]
              + A[t5%2][-4*t5+t6][-4*t5+t7-4][-4*t5+t8] + A[t5%2][-4*t5+t6][-4*t5+t7+4][-4*t5+t8]
              + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8-4] + A[t5%2][-4*t5+t6][-4*t5+t7][-4*t5+t8+4]));
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc < 5) {
fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1;
}
// all four dimensions are required; +2 adds the one-cell halo on each side
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size list; a trailing -1 sentinel marks the end of the list
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 32;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
// Initialize both time buffers over the full grid, including the boundary
// planes, which are read (as A[t%2] neighbours) but never written.
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
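// The scop/endscop pragmas delimit the static control part that a
// polyhedral source-to-source tool (e.g. PLUTO, fed by the tile_size list
// above) would tile; the region itself is the reference serial 7-point
// Jacobi update A[t+1] = alpha*A[t] + beta*(sum of the 6 face neighbours).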
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (left commented out: freeing here degraded performance)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
axpy_omp.c | // Experimental test input for Accelerator directives
// simplest scalar*vector operations
// Liao 1/15/2013
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/timeb.h>
#define NUM_RUNS 10
double read_timer_ms() {
struct timeb tm;
ftime(&tm);
return (double) tm.time * 1000.0 + (double) tm.millitm;
}
/* change this to do saxpy or daxpy : single precision or double precision*/
#define REAL double
#define VEC_LEN 1024000 //use a fixed number for now
/* zero out the entire vector */
void zero(REAL *A, int n)
{
int i;
for (i = 0; i < n; i++) {
A[i] = 0.0;
}
}
/* initialize a vector with random floating point numbers */
void init(REAL *A, int n)
{
int i;
for (i = 0; i < n; i++) {
A[i] = (double)drand48();
}
}
/*serial version */
void axpy(REAL* x, REAL* y, long n, REAL a) {
int i;
for (i = 0; i < n; i++)
{
y[i] += a * x[i];
}
}
/* compare two arrays and return percentage of difference */
REAL check(REAL*A, REAL*B, int n)
{
int i;
REAL diffsum =0.0, sum = 0.0;
for (i = 0; i < n; i++) {
diffsum += fabs(A[i] - B[i]);
sum += fabs(B[i]);
}
return diffsum/sum;
}
void axpy_ompacc(REAL* x, REAL* y, int n, REAL a) {
int i;
/* //implementation of the following omp target region
#pragma omp target teams distribute parallel for device (0) map(tofrom: y[0:n]) map(to: x[0:n],a,n) shared(x, y, n, a) private(i)
for (i = 0; i < n; ++i)
y[i] += a * x[i];
*/
//#pragma omp target map(tofrom: y[0:n]) map(to: x[0:n],a,n)
//#pragma omp parallel for shared(x, y, n, a) private(i)
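/* Offload sketch: y is both read and written on the device, hence
 * map(tofrom:); x, a and n are read-only, hence map(to:). The iteration
 * space is spread over up to 256 teams of up to 1024 threads each (both
 * clauses are upper bounds; the runtime may launch fewer). */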
#pragma omp target teams distribute parallel for map(tofrom: y[0:n]) map(to: x[0:n],a,n) shared(x, y, n, a) private(i) num_teams(256) num_threads(1024)
for (i = 0; i < n; i++)
y[i] += a * x[i];
}
int main(int argc, char *argv[])
{
int n;
REAL *y_ompacc, *y, *x;
REAL a = 123.456;
n = 1 << 23; // 2^23, 8 million
fprintf(stderr, "Usage: axpy <n>, where the problem size is 2^n.\n");
if (argc >= 2) {
n = 1 << atoi(argv[1]);
}
y_ompacc = (REAL *) malloc(n * sizeof(REAL));
y = (REAL *) malloc(n * sizeof(REAL));
x = (REAL *) malloc(n * sizeof(REAL));
srand48(1<<12);
init(x, n);
init(y_ompacc, n);
memcpy(y, y_ompacc, n*sizeof(REAL));
axpy(x, y, n, a);
int i;
double elapsed = read_timer_ms();
for (i = 0; i < NUM_RUNS; i++)
axpy_ompacc(x, y_ompacc, n, a); /* device kernel writes y_ompacc; y holds the serial reference */
elapsed = (read_timer_ms() - elapsed)/NUM_RUNS;
// Note: with NUM_RUNS > 1 the device axpy accumulates NUM_RUNS times while
// the serial reference ran once, so the checksum is only meaningful for
// NUM_RUNS == 1.
REAL checkresult = check(y_ompacc, y, n);
fprintf(stderr, "axpy(%d): checksum: %g, time: %0.2fms\n", n, checkresult, elapsed);
//assert (checkresult < 1.0e-10);
printf("%g", elapsed);
free(y_ompacc);
free(y);
free(x);
return 0;
}
|
GB_positional_op_ip.c | //------------------------------------------------------------------------------
// GB_positional_op_ip: C = positional_op (A), depending only on i
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// A can be jumbled. If A is jumbled, so is C.
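// Each output entry depends only on the row index of the corresponding
// input entry, so the loop is embarrassingly parallel and a static
// schedule divides it evenly. GBI(Ai,p,avlen) yields the row index of
// entry p, falling back to p % avlen when Ai is NULL (full matrices).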
{
//--------------------------------------------------------------------------
// Cx = positional_op (A)
//--------------------------------------------------------------------------
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Cx [p] = op (A (i,j))
int64_t i = GBI (Ai, p, avlen) ;
GB_APPLY (p) ;
}
}
#undef GB_APPLY
|
ompsimple.c | int myomp;
#pragma omp threadprivate(myomp)
int main1() {
#pragma omp parallel
{
myomp = 1;
/* printf("Thread x= %d\n", myomp); */
}
return 0;
}
|
convolutiondepthwise_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convdw3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
const float bias0 = bias ? bias[g] : 0.f;
const float* kernel0 = kernel + g * 9;
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(g);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* r3 = img0 + w * 3;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
int i = 0;
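// Two output rows are produced per iteration: rows i and i+1 share input
// rows r1 and r2, so each loaded input value feeds both accumulators
// (sum for row i, sum2 for row i+1); the loop after this one mops up an
// odd final row.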
for (; i + 1 < outh; i += 2)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
float sum2 = bias0;
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
*outptr = sum;
*outptr2 = sum2;
r0++;
r1++;
r2++;
r3++;
outptr++;
outptr2++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr = sum;
r0++;
r1++;
r2++;
outptr++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
static void convdw3x3s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const int tailstep = w - 2 * outw + w;
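// After one output row, r0/r1/r2 have advanced by 2*outw input elements;
// tailstep (= 2*w - 2*outw) skips the rest of the current input row plus
// one whole row, landing on the next even input row for stride 2.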
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
const float bias0 = bias ? bias[g] : 0.f;
const float* kernel0 = kernel + g * 9;
float* outptr = out;
const float* img0 = bottom_blob.channel(g);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
int i = 0;
for (; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr = sum;
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
calcium_sparks_old.c | #include <math.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include "mpi.h"
#include "hdf5.h"
#include <sys/stat.h>
#include <unistd.h> /* for access() */
//#define DEBUG_TEST
#define DB_PF 0
#define MAX_LINE_LENGTH 80
//#define __PAPI__
#ifdef __PAPI__
#include <papi.h>
#endif
typedef int(*CONDCF)(int a, int b);
#define H5T_DATA_TYPE H5T_NATIVE_SHORT
typedef short int hdf5_data_type;
#define H5_DATA_LIMIT_0 -32768 // Data type specific
#define H5_DATA_LIMIT_1 32767 // Data type specific
#define H5_DATA_SIZE (H5_DATA_LIMIT_1 - H5_DATA_LIMIT_0) // Data type specific
double timing();
void *mpi_malloc ( int id, int bytes); /* IN - Bytes to allocate */
inline double my_random();
double my_min(double* ar, int len);
double my_max(double* ar, int len);
void stern(double t, double* y0, double* y1, double Ca);
void stern_discrete(double dt, int* y0, int* y1, double Ca);
void laplace3D (int nx0, int ny0, int nz0, double* C0,
int nx1, int ny1, int nz1, double* C1,
double alpha);//, int num_threads)
void reaction3D (int nx0, int ny0, int nz0, double* Ca,
int nx1, int ny1, int nz1, double* buff,
double B_tot, double k_on, double k_off, double dt);//, int num_threads)
void serca3D (int nx0, int ny0, int nz0, double* Ca_i,
int nx1, int ny1, int nz1, double* Ca_SR,
double dt, double gamma, double fudge);//, int num_threads)
void update_ryr(int h_scale,int nx0, int ny0, int nz0, double* Ca_i, double* Ca_SR, double* Ca_CSQN,
double* C10, double* C12, double* C13, double* C14,
double k_on_CSQN, double k_off_CSQN, double CSQN_tot,
double gamma, double K, double dt,
int ryr_len, int* i0_ryr, int* i1_ryr, int* i2_ryr,
int csqn_len, int* i0_csqn, int* i1_csqn, int* i2_csqn,
int cleft_len, int* i0_cleft, int* i1_cleft, int* i2_cleft,int* cleft_nb,
int* states0, int* states1);
void store2Dmatrixfile_double_1D(char* outfile, double* ar, int rows, int cols, int x_strid);
void store2Dmatrixfile_double_bin(char* outfile, double* ar, int rows, int cols, int x_strid);
void transfer_hdf5_data(hdf5_data_type* h5_data, double* ar0, double* ar1,
double scale_value, hsize_t* chunk_dims);
void store2Dmatrixfile_int_1D(char* outfile, int* ar, int rows, int cols);
//int less(int a, int b);
//int giant(int a, int b);
//int* loadRyRindexfile_int(char* infile, CONDFN cf, int cond);
int* loadRyRindexfile_int(char* infile, int* count);
int idxinrank(int nx, int ny, int nz,
int i0, int i1, int i2,
int rank, MPI_Comm comm3d);
int idxbl2rank(int nx, int ny, int nz,
int i0, int i1, int i2,
int* coords,
MPI_Comm comm3d);
int load_indices_serial(int nx, int ny, int nz, int h,
int** i0_ryr, int** i1_ryr, int** i2_ryr, int* ryr_len,
int** i0_csqn, int** i1_csqn, int** i2_csqn, int* csqn_len,
int** i0_cleft, int** i1_cleft, int** i2_cleft, int** cleft_nb, int* cleft_len,
int x_slice_mid,int x_slice_width, int x_slice_num, int use_failing);
int IsTrueCleft(int coord_y, int coord_z, int size_y, int size_z, int *i1_csqn, int *i2_csqn, int* y_index, int csqn_len);
void BinarySort_two(int* pData, int* vData, int Count);
void dichotomy_two(int* pData,int* vData, int left,int right);
int distr_ryr_csqn_state(int h, int size_x, int size_y, int size_z,
int nx, int ny, int nz,
int** i0_ryr, int** i1_ryr, int** i2_ryr, int* ryr_len,
int** i0_csqn, int** i1_csqn, int** i2_csqn, int* csqn_len,
int** i0_cleft, int** i1_cleft, int** i2_cleft,int** cleft_nb, int* cleft_len,
int** states0, int** states1,
int x_slice_mid,int x_slice_width, int x_slice_num,
MPI_Comm comm3d, MPI_Comm, int use_failing);
void readparam(int* iconf, double* conf);
void updateBound(double* C00, double* C01, double* C02, double* C03, double* C04,
int C_flag, int nx0, int ny0, int nz0,
double* yz_sbuf0,double* yz_rbuf0,
double* xz_sbuf0,double* xz_rbuf0,
double* xy_sbuf0,double* xy_rbuf0,
double* yz_sbuf1,double* yz_rbuf1,
double* xz_sbuf1,double* xz_rbuf1,
double* xy_sbuf1,double* xy_rbuf1,
int* neighbor, MPI_Status* ar_status, MPI_Request* ar_send_req, MPI_Request* ar_recv_req,
MPI_Comm comm,MPI_Comm comm3d);
void putin_sendbuffer_yz(int base_addr,int nx0,int ny0, int nz0, double* arr,
int nx, int ny, int nz, double* sbuf, int sbuf_len);
void putin_sendbuffer_xz(int base_addr,int nx0,int ny0, int nz0, double* arr,
int nx, int ny, int nz, double* sbuf, int sbuf_len);
void putin_sendbuffer_xy(int base_addr,int nx0,int ny0, int nz0, double* arr,
int nx, int ny, int nz, double* sbuf, int sbuf_len);
void getout_recvbuffer_yz(int base_addr,int nx0,int ny0, int nz0, double* arr,
int nx, int ny, int nz, double* sbuf, int sbuf_len);
void getout_recvbuffer_xz(int base_addr,int nx0,int ny0, int nz0, double* arr,
int nx, int ny, int nz, double* sbuf, int sbuf_len);
void getout_recvbuffer_xy(int base_addr,int nx0,int ny0, int nz0, double* arr,
int nx, int ny, int nz, double* sbuf, int sbuf_len);
void compute_pde_ode(int nx0, int ny0, int nz0, double dt,double gamma, double fudge,
double* alpha, double* B_tot, double* k_on, double* k_off,
double** C0, double** C1, int div_y);
#define NUM_SAVE_SPECIES 5
int save_species[NUM_SAVE_SPECIES] = {0,1,4,5,6};
char* species_names[7] = {"Cai", "CaSR", "CaCMDN", "CaATP", "CaFluo", "CaTRPN", "CaCSQN"};
int main(int argc, char **argv)
{
int i,j,k;
#ifdef __PAPI__
// int Events[] = { PAPI_L1_DCA, PAPI_L1_DCM };
// int Events[] = {PAPI_L3_TCM, PAPI_L3_TCA, PAPI_L2_TCM,PAPI_L2_TCA};
int Events[] = {PAPI_DP_OPS,PAPI_L3_TCM};
int NUM_EVENTS = sizeof(Events)/sizeof(Events[0]);
long long res_papi[NUM_EVENTS];
char EventName[128];
int num_hwcntrs = 0;
int EventSet = PAPI_NULL;
int retval;
retval = PAPI_library_init( PAPI_VER_CURRENT );
retval = PAPI_create_eventset( &EventSet );
if (PAPI_add_events( EventSet, Events, NUM_EVENTS) != PAPI_OK){
printf("PAPI_add_events failed\n");
}
for (i=0; i<NUM_EVENTS; i++){
res_papi[i] = 0;
}
#endif
double time_main=0.0;
double time_comm=0.0;
double time_conc=0.0;
double time_ryr=0.0;
double time_io=0.0;
int save_data=0;
int use_rand_seed=1;
int use_failing=0;
int idx;
int h_scale=1;
int h=30;
int div_y=1;
int save_binary_file=0;
int save_hdf5=0;
double T=1.0;
double DT=0.05; // plotting time step
int TimeStep=2;
int size_x, size_y, size_z, my_id, x_domains, y_domains, z_domains;
int iconf[12];
double conf[2];
/* MPI variables */
int nproc, ndims;
MPI_Comm comm, comm3d;
int dims[3];
int periods[3];
int reorganisation = 0;
MPI_Datatype matrix_type_oyz, matrix_type_oxz, matrix_type_oxy;
int ZN=0, ZP=1, YN=2, YP=3, XN=4, XP=5;
int NeighBor[6];
hid_t h5_file_id;
hdf5_data_type* h5_data;
MPI_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
MPI_Comm_size(comm, &nproc);
MPI_Comm_rank(comm, &my_id);
MPI_Info info = MPI_INFO_NULL;
if (my_id==0) {
readparam(iconf, conf);
}
MPI_Bcast(iconf, 12, MPI_INT, 0, comm);
MPI_Bcast(conf, 2, MPI_DOUBLE, 0, comm);
h = iconf[0];
size_x = iconf[1];
size_y = iconf[2];
size_z = iconf[3];
x_domains = iconf[4];
y_domains = iconf[5];
z_domains = iconf[6];
save_data = iconf[7];
use_failing = iconf[8];
save_binary_file = iconf[9]; // Save Ca in binary file instead of ascii file
save_hdf5 = iconf[10]; // Save data in hdf5 file format
div_y = iconf[11]; // Block size on y direction for cache
T = conf[0];
DT = conf[1];
h_scale=30/h;
if(use_rand_seed) srand(my_id);
char hdf5_dataset_name[200];
char hdf5_group_name[200];
char h5_basename[200];
char outdirname[200];
if(save_hdf5)
{
sprintf(h5_basename, "output_%d_%d_%d_%d_%d", h, size_x, size_y, size_z, use_failing);
}
else if(save_binary_file)
{
sprintf(outdirname, "output_%d_%d_%d_%d_%d_bin", h, size_x, size_y, size_z, use_failing);
}
else
{
sprintf(outdirname, "output_%d_%d_%d_%d_%d", h, size_x, size_y, size_z, use_failing);
}
if(!my_id)
{
if(save_data && !save_hdf5){
if(access(outdirname,0))
{
if (mkdir(outdirname, 0755)==-1)
{
printf("make directory failed\n");
}
else
{
printf("make directory: %s\n", outdirname);
}
}
else
{
printf("directory %s existed\n",outdirname);
}
}
}
MPI_Barrier(comm);
if((my_id==0) && (nproc!=(x_domains*y_domains*z_domains))) {
printf("Number of processes not equal to Number of subdomains\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
if((my_id==0)&&(size_x%x_domains!=0)) {
printf("Number of x_domains is not divisible in scale\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
if((my_id==0)&&(size_y%y_domains!=0)) {
printf("Number of y_domains is not divisible in scale\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
if((my_id==0)&&(size_z%z_domains!=0)) {
printf("Number of z_domains is not divisible in scale\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
if(((size_y/y_domains)%div_y)!=0){
div_y=1;
if(my_id==0){
printf("Warning: div_y is not divisible on each node, so set div_y=1 for default \n");
}
}
/* Create 3D cartesian grid */
periods[0] = 0;
periods[1] = 0;
periods[2] = 0;
ndims = 3;
dims[0]=z_domains;
dims[1]=y_domains;
dims[2]=x_domains;
MPI_Cart_create(comm, ndims, dims, periods, reorganisation, &comm3d);
/* MPI variables */
MPI_Status ar_status[6];
MPI_Request ar_send_req[6];
MPI_Request ar_recv_req[6];
int coord[3];
int dim[3];
int period[3];
int mid_coord_x=0;
int in_midx_slice=0;
int x_slice_num;
int x_slice_width;
int x_slice_mid;
MPI_Cart_get(comm3d, 3, dim, period, coord);
x_slice_num=(int)(ceil((double)(size_x*h)/2100.0));
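// One CaRU-bearing x slice per ~2100 nm of physical x extent (size_x*h is
// in nm); 2100 nm appears to correspond to one sarcomere length, i.e. one
// z-disc per slice.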
if((size_x%x_slice_num)!=0)
{
printf("x dimension can not be divided by %d\n", x_slice_num);
MPI_Abort(comm,5);
}
x_slice_width=size_x/x_slice_num;
x_slice_mid=(x_slice_width+1)/2;
for(i=0;i<x_slice_num;i++)
{
if(((x_slice_width*i+x_slice_mid)>=(coord[2]*size_x/x_domains))&&
((x_slice_width*i+x_slice_mid)<((coord[2]+1)*size_x/x_domains))){
if(in_midx_slice==1){
printf("dont put two x_slice in a x partition\n");
MPI_Abort(comm,5);
}
in_midx_slice=1;
mid_coord_x=(x_slice_width*i+x_slice_mid)-(coord[2]*size_x/x_domains)+1;//+1 for ghost bound
//check x partition thickness: for simplicity, a CSQN and its no-flux extent must not be split across two x partitions
if((mid_coord_x)<(h_scale+3)||(size_x/x_domains-mid_coord_x)<(h_scale+3)){
printf("x partition is too thin for the CSQN and cleft extent\n");
MPI_Abort(comm,5);
}
}
}
//printf("Rank: %d, coord: [%d, %d, %d]\n", my_id, coord[0], coord[1], coord[2]);
/* Identify process neighbors */
NeighBor[0] = MPI_PROC_NULL;
NeighBor[1] = MPI_PROC_NULL;
NeighBor[2] = MPI_PROC_NULL;
NeighBor[3] = MPI_PROC_NULL;
NeighBor[4] = MPI_PROC_NULL;
NeighBor[5] = MPI_PROC_NULL;
/* Left/West and right/East neighbors in the Z direction */
MPI_Cart_shift(comm3d,0,1,&NeighBor[ZN],&NeighBor[ZP]);
/* Bottom/South and upper/North neighbors in the Y direction */
MPI_Cart_shift(comm3d,1,1,&NeighBor[YN],&NeighBor[YP]);
/* Down and up neighbors in the X direction */
MPI_Cart_shift(comm3d,2,1,&NeighBor[XN],&NeighBor[XP]);
//--------------------------------------------------------------------
int nx=(size_x/x_domains);
int ny=(size_y/y_domains);
int nz=(size_z/z_domains);
int nx0, ny0, nz0;
int nx1, ny1, nz1;
nx0=nx+2;
ny0=ny+2;
nz0=nz+2;
nx1=nx+2;
ny1=ny+2;
nz1=nz+2;
int len;
len=nx0*ny0*nz0;
/* Create matrix data types to communicate */
MPI_Type_vector(ny, nz, nz0, MPI_DOUBLE, &matrix_type_oyz);
MPI_Type_commit(&matrix_type_oyz);
/* Create matrix data type to communicate on vertical Oxz plan */
MPI_Type_vector(nx, nz, ny0*nz0, MPI_DOUBLE, &matrix_type_oxz);
MPI_Type_commit(&matrix_type_oxz);
/* Create matrix data type to communicate on vertical Oxy plan */
MPI_Datatype matrix_type_liney;
MPI_Type_vector(ny, 1, nz0, MPI_DOUBLE, &matrix_type_liney);
MPI_Type_commit(&matrix_type_liney);
// MPI_Type_vector(nx*ny, 1, nz0, MPI_DOUBLE, &matrix_type_oxy);
MPI_Type_hvector(nx, 1, ny0*nz0*sizeof(double), matrix_type_liney, &matrix_type_oxy);
MPI_Type_commit(&matrix_type_oxy);
if(!my_id)
printf("Simulation Begin!\n");
//Define where the RyRs are:
int* i0_ryr;
int* i1_ryr;
int* i2_ryr;
int* i0_csqn;
int* i1_csqn;
int* i2_csqn;
int* i0_cleft;
int* i1_cleft;
int* i2_cleft;
int* cleft_nb;
int ryr_len;
int csqn_len;
int cleft_len;
int* states0;
int* states1;
h_scale=distr_ryr_csqn_state( h, size_x, size_y, size_z, nx, ny, nz,
&i0_ryr, &i1_ryr, &i2_ryr, &ryr_len,
&i0_csqn, &i1_csqn, &i2_csqn, &csqn_len,
&i0_cleft, &i1_cleft, &i2_cleft, &cleft_nb,&cleft_len,
&states0, &states1,
x_slice_mid,x_slice_width, x_slice_num,
comm3d, comm, use_failing);
// store2Dmatrixfile_int_1D("i0.txt",i0,n_ryr,1);
// store2Dmatrixfile_int_1D("i1.txt",i1,n_ryr,1);
// store2Dmatrixfile_int_1D("i2.txt",i2,n_ryr,1);
double Vfraction;
//first set the numbers of RyR in a CaRU;
//All CaRU placed mid-sarcomere
Vfraction=(30.0/h)*(30.0/h)*(30.0/h); // scaling of RyR when changing dx
// Set constants and dt based on these:
double D_i=250e3; // 220e3
double D_SR=73e3; // 73.3e3;
double D_ATP=140e3;
double D_CMDN=22e3;
double D_Fluo=42e3;
double dt=(1./6)*h*h/D_i;
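// Explicit Euler for 3D diffusion is stable for dt <= h^2/(6*D); dt is
// pinned at that limit for the fastest-diffusing species (D_i), so every
// alpha = dt*D/(h*h) below is <= 1/6.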
double alpha_i = dt*D_i/(h*h);
double Ca0 = 140e-3;
double CaSR0 = 1.3e3;
double* Ca_i;
Ca_i=(double*)malloc(len*sizeof(double));
for ( i = 0; i < len; i += 1 ) {
Ca_i[i]=Ca0;
}
double alpha_SR = dt*D_SR/(h*h);
double* Ca_SR;
Ca_SR=(double*)malloc(len*sizeof(double));
for ( i = 0; i < len; i += 1 ) {
Ca_SR[i]=CaSR0;
}
double k_on_CMDN = 34e-3;
double k_off_CMDN = 238e-3;
double CMDN_tot = 24;
double alpha_CMDN = dt*D_CMDN/(h*h);
double k_on_ATP = 255e-3;
double k_off_ATP = 45;
double ATP_tot = 455;
double alpha_ATP = dt*D_ATP/(h*h);
double k_on_Fluo = 110e-3;
double k_off_Fluo = 110e-3;
double Fluo_tot = 25; // 25;
double alpha_Fluo = dt*D_Fluo/(h*h);
double k_on_TRPN = 32.7e-3;
double k_off_TRPN = 19.6e-3; // 26.16e-3;
double TRPN_tot = 70; // 50;
double k_on_CSQN = 102e-3;
double k_off_CSQN = 65;
double CSQN_tot = 30e3;
double alpha[7];
double k_on[7];
double k_off[7];
double B_tot[7];
alpha[0]=alpha_i; alpha[1]=alpha_SR; alpha[2]=alpha_CMDN; alpha[3]=alpha_ATP; alpha[4]=alpha_Fluo; alpha[5]=0; alpha[6]=0;
k_on[0]=0 ; k_on[1]= 0; k_on[2]= k_on_CMDN; k_on[3]=k_on_ATP ; k_on[4]=k_on_Fluo ; k_on[5]=k_on_TRPN; k_on[6]=k_on_CSQN;
k_off[0]=0 ; k_off[1]= 0; k_off[2]=k_off_CMDN; k_off[3]=k_off_ATP; k_off[4]=k_off_Fluo; k_off[5]=k_off_TRPN; k_off[6]=k_off_CSQN;
B_tot[0]=0 ; B_tot[1]= 0; B_tot[2]=CMDN_tot ; B_tot[3]=ATP_tot ; B_tot[4]=Fluo_tot ; B_tot[5]=TRPN_tot; B_tot[6]=CSQN_tot;
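// Setting the binding flux J = k_on*(B_tot - b)*Ca - k_off*b to zero gives
// the equilibrium b = B_tot*Ca/(Ca + Kd) with Kd = k_off/k_on, which is
// exactly what the steady-state initial conditions below evaluate.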
// Calculate steady state IC for the buffers based on Ca_i ...
double Ca_CMDN0=B_tot[2]*Ca0/(Ca0+k_off[2]/k_on[2]);
double Ca_ATP0 =B_tot[3]*Ca0/(Ca0+k_off[3]/k_on[3]);
double Ca_Fluo0=B_tot[4]*Ca0/(Ca0+k_off[4]/k_on[4]);
double Ca_TRPN0=B_tot[5]*Ca0/(Ca0+k_off[5]/k_on[5]);
// and Ca_SR:
double Ca_CSQN0 = CSQN_tot*Ca_SR[0]/(Ca_SR[0] + k_off_CSQN/k_on_CSQN);
double init_values[7] = {Ca0, CaSR0, Ca_CMDN0, Ca_ATP0, Ca_Fluo0, Ca_TRPN0, Ca_CSQN0};
//printf("%f %f %f %f %f \n ", Ca_ATP0, Ca_CMDN0, Ca_Fluo0, Ca_TRPN0, Ca_CSQN0);
if(my_id==0)
printf("cubiod_c: h:%d size_x:%d size_y:%d size_z:%d dt:%f, T:%f, TimeStep:%d, DT:%f outfilenum:%d, x_slice_num:%d, use_failing:%d, div_y:%d, save_binary:%d \n",
h, size_x, size_y, size_z,dt,T,
(int)(T/dt),DT,(int)(T/DT)*save_data,x_slice_num,use_failing,
div_y,save_binary_file);
// Allocate the data structure for the solution
double *Ca_ATP ;
double *Ca_CMDN ;
double *Ca_Fluo ;
double *Ca_TRPN ;
double *Ca_CSQN ;
Ca_ATP =(double*)malloc(len*sizeof(double));
Ca_CMDN=(double*)malloc(len*sizeof(double));
Ca_Fluo=(double*)malloc(len*sizeof(double));
Ca_TRPN=(double*)malloc(len*sizeof(double));
Ca_CSQN=(double*)malloc(len*sizeof(double));
for ( i = 0; i < len; i += 1 ) {
Ca_ATP[i] = Ca_ATP0;
Ca_CMDN[i] = Ca_CMDN0;
Ca_Fluo[i] = Ca_Fluo0;
Ca_TRPN[i] = Ca_TRPN0;
Ca_CSQN[i] = Ca_CSQN0;
}
double* C0[7];
double* C1[7];
double* C_temp;
C0[0]=(double*)malloc(len*sizeof(double));
C1[0]=Ca_i;
memcpy(C0[0],C1[0],len*sizeof(double));
C0[1]=(double*)malloc(len*sizeof(double));
C1[1]=Ca_SR;
memcpy(C0[1],C1[1],len*sizeof(double));
C0[2]=(double*)malloc(len*sizeof(double));
C1[2]=Ca_CMDN;
memcpy(C0[2],C1[2],len*sizeof(double));
C0[3]=(double*)malloc(len*sizeof(double));
C1[3]=Ca_ATP;
memcpy(C0[3],C1[3],len*sizeof(double));
C0[4]=(double*)malloc(len*sizeof(double));
C1[4]=Ca_Fluo;
memcpy(C0[4],C1[4],len*sizeof(double));
C0[5]=(double*)malloc(len*sizeof(double));
C1[5]=Ca_TRPN;
memcpy(C0[5],C1[5],len*sizeof(double));
C0[6]=(double*)malloc(len*sizeof(double));
C1[6]=Ca_CSQN;
memcpy(C0[6],C1[6],len*sizeof(double));
//Ca = [[Ca_i.copy(), Ca_i ],
// [Ca_SR.copy(), Ca_SR ],
// [Ca_CMDN.copy(), Ca_CMDN],
// [Ca_ATP.copy(), Ca_ATP ],
// [Ca_Fluo.copy(), Ca_Fluo],
// [Ca_TRPN, Ca_TRPN],
// [Ca_CSQN, Ca_CSQN]]
double gamma = 0.02; // SR volume fraction
int cai=0;
int sri=1;
// int cmdni=2;
// int atpi=3;
// int fluoi=4;
// int trpni=5;
int csqni=6;
double fraction[7]={1,1,1,1,1,1,1};
fraction[1]=gamma;
fraction[6]=gamma;
// Ryr conductance:
double k_s = (Vfraction)*150/2; // 1/ms, based on 0.5pA of Ca2+ into (30nm)^3.
double K = exp(-k_s*dt*(1+1/gamma)); // factor need in the integration below
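// While an RyR is open, (Ca_i, Ca_SR) at that voxel relaxes exponentially
// toward the volume-weighted mixture c0 = (Ca_i + gamma*Ca_SR)/(1+gamma)
// at rate k_s*(1+1/gamma); K is the exact per-step decay factor used by
// the analytical update in update_ryr().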
if(my_id==0){
printf("dt = dt: %e\n", dt);
printf("k_s = (Vfraction)*150/2: %e\n", k_s);
printf("K = exp(-k_s*dt*(1+1/gamma)): %e\n", K);
}
double t=0;
int counter=0;
// int mean[7];
time_main-=timing();
FILE *fpdata;
char meanfile[200];
if (save_hdf5)
sprintf(meanfile,"%s_mean.txt", h5_basename);
else
sprintf(meanfile,"%s/mean.txt", outdirname);
if(!my_id){
if(save_data){
if ((fpdata=fopen(meanfile, "w"))==NULL)
{
printf("failed to open output file %s!\n", meanfile);
exit(0);
}
}
}
// H5 Setup
if (save_hdf5)
{
char h5_data_file[200];
// Set up file access property list with parallel I/O access
// property list identifier
hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(plist_id, comm, info);
sprintf(h5_data_file, "%s.h5", h5_basename);
// Create a new file collectively and release property list identifier.
h5_file_id = H5Fcreate(h5_data_file, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
H5Pclose(plist_id);
const int data_rank = 2;
hsize_t dimsf[2] = {size_y, size_z}; /* dataset dimensions */
hsize_t chunk_dims[2] = {ny, nz}; /* chunk dimensions */
// Offset into dataset based on the MPI coord from MPI_Cart_get
// (dimension 0 spans y in steps of ny, dimension 1 spans z in steps of nz)
hsize_t h5_offset[2] = {coord[1]*ny, coord[0]*nz};
hsize_t h5_count[2] = {1, 1};
hsize_t data_size=ny*nz;
h5_data = (hdf5_data_type*)malloc(data_size*sizeof(hdf5_data_type));
if (!my_id)
{
printf("Total data size per species: %zu, %zu\n", dimsf[0], dimsf[1]);
printf("Total data size per chunk per species: %zu, %zu\n", chunk_dims[0], chunk_dims[1]);
}
printf("rank %d | h5 offset [%zu, %zu]\n", my_id, h5_offset[0], h5_offset[1]);
// Create data space for the datatype limits
hsize_t dims = 1;
hid_t attr_space = H5Screate_simple(1, &dims, NULL);
// Create a time attribute
hid_t limit_id = H5Acreate(h5_file_id, "data_type_size", H5T_NATIVE_DOUBLE,
attr_space, H5P_DEFAULT, H5P_DEFAULT);
// Write the attribute data
double data_type_size = (double)H5_DATA_SIZE;
herr_t status = H5Awrite(limit_id, H5T_NATIVE_DOUBLE, &data_type_size);
// Cleanup
H5Aclose(limit_id);
H5Sclose(attr_space);
// Save hard coded data ranges
for (i=0; i<NUM_SAVE_SPECIES; i++)
{
// Get species
int species = save_species[i];
// Create data scale attribute
sprintf(hdf5_dataset_name, "%s_scale", species_names[species]);
// Create data space for the species scale attribute
hsize_t dims = 1;
hid_t attr_space = H5Screate_simple(1, &dims, NULL);
// Create a time attribute
hid_t scale_id = H5Acreate(h5_file_id, hdf5_dataset_name, H5T_NATIVE_DOUBLE,
attr_space, H5P_DEFAULT, H5P_DEFAULT);
// Write the attribute data
herr_t status = H5Awrite(scale_id, H5T_NATIVE_DOUBLE, &init_values[species]);
// Cleanup
H5Aclose(scale_id);
H5Sclose(attr_space);
// Create init value attribute
sprintf(hdf5_dataset_name, "%s_init", species_names[species]);
// Create data space for the species init attribute
dims = 1;
attr_space = H5Screate_simple(1, &dims, NULL);
// Create a time attribute
hid_t init_id = H5Acreate(h5_file_id, hdf5_dataset_name, H5T_NATIVE_DOUBLE,
attr_space, H5P_DEFAULT, H5P_DEFAULT);
// Write the attribute data
status = H5Awrite(init_id, H5T_NATIVE_DOUBLE, &init_values[species]);
// Cleanup
H5Aclose(init_id);
H5Sclose(attr_space);
}
}
double* yz_sbuf0; double* yz_rbuf0;
double* xz_sbuf0; double* xz_rbuf0;
double* xy_sbuf0; double* xy_rbuf0;
double* yz_sbuf1; double* yz_rbuf1;
double* xz_sbuf1; double* xz_rbuf1;
double* xy_sbuf1; double* xy_rbuf1;
yz_sbuf0=(double*)mpi_malloc(my_id,5*ny*nz*sizeof(double));
xz_sbuf0=(double*)mpi_malloc(my_id,5*nx*nz*sizeof(double));
xy_sbuf0=(double*)mpi_malloc(my_id,5*nx*ny*sizeof(double));
yz_sbuf1=(double*)mpi_malloc(my_id,5*ny*nz*sizeof(double));
xz_sbuf1=(double*)mpi_malloc(my_id,5*nx*nz*sizeof(double));
xy_sbuf1=(double*)mpi_malloc(my_id,5*nx*ny*sizeof(double));
yz_rbuf0=(double*)mpi_malloc(my_id,5*ny*nz*sizeof(double));
xz_rbuf0=(double*)mpi_malloc(my_id,5*nx*nz*sizeof(double));
xy_rbuf0=(double*)mpi_malloc(my_id,5*nx*ny*sizeof(double));
yz_rbuf1=(double*)mpi_malloc(my_id,5*ny*nz*sizeof(double));
xz_rbuf1=(double*)mpi_malloc(my_id,5*nx*nz*sizeof(double));
xy_rbuf1=(double*)mpi_malloc(my_id,5*nx*ny*sizeof(double));
#ifdef __PAPI__
if ( PAPI_start( EventSet ) != PAPI_OK){
printf("PAPI_read_counters failed\n");
}
#endif
//settime
//T=1000*dt;
//for ( T = 0; T < TimeStep; T += 1 )
int t_counter=0;
while(t<T)
//while(0)
{
t+=dt;
t_counter++;
time_comm-=timing();
updateBound(C0[0], C0[1], C0[2], C0[3], C0[4],
t_counter, nx0, ny0, nz0,
yz_sbuf0,yz_rbuf0, xz_sbuf0,xz_rbuf0, xy_sbuf0,xy_rbuf0,
yz_sbuf1,yz_rbuf1, xz_sbuf1,xz_rbuf1, xy_sbuf1,xy_rbuf1,
NeighBor, ar_status,ar_send_req,ar_recv_req,
comm, comm3d);
time_comm+=timing();
// Diffusion update
time_conc-=timing();
// Change to use a faster computing function
compute_pde_ode(nx0, ny0, nz0, dt, gamma, 1e-4,
alpha, B_tot, k_on, k_off,
C0, C1, div_y);
// for ( i = 0; i < 5; i += 1 ) {
// laplace3D(nx0,ny0,nz0,C0[i],nx1,ny1,nz1,C1[i],alpha[i]);
// }
// for ( i = 2; i < 6; i += 1 ) {
// reaction3D(nx1,ny1,nz1,C1[cai],nx1,ny1,nz1,C1[i],B_tot[i],k_on[i],k_off[i],dt);
// }
// serca3D(nx1,ny1,nz1, C1[cai],nx1,ny1,nz1, C1[sri], dt, gamma, 1.0);
time_conc+=timing();
// Update at RyRs, one at the time
time_ryr-=timing();
update_ryr(h_scale, nx0, ny0, nz0, C1[cai], C1[sri], C1[csqni],
C1[0],C1[2],C1[3],C1[4],
k_on_CSQN, k_off_CSQN,CSQN_tot,
gamma, K, dt,
ryr_len, i0_ryr, i1_ryr, i2_ryr,
csqn_len, i0_csqn, i1_csqn, i2_csqn,
cleft_len, i0_cleft, i1_cleft, i2_cleft,cleft_nb,
states0, states1);
time_ryr+=timing();
double sum_c_i_root[7];
double sum_c_i[7];
double cai_min;
double cai_min_root=0.0;
double cai_max;
double cai_max_root=1.0;
double sm;
double ca[8];
char caoutfile[100];
if ((fmod(t,DT)<dt)||(t==dt)){
time_io-=timing();
for(idx=0; idx<7; idx++){
sum_c_i[idx]=0.0;
for ( i = 1; i <= nx; i += 1 )
for ( j = 1; j <= ny; j += 1 )
for ( k = 1; k <= nz; k += 1 )
sum_c_i[idx]+=C1[idx][i*ny0*nz0+j*nz0+k];
}
cai_min=my_min(C1[cai],len);
cai_max=my_max(C1[cai],len);
/* reduce operation comm*/
MPI_Reduce(&sum_c_i[0], &sum_c_i_root[0], 7, MPI_DOUBLE, MPI_SUM, 0, comm);
MPI_Reduce(&cai_min, &cai_min_root, 1, MPI_DOUBLE, MPI_MIN, 0, comm);
MPI_Reduce(&cai_max, &cai_max_root, 1, MPI_DOUBLE, MPI_MAX, 0, comm);
if(!my_id){
sm = 0;
ca[0] = t;
if(save_data) fprintf(fpdata,"%f ", ca[0]);
for(idx=0; idx<7; idx++){
sm += fraction[idx]*sum_c_i_root[idx];
ca[idx+1] = sum_c_i_root[idx]/((double)nx*x_domains*(double)ny*y_domains*(double)nz*z_domains);
if(DB_PF){
printf("ca[%d]: %f , sum : %f, nx ny nz: %d %d %d \n",idx+1, ca[idx+1],
sum_c_i_root[idx],nx*x_domains,ny*y_domains,nz*z_domains);
}
if(save_data) fprintf(fpdata,"%f ", ca[idx+1]);
}
if(save_data) fprintf(fpdata,"\n ");
printf("%3d, %.3f, %3.2f, %7.2f, %3.2f, %4.2f, %.2f \n",
counter, t, ca[1], ca[2], cai_min_root, cai_max_root, sm);
}
if(save_data && in_midx_slice)
{
// If saving in hdf5
if (save_hdf5)
{
hsize_t dimsf[2] = {size_y, size_z}; /* dataset dimensions */
hsize_t chunk_dims[2] = {ny, nz}; /* chunk dimensions */
hsize_t h5_offset[2] = {coord[1]*ny, coord[0]*nz}; // dim 0 spans y, dim 1 spans z
hsize_t h5_count[2] = {1, 1};
// Create group name
sprintf(hdf5_group_name, "/data_%d", counter);
hid_t group_id = H5Gcreate(h5_file_id, hdf5_group_name,
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
// Create data space for the time attribute
hsize_t dims = 1;
hid_t attr_space = H5Screate_simple(1, &dims, NULL);
// Create a time attribute
hid_t time_id = H5Acreate(group_id, "time", H5T_NATIVE_DOUBLE, attr_space,
H5P_DEFAULT, H5P_DEFAULT);
// Write the attribute data
double time_data = counter*DT;
herr_t status = H5Awrite(time_id, H5T_NATIVE_DOUBLE, &time_data);
// Cleanup
H5Aclose(time_id);
H5Sclose(attr_space);
for (i=0; i<NUM_SAVE_SPECIES; i++)
{
// Get species
int species = save_species[i];
sprintf(hdf5_dataset_name, "%s/%s", hdf5_group_name, species_names[species]);
// file and dataset identifiers
hid_t filespace = H5Screate_simple(2, dimsf, NULL);
hid_t memspace = H5Screate_simple(2, chunk_dims, NULL);
// Create chunked dataset.
hid_t plist_id = H5Pcreate(H5P_DATASET_CREATE);
H5Pset_chunk(plist_id, 2, chunk_dims);
// Create compression filter (Not supported in parallel yet...)
//unsigned int gzip_level = 9;
//herr_t status = H5Pset_filter(plist_id, H5Z_FILTER_DEFLATE,
// H5Z_FLAG_OPTIONAL, 1, &gzip_level);
hid_t dset_id = H5Dcreate(h5_file_id, hdf5_dataset_name,
H5T_DATA_TYPE, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
H5Pclose(plist_id);
H5Sclose(filespace);
// Select hyperslab in the file.
filespace = H5Dget_space(dset_id);
status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET,
h5_offset, NULL, h5_count, chunk_dims);
// Copy data to h5_data
transfer_hdf5_data(h5_data,
&(C0[species][ny0*nz0*mid_coord_x]),
&(C1[species][ny0*nz0*mid_coord_x]),
init_values[species], chunk_dims);
// Create property list for collective dataset write.
plist_id = H5Pcreate(H5P_DATASET_XFER);
H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
status = H5Dwrite(dset_id, H5T_DATA_TYPE, memspace, filespace,
plist_id, h5_data);
// Close/release resources.
H5Dclose(dset_id);
H5Sclose(filespace);
H5Sclose(memspace);
H5Pclose(plist_id);
}
H5Gclose(group_id);
}
// No HDF5
else
{
for (i=0; i<NUM_SAVE_SPECIES; i++)
{
// Get species for this output
int species = save_species[i];
sprintf(caoutfile, "%s/Ca%d_T%d_rank%d_%d_%d.np", outdirname, species, counter,
coord[2], coord[1], coord[0]);
if(save_binary_file)
store2Dmatrixfile_double_bin(caoutfile, C1[species], ny0, nz0, mid_coord_x);
else
store2Dmatrixfile_double_1D(caoutfile, C1[species], ny0, nz0, mid_coord_x);
}
}
}
counter += 1;
}
// # Update Ca
for(i=0;i<7;i++){
C_temp=C0[i];
C0[i]=C1[i];
C1[i]=C_temp;
}
MPI_Waitall(6, ar_send_req, ar_status);
}
time_main+=timing();
if(my_id==0){
if(save_data) fclose(fpdata);
printf("cubiod_c: h:%d size_x:%d size_y:%d size_z:%d dt:%f, T:%f, TimeStep:%d, DT:%f, x_slice_num:%d\n",
h, size_x, size_y, size_z,dt,T,(int)(T/dt),DT,x_slice_num);
printf("nx0:%d ny0:%d nz0:%d size/array:%7.3f MB total size:%7.3f MB\n",
nx0,ny0,nz0,nx0*ny0*nz0*8*1e-6,nx0*ny0*nz0*8*1e-6*12);
#ifdef __PAPI__
if ( PAPI_stop( EventSet, res_papi ) != PAPI_OK){
printf("PAPI_accum_counters failed\n");
}
for (i = 0; i<NUM_EVENTS; i++){
PAPI_event_code_to_name(Events[i], EventName);
printf("PAPI Event name: %s, value: %lld\n", EventName, res_papi[i]);
}
#endif
printf("computing time: %7.3f \n", time_conc);
printf("updateryr time: %7.3f \n", time_ryr);
printf("communica time: %7.3f \n", time_comm);
printf("main time: %7.3f \n", time_main);
#ifdef __PAPI__
printf("PAPI Performanc/core: %7.3f GFLOPS\n", res_papi[0]/1e9/time_conc);
#endif
}
if (save_hdf5)
{
H5Fclose(h5_file_id);
free(h5_data);
}
for(i=0;i<5;i++){
free(C0[i]);
free(C1[i]);
}
free(C0[5]);
free(C0[6]);
free(C1[5]);
free(C1[6]);
free(i0_ryr);
free(i1_ryr);
free(i2_ryr);
free(i0_csqn);
free(i1_csqn);
free(i2_csqn);
free(i0_cleft);
free(i1_cleft);
free(i2_cleft);
free(cleft_nb);
MPI_Finalize();
return 0;
}
void laplace3D (int nx0, int ny0, int nz0, double* C0,
int nx1, int ny1, int nz1, double* C1,
double alpha)//, int num_threads)
{
// Set num threads
// omp_set_num_threads(num_threads);
// Local variables
int i, j, k;
double C0_tmp;
// Main kernel loop
// #pragma omp parallel for private(i, j, k, C0_tmp) //collapse(3)
for (i=1; i<nx0-1; i++)
{
for (j=1; j<ny0-1; j++)
{
for (k=1; k<nz0-1; k++)
{
// Main kernel
C0_tmp = -6*C0[i*nz0*ny0+j*nz0+k] +
C0[(i-1)*nz0*ny0+j*nz0+k] + C0[(i+1)*nz0*ny0+j*nz0+k] +
C0[i*nz0*ny0+(j-1)*nz0+k] + C0[i*nz0*ny0+(j+1)*nz0+k] +
C0[i*nz0*ny0+j*nz0+k-1] + C0[i*nz0*ny0+j*nz0+k+1];
// Put value back into return array with offset to indices
C1[i*nz1*ny1+j*nz1+k] = C0[i*nz1*ny1+j*nz1+k] + C0_tmp*alpha;
}
}
}
}
void reaction3D (int nx0, int ny0, int nz0, double* Ca,
int nx1, int ny1, int nz1, double* buff,
double B_tot, double k_on, double k_off, double dt)//, int num_threads)
{
// Set num threads
// omp_set_num_threads(num_threads);
// Local variables
int i, j, k;
double J;
// Use pointers reducing indexing into memory to once
double* Ca_ijk;
double* buff_ijk;
// Main kernel loop
// #pragma omp parallel for private(i, j, k, J, Ca_ijk, buff_ijk) //collapse(3)
for (i=1; i<nx0-1; i++)
{
for (j=1; j<ny0-1; j++)
{
for (k=1; k<nz0-1; k++)
{
// Main kernel
Ca_ijk = &Ca[i*nz0*ny0+j*nz0+k];
buff_ijk = &buff[i*nz0*ny0+j*nz0+k];
J = k_on*(B_tot - *buff_ijk)*(*Ca_ijk) - k_off*(*buff_ijk);
*Ca_ijk -= dt*J;
*buff_ijk += dt*J;
}
}
}
}
void serca3D (int nx0, int ny0, int nz0, double* Ca_i,
int nx1, int ny1, int nz1, double* Ca_SR,
double dt, double gamma, double fudge)//, int num_threads)
{
// Set num threads
// omp_set_num_threads(num_threads);
// Local variables
int i, j, k;
double J;
// Use pointers reducing indexing into memory to once
double Ca_i2_ijk;
double Ca_SR2_ijk;
// Main kernel loop
// #pragma omp parallel for private(i, j, k, J, Ca_i2_ijk, Ca_SR2_ijk) //collapse(3)
for (i=1; i<nx0-1; i++)
{
for (j=1; j<ny0-1; j++)
{
for (k=1; k<nz0-1; k++)
{
// Main kernel
Ca_i2_ijk = Ca_i[i*nz0*ny0+j*nz0+k];
Ca_SR2_ijk = Ca_SR[i*nz0*ny0+j*nz0+k];
Ca_i2_ijk *= Ca_i2_ijk;
Ca_SR2_ijk *= Ca_SR2_ijk;
J = fudge*(570997802.885875*Ca_i2_ijk - 0.0425239333622699*Ca_SR2_ijk)/(106720651.206402*Ca_i2_ijk + 182.498197548666*Ca_SR2_ijk + 5.35062954944879);
Ca_i[i*nz0*ny0+j*nz0+k] -= dt*J;
Ca_SR[i*nz0*ny0+j*nz0+k] += dt*J/gamma;
}
}
}
}
void update_ryr(int h_scale,int nx0, int ny0, int nz0, double* Ca_i, double* Ca_SR, double* Ca_CSQN,
double* C10, double* C12, double* C13, double* C14,
double k_on_CSQN, double k_off_CSQN, double CSQN_tot,
double gamma, double K, double dt,
int ryr_len, int* i0_ryr, int* i1_ryr, int* i2_ryr,
int csqn_len, int* i0_csqn, int* i1_csqn, int* i2_csqn,
int cleft_len, int* i0_cleft, int* i1_cleft, int* i2_cleft,int* cleft_nb,
int* states0, int* states1)
{
int i,j;
int x_copy_from;
int x,y,z;
int nb_y,nb_z;
int idx,idx_cleft,idx_csqn;
double J;
int open;
double c0,c1;
//extend csqn on x direction
// for(j=(1-h_scale);j<h_scale;j++){
//extend csqn on x+ direction for 30nm
for(j=0;j<h_scale;j++){
for(i=0;i<csqn_len;i+=1){
x=i0_csqn[i]+j;
#ifdef DEBUG_TEST
if((x<0)||x>(nx0-1))
{
printf("wrong csqn x index\n");
exit(0);
}
#endif
y=i1_csqn[i];
z=i2_csqn[i];
idx=x*ny0*nz0+y*nz0+z;
//CSQN step:
J = k_on_CSQN*(CSQN_tot - Ca_CSQN[idx])*Ca_SR[idx] - k_off_CSQN*Ca_CSQN[idx];
Ca_SR[idx] -= dt*J;
Ca_CSQN[idx] += dt*J;
}
}
//add no-flux boundaries by copying the neighbour's value onto each no-flux voxel
//add x+ front no-flux plane on ryr with +1 offset, copied from the -1 x-plane (where the ryr sits)
j=1;
x_copy_from=-1;
for(i=0;i<csqn_len;i+=1){
x=i0_csqn[i]+j;
#ifdef DEBUG_TEST
if((x<0)||x>(nx0-1))
{
printf("wrong csqn x index\n");
exit(0);
}
#endif
y=i1_csqn[i];
z=i2_csqn[i];
idx_cleft=x*ny0*nz0+y*nz0+z;
idx_csqn =(x+x_copy_from)*ny0*nz0+y*nz0+z;
C10[idx_cleft]=C10[idx_csqn];
C12[idx_cleft]=C12[idx_csqn];
C13[idx_cleft]=C13[idx_csqn];
C14[idx_cleft]=C14[idx_csqn];
}
//add x+ back no-flux plane on ryr with h_scale offset, and copy from +1 x-plane(outside of csqn)
if(h_scale==2)//15 nm
j=h_scale+1;//guarantee that there is at least one voxel inner the no-flux boundary
else//5nm 3mn 1nm,
j=h_scale;
x_copy_from=+1;
for(i=0;i<csqn_len;i+=1){
x=i0_csqn[i]+j;
#ifdef DEBUG_TEST
if((x<0)||x>(nx0-1))
{
printf("wrong csqn x index\n");
exit(0);
}
#endif
y=i1_csqn[i];
z=i2_csqn[i];
idx_cleft=x*ny0*nz0+y*nz0+z;
idx_csqn =(x+x_copy_from)*ny0*nz0+y*nz0+z;
C10[idx_cleft]=C10[idx_csqn];
C12[idx_cleft]=C12[idx_csqn];
C13[idx_cleft]=C13[idx_csqn];
C14[idx_cleft]=C14[idx_csqn];
}
//extend y-z plane no_flux boundary along x+ direction with +1 offset and copy value from outside of CSQN by cleft_nb index
int k;
if(h_scale==2)//15 nm
k=1;//guarantee that there is at least one voxel inner the no-flux boundary
else//5nm 3mn 1nm,
k=0;
for(j=2;j<h_scale+k;j++){
for(i=0;i<cleft_len;i+=1){
x=i0_cleft[i]+j;
#ifdef DEBUG_TEST
if((x<0)||x>(nx0-1))
{
printf("wrong csqn x index\n");
exit(0);
}
#endif
y=i1_cleft[i];
z=i2_cleft[i];
nb_y=cleft_nb[i]/8-1;
nb_z=cleft_nb[i]%8-1;
idx_cleft=x*ny0*nz0+y*nz0+z;
idx_csqn =x*ny0*nz0+(y+nb_y)*nz0+z+nb_z;
C10[idx_cleft]=C10[idx_csqn];
C12[idx_cleft]=C12[idx_csqn];
C13[idx_cleft]=C13[idx_csqn];
C14[idx_cleft]=C14[idx_csqn];
}
}
//add x- front no-flux plane on ryr with -h_scale/2(15nm) offset, and copy from +1 x-plane(t-tubule)
j=0-h_scale/2;
x_copy_from=1;
for(i=0;i<csqn_len;i+=1){
x=i0_csqn[i]+j;
#ifdef DEBUG_TEST
if((x<0)||x>(nx0-1))
{
printf("wrong csqn x index\n");
exit(0);
}
#endif
y=i1_csqn[i];
z=i2_csqn[i];
idx_cleft=x*ny0*nz0+y*nz0+z;
idx_csqn =(x+x_copy_from)*ny0*nz0+y*nz0+z;
C10[idx_cleft]=C10[idx_csqn];
C12[idx_cleft]=C12[idx_csqn];
C13[idx_cleft]=C13[idx_csqn];
C14[idx_cleft]=C14[idx_csqn];
}
//add x- back no-flux plane on ryr with -h_scale/2+1 offset, and copy from -1 x-plane(t-tubule)
/* if(h_scale=2)
j=0-h_scale/2-h_scale;
else
j=0-h_scale/2-h_scale+1;
*/
/* how thick should t-tubule be? now, just set it 2 lines on x- direction */
// j=0-h_scale/2-h_scale-1;
j=0-h_scale/2-1;
x_copy_from=-1;
for(i=0;i<csqn_len;i+=1){
x=i0_csqn[i]+j;
#ifdef DEBUG_TEST
if((x<0)||x>(nx0-1))
{
printf("wrong csqn x index\n");
exit(0);
}
#endif
y=i1_csqn[i];
z=i2_csqn[i];
idx_cleft=x*ny0*nz0+y*nz0+z;
idx_csqn =(x+x_copy_from)*ny0*nz0+y*nz0+z;
C10[idx_cleft]=C10[idx_csqn];
C12[idx_cleft]=C12[idx_csqn];
C13[idx_cleft]=C13[idx_csqn];
C14[idx_cleft]=C14[idx_csqn];
}
/* how thick should t-tubule be? */
/*
//extend y-z plane no_flux boundary along x- direction with +1 offset and copy value from outside of CSQN by cleft_nb index
int k;
if(h_scale==2)//15 nm
k=1;//guarantee that there is at least one voxel inner the no-flux boundary
else//5nm 3mn 1nm,
k=0;
for(j=0-h_scale/2-1;j>0-h_scale/2-h_scale+1-k;j--){
for(i=0;i<cleft_len;i+=1){
x=i0_cleft[i]+j;
#ifdef DEBUG_TEST
if((x<0)||x>(nx0-1))
{
printf("wrong csqn x index\n");
exit(0);
}
#endif
y=i1_cleft[i];
z=i2_cleft[i];
nb_y=cleft_nb[i]/8-1;
nb_z=cleft_nb[i]%8-1;
idx_cleft=x*ny0*nz0+y*nz0+z;
idx_csqn =x*ny0*nz0+(y+nb_y)*nz0+z+nb_z;
C10[idx_cleft]=C10[idx_csqn];
C12[idx_cleft]=C12[idx_csqn];
C13[idx_cleft]=C13[idx_csqn];
C14[idx_cleft]=C14[idx_csqn];
}
}
*/
for ( i = 0; i < ryr_len; i += 1 ) {
x=i0_ryr[i];
y=i1_ryr[i];
z=i2_ryr[i];
idx=x*ny0*nz0+y*nz0+z;
// #Continous formulation
// #states[:,i] += dt*stern(t, states[:,i], Ca_i[idx])
stern_discrete(dt, &states0[i],&states1[i], Ca_i[idx]);
open = states0[i]*(1-states1[i]);
// #Exp Euler:
// #J_RyR = k*open*(Ca_SR[idx]-Ca_i[idx])
// #Ca_i[idx] += dt*J_RyR
// #Ca_SR[idx] -= dt*J_RyR/gamma;
// #Analytical update:
// K = exp(-k_s*dt*(1+1/gamma))
if (open){
if(DB_PF) printf("open [%d] ryr[%d,%d,%d] \n", i, x, y,z);
c0 = (Ca_i[idx] + gamma*Ca_SR[idx])/(1+gamma);
c1 = (Ca_i[idx] - Ca_SR[idx])/(1+1/gamma);
Ca_i[idx] = c0 + c1*K;
Ca_SR[idx] = c0 - c1*K/gamma;
}
}
}
void stern(double t, double* y0, double* y1, double Ca){
double m = *y0;
double h = *y1;
double kim = 0.005;
double kom = 0.06;
double K_i = 0.01*10;
double K_o = 0.01*41.4;
double ki = kim/K_i;
double ko = kom/(K_o*K_o);
double dm = ko*Ca*Ca*(1-m)-kom*m;
double dh = ki*Ca*(1-h)-kim*h;
*y0=dm;
*y1=dh;
}
void stern_discrete(double dt, int* y0, int* y1, double Ca){
double kim = 0.002; // 1/ms
double kom = 1.5; // 0.5 1/ms
double kd_i = 20.0; // 20.0 um*ms
double kd_o = 0.9; // um*ms^N 0.7, 0.8, 0.9, 1.0
double Ca_ki = Ca/kd_i;
double Ca_ko = Ca/kd_o;
double ki = Ca_ki*Ca_ki; // (Ca/kd_i)^2
double ko = Ca_ko*Ca_ko*Ca_ko*Ca_ko; // ko = (Ca/kd_o)^4
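// Discrete Markov gating: each binary gate flips when a uniform draw falls
// below dt*rate, a forward-Euler approximation of the transition
// probability 1-exp(-dt*rate), valid while dt*rate << 1.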
//double kim = 0.005; // Original: 0.005
//double kom = 0.04; // Original: 0.06
//double ki = Ca*1.5*1e-3; // Original: Ca*0.5*1e-3
//double ko = 1e-6*Ca*Ca*3500; // Original: 1e-6*Ca*Ca*{35,1200,2000,3500}
double r;
int m, h;
m = *y0;
if(m==1){
r = my_random();
m = 1 - (r<(dt*kom));
}
else
{
r=my_random();
m = 1*(r<(dt*ko));
}
h = *y1;
if(h==1){
r = my_random();
h = 1 - (r<(dt*kim));
}
else{
r = my_random();
h = 1*(r<(dt*ki));
}
*y0=m;
*y1=h;
}
inline double my_random()
{
double r;
double x;
// r=(double)(rand()%100000000);
// x=(r*1e-8);
x=((double)rand())/(double)RAND_MAX;
return x;
}
void store2Dmatrixfile_double_1D(char* outfile, double* ar, int rows, int cols, int x_strid){
FILE *fpdata;
int i,j;
if ((fpdata=fopen(outfile, "w"))==NULL)
{
printf("fialed open output file ");
printf("%s",outfile);
printf(" ! \n ");
exit(0);
}
// printf("----Generating list output to ");
// printf("%s",outfile);
// printf(" file----\n");
for(i=0;i<rows;i++)
{
for(j=0;j<cols;j++)
{
fprintf(fpdata,"%.9e ", ar[x_strid*rows*cols+i*cols+j]);
}
fprintf(fpdata,"\n");
}
fclose(fpdata);
return;
}
void store2Dmatrixfile_double_bin(char* outfile, double* ar, int rows, int cols, int x_strid)
{
FILE *fpdata;
int i,j;
if ((fpdata=fopen(outfile, "wb"))==NULL)
{
printf("failed open output file ");
printf("%s",outfile);
printf(" ! \n ");
exit(0);
}
fwrite(&ar[x_strid*rows*cols],sizeof(double),rows*cols,fpdata);
fclose(fpdata);
return;
}
void transfer_hdf5_data(hdf5_data_type* h5_data, double* ar0, double* ar1,
double scale_value, hsize_t* chunk_dims)
{
int i,j;
int rows=chunk_dims[0];
int cols=chunk_dims[1];
// Transfer data from padded ar to stripped data
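// The relative change since the last saved frame, (ar1-ar0)/scale_value,
// is scaled by H5_DATA_LIMIT_1 and rounded into a 16-bit integer, which
// assumes the per-frame change never exceeds scale_value in magnitude;
// the (cols+2) stride and +1 column offset step over the ghost layer of
// the padded y-z plane.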
for(i=0;i<rows;i++)
{
for(j=0;j<cols;j++)
{
double rel_data_diff = (ar1[i*(cols+2)+j+1]-ar0[i*(cols+2)+j+1])/scale_value;
h5_data[i*cols+j] = (hdf5_data_type)round(rel_data_diff*H5_DATA_LIMIT_1);
}
}
}
void store2Dmatrixfile_int_1D(char* outfile, int* ar, int rows, int cols){
FILE *fpdata;
int i,j;
if ((fpdata=fopen(outfile, "w"))==NULL)
{
printf("failed open output file ");
printf("%s",outfile);
printf(" ! \n ");
exit(0);
}
printf("----Generating list output to ");
printf("%s",outfile);
printf(" file----\n");
for(i=0;i<rows;i++)
{
for(j=0;j<cols;j++)
fprintf(fpdata,"%d ",ar[i*cols+j]);
fprintf(fpdata,"\n");
}
fclose(fpdata);
return;
}
double my_min(double* ar, int len)
{
double min=ar[0];
int i;
for ( i = 0; i < len; i += 1 ) {
if(ar[i]<min) min=ar[i];
}
return min;
}
double my_max(double* ar, int len)
{
double max=ar[0];
int i;
for ( i = 0; i < len; i += 1 ) {
if(ar[i]>max) max=ar[i];
}
return max;
}
double timing(){
double time;
struct timeval timer;
gettimeofday(&timer,NULL);
time = 1000000*timer.tv_sec + timer.tv_usec;
time /= 1000000;
return time;
}
int load_indices_serial(int nx, int ny, int nz, int h,
int** i0_ryr, int** i1_ryr, int** i2_ryr, int* ryr_len,
int** i0_csqn, int** i1_csqn, int** i2_csqn, int* csqn_len,
int** i0_cleft, int** i1_cleft, int** i2_cleft, int** cleft_nb, int* cleft_len,
int x_slice_mid, int x_slice_width, int x_slice_num, int use_failing)
{
int i,j,k;
int nx_old;
int ny_old;
int nz_old;
nx_old=nx;
ny_old=ny;
nz_old=nz;
// Scale nx, xy, nz in terms of RyR
if(30%h!=0){
printf("30 must be divisible by h!");
exit(1);
}
int h_scale;
h_scale = 30/h;
nx = nx/h_scale;
ny = ny/h_scale;
nz = nz/h_scale;
// All CaRU placed mid-sarcomere
// int mid_x = (nx+1)/2;
// load RyR indices from file
int* i1;
int* i2;
int i1_len;
int i2_len;
char i_RyR_indices_name[200];
char j_RyR_indices_name[200];
sprintf(i_RyR_indices_name, "i_RyR_indices%s.dat", use_failing ? "_failing" : "");
sprintf(j_RyR_indices_name, "j_RyR_indices%s.dat", use_failing ? "_failing" : "");
if (use_failing)
printf("Load failing indices");
else
printf("Load normal indices");
i1=loadRyRindexfile_int(i_RyR_indices_name, &i1_len);
i2=loadRyRindexfile_int(j_RyR_indices_name, &i2_len);
// # Only use the subset which are inside the geometry
if(i1_len==i2_len)
printf("num RyR before reduction: %d\n", i1_len);
else
printf("num RyR is wrong: i1_len!=i2_len\n");
int* i1_temp;
int* i2_temp;
int i1_temp_len=0;
for ( i = 0; i < i1_len; i += 1 ) {
if(i1[i]<ny) i1_temp_len++;
}
i1_temp=malloc(i1_temp_len*sizeof(int));
i2_temp=malloc(i1_temp_len*sizeof(int));
j=0;
for ( i = 0; i < i1_len; i += 1 ) {
if(i1[i]<ny){
i1_temp[j]=i1[i];
i2_temp[j]=i2[i];
j++;
}
}
free(i1);
free(i2);
int i1_ryr_len=0;
for ( i = 0; i < i1_temp_len; i += 1 ) {
if(i2_temp[i]<nz) i1_ryr_len++;
}
*i0_ryr=malloc(x_slice_num*i1_ryr_len*sizeof(int));
*i1_ryr=malloc(x_slice_num*i1_ryr_len*sizeof(int));
*i2_ryr=malloc(x_slice_num*i1_ryr_len*sizeof(int));
j=0;
for ( i = 0; i < i1_temp_len; i += 1 ) {
if(i2_temp[i]<nz){
for(k=0; k < x_slice_num; k++){
(*i1_ryr)[k*i1_ryr_len+j]=i1_temp[i];
(*i2_ryr)[k*i1_ryr_len+j]=i2_temp[i];
}
j++;
}
}
free(i1_temp);
free(i2_temp);
// Scale indices and move to center of macro voxel
for ( i = 0; i < i1_ryr_len; i += 1 ) {
for(k=0; k < x_slice_num; k++){
(*i0_ryr)[k*i1_ryr_len+i] = k*x_slice_width+x_slice_mid;
//for those ryr just on 0 boundary, avoid to subtracting their coords to negative
if((*i1_ryr)[k*i1_ryr_len+i]>0)
(*i1_ryr)[k*i1_ryr_len+i] = (*i1_ryr)[k*i1_ryr_len+i]*h_scale - floor((double)h_scale/2);
else
(*i1_ryr)[k*i1_ryr_len+i] = (*i1_ryr)[k*i1_ryr_len+i]*h_scale;
if((*i2_ryr)[k*i1_ryr_len+i]>0)
(*i2_ryr)[k*i1_ryr_len+i] = (*i2_ryr)[k*i1_ryr_len+i]*h_scale - floor((double)h_scale/2);
else
(*i2_ryr)[k*i1_ryr_len+i] = (*i2_ryr)[k*i1_ryr_len+i]*h_scale;
}
}
*ryr_len=i1_ryr_len*x_slice_num;
// load CSQN indices from file
char i_csqn_indices_name[200];
char j_csqn_indices_name[200];
sprintf(i_csqn_indices_name, "i_csqn_indices%s.dat", use_failing ? "_failing" : "");
sprintf(j_csqn_indices_name, "j_csqn_indices%s.dat", use_failing ? "_failing" : "");
i1 = loadRyRindexfile_int(i_csqn_indices_name, &i1_len);
i2 = loadRyRindexfile_int(j_csqn_indices_name, &i2_len);
if(i1_len==i2_len)
printf("num CSQN before reduction: %d\n", i1_len);
else
printf("num CSQN is wrong: i1_len!=i2_len\n");
// Only use the subset that lies inside the geometry; numpy equivalent:
// i1_csqn = i1[i2<nz]*h_scale
// i2_csqn = i2[i2<nz]*h_scale
// i0_csqn = np.ones(len(i1_csqn), dtype=int)*mid_x*h_scale
i1_temp_len=0;
for ( i = 0; i < i1_len; i += 1 ) {
if(i1[i]<ny) i1_temp_len++;
}
i1_temp=malloc(i1_temp_len*sizeof(int));
i2_temp=malloc(i1_temp_len*sizeof(int));
j=0;
for ( i = 0; i < i1_len; i += 1 ) {
if(i1[i]<ny){
i1_temp[j]=i1[i];
i2_temp[j]=i2[i];
j++;
}
}
free(i1);
free(i2);
int i1_csqn_len=0;
for ( i = 0; i < i1_temp_len; i += 1 ) {
if(i2_temp[i]<nz) i1_csqn_len++;
}
*i0_csqn=malloc(x_slice_num*i1_csqn_len*sizeof(int));
*i1_csqn=malloc(x_slice_num*i1_csqn_len*sizeof(int));
*i2_csqn=malloc(x_slice_num*i1_csqn_len*sizeof(int));
j=0;
for ( i = 0; i < i1_temp_len; i += 1 ) {
if(i2_temp[i]<nz){
for(k=0; k < x_slice_num; k++){
(*i1_csqn)[k*i1_csqn_len+j]=i1_temp[i];
(*i2_csqn)[k*i1_csqn_len+j]=i2_temp[i];
}
j++;
}
}
free(i1_temp);
free(i2_temp);
// Scale indices and move to center of macro voxel
for(k=0; k < x_slice_num; k++){
for ( i = 0; i < i1_csqn_len; i += 1 ) {
(*i0_csqn)[k*i1_csqn_len+i] = k*x_slice_width+x_slice_mid;
(*i1_csqn)[k*i1_csqn_len+i] = (*i1_csqn)[k*i1_csqn_len+i]*h_scale;
(*i2_csqn)[k*i1_csqn_len+i] = (*i2_csqn)[k*i1_csqn_len+i]*h_scale;
}
}
int* i0_csqn_list;
int* i1_csqn_list;
int* i2_csqn_list;
int m;
int csqn_count;
*csqn_len=x_slice_num*i1_csqn_len*h_scale*h_scale;
*cleft_len=0;//x_slice_num*i1_csqn_len*4*h_scale;
// Add CSQN to all voxels covered by the original CSQN array
if (h_scale > 1){
i0_csqn_list=malloc(x_slice_num*i1_csqn_len*h_scale*h_scale*sizeof(int));
i1_csqn_list=malloc(x_slice_num*i1_csqn_len*h_scale*h_scale*sizeof(int));
i2_csqn_list=malloc(x_slice_num*i1_csqn_len*h_scale*h_scale*sizeof(int));
csqn_count=0;
// Add offset copies of the CSQN
for ( m = 0; m < x_slice_num; m += 1 ) {
for ( i = 0; i < h_scale; i += 1 ) {
for ( j = 0; j < h_scale; j += 1 ) {
for ( k = 0; k < i1_csqn_len; k += 1 ) {
i0_csqn_list[csqn_count]=(*i0_csqn)[m*i1_csqn_len+k];
i1_csqn_list[csqn_count]=(*i1_csqn)[m*i1_csqn_len+k]+i;
i2_csqn_list[csqn_count]=(*i2_csqn)[m*i1_csqn_len+k]+j;
csqn_count++;
}
}
}
}
if(csqn_count!=(*csqn_len))
{
printf("csqn_count wrong\n");
exit(0);
}
}
else
{
i0_csqn_list=(*i0_csqn);
i1_csqn_list=(*i1_csqn);
i2_csqn_list=(*i2_csqn);
}
int a_slice_csqn_len=i1_csqn_len*h_scale*h_scale;
BinarySort_two(&i1_csqn_list[0],&i2_csqn_list[0],a_slice_csqn_len);
int* y_index;
y_index=malloc(ny_old*sizeof(int));
for ( i = 0; i < ny_old; i += 1 ) {
y_index[i]=-1;
}
for ( i = a_slice_csqn_len-1; i >= 0; i -= 1 ) {
y_index[i1_csqn_list[i]]=i;
}
// Generate cleft indices on the Y-Z plane by wrapping the outside of each
// group of CSQN voxels. A candidate cleft that lies outside the mesh, or that
// is already occupied by a CSQN voxel, is not a true cleft.
// Also record, for each cleft, the relative coordinates of the neighbour from
// which to copy its value: dy = cleft_nb/8 - 1 and dz = cleft_nb%8 - 1.
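// Worked example of the encoding (derived from the assignments below): in the
// first branch, where coord_y sits one voxel below the CSQN block,
// cleft_nb=16+1=17 decodes to (dy,dz)=(17/8-1,17%8-1)=(+1,0), i.e. the cleft
// copies its value from the voxel one step back inside the block in y.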
int coord_y,coord_z;
*i1_cleft=(int*)malloc(i1_csqn_len*4*h_scale*sizeof(int));
*i2_cleft=(int*)malloc(i1_csqn_len*4*h_scale*sizeof(int));
*cleft_nb=(int*)malloc(i1_csqn_len*4*h_scale*sizeof(int));
*cleft_len=0;
for ( k = 0; k < i1_csqn_len; k += 1 ) {
for ( j = 0; j < h_scale; j += 1 ) {
//z bottom line
coord_y=(*i1_csqn)[k]-1;
coord_z=(*i2_csqn)[k]+j;
if(IsTrueCleft(coord_y, coord_z, ny_old, nz_old, i1_csqn_list, i2_csqn_list, y_index, a_slice_csqn_len))
{
(*i1_cleft)[(*cleft_len)]=coord_y;
(*i2_cleft)[(*cleft_len)]=coord_z;
//copy from outside
// (*cleft_nb)[(*cleft_len)]=0+1;
// copy from inside
(*cleft_nb)[(*cleft_len)]=16+1;
(*cleft_len)++;
}
//y left line
coord_y=(*i1_csqn)[k]+j;
coord_z=(*i2_csqn)[k]-1;
if(IsTrueCleft(coord_y, coord_z, ny_old, nz_old, i1_csqn_list, i2_csqn_list, y_index, a_slice_csqn_len))
{
(*i1_cleft)[(*cleft_len)]=coord_y;
(*i2_cleft)[(*cleft_len)]=coord_z;
//copy from outside
// (*cleft_nb)[(*cleft_len)]=8+0;
//copy from inside
(*cleft_nb)[(*cleft_len)]=8+2;
(*cleft_len)++;
}
//z top line
coord_y=(*i1_csqn)[k]+h_scale;
coord_z=(*i2_csqn)[k]+j;
if(IsTrueCleft(coord_y, coord_z, ny_old, nz_old, i1_csqn_list, i2_csqn_list, y_index, a_slice_csqn_len))
{
(*i1_cleft)[(*cleft_len)]=coord_y;
(*i2_cleft)[(*cleft_len)]=coord_z;
//copy from outside
// (*cleft_nb)[(*cleft_len)]=16+1;
// copy from inside
(*cleft_nb)[(*cleft_len)]=0+1;
(*cleft_len)++;
}
//y right line
coord_y=(*i1_csqn)[k]+j;
coord_z=(*i2_csqn)[k]+h_scale;
if(IsTrueCleft(coord_y, coord_z, ny_old, nz_old, i1_csqn_list, i2_csqn_list, y_index, a_slice_csqn_len))
{
(*i1_cleft)[(*cleft_len)]=coord_y;
(*i2_cleft)[(*cleft_len)]=coord_z;
//copy from outside
// (*cleft_nb)[(*cleft_len)]=8+2;
// copy from inside
(*cleft_nb)[(*cleft_len)]=8+0;
(*cleft_len)++;
}
}
}
if((*cleft_len)>i1_csqn_len*4*h_scale){
printf("wrong cleft_len found\n");
exit(0);
}
// Add clefts for multiple 2 um x-slices
int* i0_cleft_list;
int* i1_cleft_list;
int* i2_cleft_list;
int* cleft_nb_list;
i0_cleft_list=malloc(x_slice_num*(*cleft_len)*sizeof(int));
i1_cleft_list=malloc(x_slice_num*(*cleft_len)*sizeof(int));
i2_cleft_list=malloc(x_slice_num*(*cleft_len)*sizeof(int));
cleft_nb_list=malloc(x_slice_num*(*cleft_len)*sizeof(int));
for(k=0; k < x_slice_num; k++){
for ( i = 0; i < (*cleft_len); i += 1 ) {
i0_cleft_list[k*(*cleft_len)+i] = k*x_slice_width+x_slice_mid;
i1_cleft_list[k*(*cleft_len)+i] = (*i1_cleft)[i];
i2_cleft_list[k*(*cleft_len)+i] = (*i2_cleft)[i];
cleft_nb_list[k*(*cleft_len)+i] = (*cleft_nb)[i];
}
}
free(*i1_cleft);
free(*i2_cleft);
free(*cleft_nb);
*i0_cleft=i0_cleft_list;
*i1_cleft=i1_cleft_list;
*i2_cleft=i2_cleft_list;
*cleft_nb=cleft_nb_list;
*cleft_len=x_slice_num*(*cleft_len);
if (h_scale > 1){
free(*i0_csqn);
free(*i1_csqn);
free(*i2_csqn);
*i0_csqn=i0_csqn_list;
*i1_csqn=i1_csqn_list;
*i2_csqn=i2_csqn_list;
}
return h_scale;
}
int IsTrueCleft(int coord_y, int coord_z, int size_y, int size_z, int *i1_csqn, int *i2_csqn, int* y_index, int csqn_len)
{
int i;
// outside the mesh
if((coord_y<0)||(coord_y>=size_y)||(coord_z<0)||(coord_z>=size_z))
return 0;
i=y_index[coord_y];
//not in CSQN
if(i<0)
return 1;
while(i1_csqn[i]==coord_y){
//in CSQN
if(i2_csqn[i]==coord_z)
return 0;
i++;
//not in CSQN
if(i>=csqn_len)
return 1;
}
return 1;
}
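/* Lookup sketch: i1_csqn/i2_csqn arrive sorted by y (via BinarySort_two), and
y_index[y] holds the first list position carrying that y, or -1 if y is
absent. Testing membership of (y,z) therefore costs one table lookup plus a
linear scan over the short run of entries that share the same y. */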
int idxinrank(int nx, int ny, int nz,
int i0, int i1, int i2,
int rank, MPI_Comm comm3d)
{
int coords[3];
MPI_Cart_coords(comm3d,rank,3,coords);
if( (i0>=coords[2]*nx)&&(i0<(coords[2]+1)*nx)&&
(i1>=coords[1]*ny)&&(i1<(coords[1]+1)*ny)&&
(i2>=coords[0]*nz)&&(i2<(coords[0]+1)*nz))
{
return 1;
}
else
return 0;
}
int idxbl2rank(int nx, int ny, int nz,
int i0, int i1, int i2,
int* coords,
MPI_Comm comm3d)
{
int rank=0;
coords[2]=i0/nx;
coords[1]=i1/ny;
coords[0]=i2/nz;
MPI_Cart_rank(comm3d,coords,&rank);
return rank;
}
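/* Illustrative example (assuming local block sizes nx=ny=nz=10): the global
voxel (i0,i1,i2)=(15,3,12) gives coords[2]=1, coords[1]=0, coords[0]=1, which
MPI_Cart_rank maps to the owning rank in comm3d. Note the reversed axis
order: coords[2] indexes x, coords[1] y, and coords[0] z. */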
int distr_ryr_csqn_state(int h, int size_x, int size_y, int size_z,
int nx, int ny, int nz,
int** i0_ryr, int** i1_ryr, int** i2_ryr, int* ryr_len,
int** i0_csqn, int** i1_csqn, int** i2_csqn, int* csqn_len,
int** i0_cleft, int** i1_cleft, int** i2_cleft,int** cleft_nb, int* cleft_len,
int** states0, int** states1,
int x_slice_mid,int x_slice_width, int x_slice_num,
MPI_Comm comm3d, MPI_Comm comm, int use_failing)
{
int i,j;
int h_scale;
// These buffers are allocated on rank 0 only; initialize all pointers to NULL
// so the MPI_Scatter/MPI_Scatterv calls (whose send arguments are ignored on
// non-root ranks) never read indeterminate pointer values.
int* global_i0_ryr=NULL;
int* global_i1_ryr=NULL;
int* global_i2_ryr=NULL;
int* global_i0_ryr_reorder=NULL;
int* global_i1_ryr_reorder=NULL;
int* global_i2_ryr_reorder=NULL;
int* global_i0_csqn=NULL;
int* global_i1_csqn=NULL;
int* global_i2_csqn=NULL;
int* global_i0_csqn_reorder=NULL;
int* global_i1_csqn_reorder=NULL;
int* global_i2_csqn_reorder=NULL;
int* global_i0_cleft=NULL;
int* global_i1_cleft=NULL;
int* global_i2_cleft=NULL;
int* global_cleft_nb=NULL;
int* global_i0_cleft_reorder=NULL;
int* global_i1_cleft_reorder=NULL;
int* global_i2_cleft_reorder=NULL;
int* global_cleft_nb_reorder=NULL;
int global_ryr_len;
int global_csqn_len;
int global_cleft_len;
int* global_states0=NULL;
int* global_states0_reorder=NULL;
int* ryr_rec_count=NULL;
int* ryr_rec_disp=NULL;
int* ryr_rec_offset=NULL;
int* csqn_rec_count=NULL;
int* csqn_rec_disp=NULL;
int* csqn_rec_offset=NULL;
int* cleft_rec_count=NULL;
int* cleft_rec_disp=NULL;
int* cleft_rec_offset=NULL;
int my_id;
int nproc;
int coords[3];
MPI_Comm_rank(comm,&my_id);
MPI_Comm_size(comm,&nproc);
if(my_id==0){
h_scale=load_indices_serial(size_x, size_y, size_z, h,
&global_i0_ryr, &global_i1_ryr, &global_i2_ryr, &global_ryr_len,
&global_i0_csqn, &global_i1_csqn,&global_i2_csqn,&global_csqn_len,
&global_i0_cleft, &global_i1_cleft, &global_i2_cleft, &global_cleft_nb,
&global_cleft_len, x_slice_mid,x_slice_width,x_slice_num,
use_failing);
printf("load indices from file: h:%d, h_scale:%d, nx:%d, ny:%d, nz:%d, ryr_len:%d, csqn_len:%d cleft_len:%d\n",
h, h_scale, nx, ny, nz, global_ryr_len, global_csqn_len, global_cleft_len);
if(global_ryr_len>0)
global_states0=malloc(global_ryr_len*sizeof(int));
else
global_states0=malloc(1*sizeof(int));
for ( i = 0; i < global_ryr_len; i++)
global_states0[i]=0;
if(global_ryr_len>=23){
for ( i = 1; i < 23; i =i+3 )
global_states0[i]=1;
}
else
{
for ( i = 1; i < global_ryr_len ; i =i+10 )
global_states0[i]=1;
}
if(DB_PF){
for(i=0;i<global_ryr_len;i++){
if(global_states0[i]==1)
printf("ryr[%d]:%d,%d,%d \n",i,global_i0_ryr[i],global_i1_ryr[i],global_i2_ryr[i]);
}
}
ryr_rec_count=malloc(nproc*sizeof(int));
csqn_rec_count=malloc(nproc*sizeof(int));
cleft_rec_count=malloc(nproc*sizeof(int));
for (i = 0; i < nproc; i++) {
ryr_rec_count[i]=0;
csqn_rec_count[i]=0;
cleft_rec_count[i]=0;
}
for(i=0;i<global_ryr_len;i++) {
j=idxbl2rank(nx,ny,nz,global_i0_ryr[i],global_i1_ryr[i],global_i2_ryr[i],coords,comm3d);
ryr_rec_count[j]++;
}
for(i=0;i<global_csqn_len;i++) {
j=idxbl2rank(nx,ny,nz,global_i0_csqn[i],global_i1_csqn[i],global_i2_csqn[i],coords,comm3d);
csqn_rec_count[j]++;
}
for(i=0;i<global_cleft_len;i++) {
j=idxbl2rank(nx,ny,nz,global_i0_cleft[i],global_i1_cleft[i],global_i2_cleft[i],coords,comm3d);
cleft_rec_count[j]++;
}
for (i = 0; i < nproc; i++) {
if(DB_PF) printf("ryr_rec_count[%d]: %d\n",i, ryr_rec_count[i]);
if(DB_PF) printf("csqn_rec_count[%d]: %d\n",i, csqn_rec_count[i]);
if(DB_PF) printf("cleft_rec_count[%d]: %d\n",i, cleft_rec_count[i]);
}
ryr_rec_disp = malloc(nproc*sizeof(int));
csqn_rec_disp = malloc(nproc*sizeof(int));
cleft_rec_disp = malloc(nproc*sizeof(int));
ryr_rec_disp[0] = 0;
csqn_rec_disp[0] = 0;
cleft_rec_disp[0] = 0;
for (i = 1; i < nproc; i++) {
ryr_rec_disp[i] = ryr_rec_disp[i-1] + ryr_rec_count[i-1];
csqn_rec_disp[i] = csqn_rec_disp[i-1] + csqn_rec_count[i-1];
cleft_rec_disp[i] = cleft_rec_disp[i-1] + cleft_rec_count[i-1];
}
if(global_ryr_len!=ryr_rec_disp[nproc-1]+ryr_rec_count[nproc-1])
{
printf("Global ryr Count mismatch %d\n", ryr_rec_disp[nproc-1]+ryr_rec_count[nproc-1]);
}
if(global_csqn_len!=csqn_rec_disp[nproc-1]+csqn_rec_count[nproc-1])
{
printf("Global csqn Count mismatch %d\n", csqn_rec_disp[nproc-1]+csqn_rec_count[nproc-1]);
}
if(global_cleft_len!=cleft_rec_disp[nproc-1]+cleft_rec_count[nproc-1])
{
printf("Global cleft Count mismatch %d\n", cleft_rec_disp[nproc-1]+cleft_rec_count[nproc-1]);
}
ryr_rec_offset = malloc(nproc*sizeof(int));
csqn_rec_offset = malloc(nproc*sizeof(int));
cleft_rec_offset = malloc(nproc*sizeof(int));
for (i = 0; i < nproc; i++) {
ryr_rec_offset[i]=0;
csqn_rec_offset[i]=0;
cleft_rec_offset[i]=0;
}
global_i0_ryr_reorder=malloc(global_ryr_len*sizeof(int));
global_i1_ryr_reorder=malloc(global_ryr_len*sizeof(int));
global_i2_ryr_reorder=malloc(global_ryr_len*sizeof(int));
global_states0_reorder=malloc(global_ryr_len*sizeof(int));
for(i=0;i<global_ryr_len;i++) {
j=idxbl2rank(nx,ny,nz,global_i0_ryr[i],global_i1_ryr[i],global_i2_ryr[i],coords,comm3d);
global_i0_ryr_reorder[ryr_rec_disp[j]+ryr_rec_offset[j]]=global_i0_ryr[i]-coords[2]*nx+1;
global_i1_ryr_reorder[ryr_rec_disp[j]+ryr_rec_offset[j]]=global_i1_ryr[i]-coords[1]*ny+1;
global_i2_ryr_reorder[ryr_rec_disp[j]+ryr_rec_offset[j]]=global_i2_ryr[i]-coords[0]*nz+1;
global_states0_reorder[ryr_rec_disp[j]+ryr_rec_offset[j]]=global_states0[i];
ryr_rec_offset[j]++;
}
for (i = 0; i < nproc; i++) {
if(ryr_rec_offset[i]!=ryr_rec_count[i])
printf("ryr reorder count error on proc %d \n",i);
}
free(global_i0_ryr);
free(global_i1_ryr);
free(global_i2_ryr);
free(global_states0);
free(ryr_rec_offset);
// Distribute CSQN indices to their owning MPI processes
global_i0_csqn_reorder=malloc(global_csqn_len*sizeof(int));
global_i1_csqn_reorder=malloc(global_csqn_len*sizeof(int));
global_i2_csqn_reorder=malloc(global_csqn_len*sizeof(int));
for(i=0;i<global_csqn_len;i++) {
j=idxbl2rank(nx,ny,nz,global_i0_csqn[i],global_i1_csqn[i],global_i2_csqn[i],coords,comm3d);
global_i0_csqn_reorder[csqn_rec_disp[j]+csqn_rec_offset[j]]=global_i0_csqn[i]-coords[2]*nx+1;
global_i1_csqn_reorder[csqn_rec_disp[j]+csqn_rec_offset[j]]=global_i1_csqn[i]-coords[1]*ny+1;
global_i2_csqn_reorder[csqn_rec_disp[j]+csqn_rec_offset[j]]=global_i2_csqn[i]-coords[0]*nz+1;
csqn_rec_offset[j]++;
}
for (i = 0; i < nproc; i++) {
if(csqn_rec_offset[i]!=csqn_rec_count[i])
printf("csqn reorder count error on proc %d \n",i);
}
free(global_i0_csqn);
free(global_i1_csqn);
free(global_i2_csqn);
free(csqn_rec_offset);
global_i0_cleft_reorder=malloc(global_cleft_len*sizeof(int));
global_i1_cleft_reorder=malloc(global_cleft_len*sizeof(int));
global_i2_cleft_reorder=malloc(global_cleft_len*sizeof(int));
global_cleft_nb_reorder=malloc(global_cleft_len*sizeof(int));
for(i=0;i<global_cleft_len;i++) {
j=idxbl2rank(nx,ny,nz,global_i0_cleft[i],global_i1_cleft[i],global_i2_cleft[i],coords,comm3d);
global_i0_cleft_reorder[cleft_rec_disp[j]+cleft_rec_offset[j]]=global_i0_cleft[i]-coords[2]*nx+1;
global_i1_cleft_reorder[cleft_rec_disp[j]+cleft_rec_offset[j]]=global_i1_cleft[i]-coords[1]*ny+1;
global_i2_cleft_reorder[cleft_rec_disp[j]+cleft_rec_offset[j]]=global_i2_cleft[i]-coords[0]*nz+1;
global_cleft_nb_reorder[cleft_rec_disp[j]+cleft_rec_offset[j]]=global_cleft_nb[i];
cleft_rec_offset[j]++;
}
for (i = 0; i < nproc; i++) {
if(cleft_rec_offset[i]!=cleft_rec_count[i])
printf("cleft reorder count error on proc %d \n",i);
}
free(global_i0_cleft);
free(global_i1_cleft);
free(global_i2_cleft);
free(global_cleft_nb);
free(cleft_rec_offset);
}
//MPI_Gather(&n_ryr,1,MPI_INT,&states_rec_count[0],1,MPI_INT,0,comm);
// Send buffers are significant on the root rank only; pass the (possibly
// NULL) pointers directly so non-root ranks never index them.
MPI_Scatter(ryr_rec_count,1,MPI_INT,ryr_len,1, MPI_INT,0,comm);
MPI_Scatter(csqn_rec_count,1,MPI_INT,csqn_len,1, MPI_INT,0,comm);
MPI_Scatter(cleft_rec_count,1,MPI_INT,cleft_len,1, MPI_INT,0,comm);
if(*ryr_len>0){
*i0_ryr=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int));
*i1_ryr=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int));
*i2_ryr=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int));
}
else
{
*i0_ryr=(int*)mpi_malloc(my_id,1*sizeof(int));
*i1_ryr=(int*)mpi_malloc(my_id,1*sizeof(int));
*i2_ryr=(int*)mpi_malloc(my_id,1*sizeof(int));
}
if(*csqn_len>0)
{
*i0_csqn=(int*)mpi_malloc(my_id,*csqn_len*sizeof(int));
*i1_csqn=(int*)mpi_malloc(my_id,*csqn_len*sizeof(int));
*i2_csqn=(int*)mpi_malloc(my_id,*csqn_len*sizeof(int));
}
else
{
*i0_csqn=(int*)mpi_malloc(my_id,1*sizeof(int));
*i1_csqn=(int*)mpi_malloc(my_id,1*sizeof(int));
*i2_csqn=(int*)mpi_malloc(my_id,1*sizeof(int));
}
if(*cleft_len>0)
{
*i0_cleft=(int*)mpi_malloc(my_id,*cleft_len*sizeof(int));
*i1_cleft=(int*)mpi_malloc(my_id,*cleft_len*sizeof(int));
*i2_cleft=(int*)mpi_malloc(my_id,*cleft_len*sizeof(int));
*cleft_nb=(int*)mpi_malloc(my_id,*cleft_len*sizeof(int));
}
else
{
*i0_cleft=(int*)mpi_malloc(my_id,1*sizeof(int));
*i1_cleft=(int*)mpi_malloc(my_id,1*sizeof(int));
*i2_cleft=(int*)mpi_malloc(my_id,1*sizeof(int));
*cleft_nb=(int*)mpi_malloc(my_id,1*sizeof(int));
}
if(*ryr_len>0){
*states0=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int));
*states1=(int*)mpi_malloc(my_id,*ryr_len*sizeof(int));
for ( i = 0; i < *ryr_len; i += 1 ) {
(*states0)[i]=0;
(*states1)[i]=0;
}
}
else
{
*states0=(int*)mpi_malloc(my_id,1*sizeof(int));
*states1=(int*)mpi_malloc(my_id,1*sizeof(int));
(*states0)[0]=0;
(*states1)[0]=0;
}
MPI_Scatterv(global_i0_ryr_reorder, ryr_rec_count,ryr_rec_disp, MPI_INT, *i0_ryr, *ryr_len, MPI_INT, 0, comm);
MPI_Scatterv(global_i1_ryr_reorder, ryr_rec_count,ryr_rec_disp, MPI_INT, *i1_ryr, *ryr_len, MPI_INT, 0, comm);
MPI_Scatterv(global_i2_ryr_reorder, ryr_rec_count,ryr_rec_disp, MPI_INT, *i2_ryr, *ryr_len, MPI_INT, 0, comm);
MPI_Scatterv(global_i0_csqn_reorder, csqn_rec_count,csqn_rec_disp, MPI_INT, *i0_csqn, *csqn_len, MPI_INT, 0, comm);
MPI_Scatterv(global_i1_csqn_reorder, csqn_rec_count,csqn_rec_disp, MPI_INT, *i1_csqn, *csqn_len, MPI_INT, 0, comm);
MPI_Scatterv(global_i2_csqn_reorder, csqn_rec_count,csqn_rec_disp, MPI_INT, *i2_csqn, *csqn_len, MPI_INT, 0, comm);
MPI_Scatterv(global_i0_cleft_reorder, cleft_rec_count,cleft_rec_disp, MPI_INT, *i0_cleft, *cleft_len, MPI_INT, 0, comm);
MPI_Scatterv(global_i1_cleft_reorder, cleft_rec_count,cleft_rec_disp, MPI_INT, *i1_cleft, *cleft_len, MPI_INT, 0, comm);
MPI_Scatterv(global_i2_cleft_reorder, cleft_rec_count,cleft_rec_disp, MPI_INT, *i2_cleft, *cleft_len, MPI_INT, 0, comm);
MPI_Scatterv(global_cleft_nb_reorder, cleft_rec_count,cleft_rec_disp, MPI_INT, *cleft_nb, *cleft_len, MPI_INT, 0, comm);
MPI_Scatterv(global_states0_reorder, ryr_rec_count, ryr_rec_disp, MPI_INT, *states0, *ryr_len, MPI_INT, 0, comm);
//MPI_Bcast(&global_ryr_num,1,MPI_INT,0,comm);
if(DB_PF) printf("Thread%d: ryr_len=%d\n",my_id, *ryr_len);
// sprintf(caoutfile,"%s/Ca%d_T%d_rank%d_%d_%d_s0.np",outdirname,i,counter,coord[2],coord[1],coord[0]);
// store2Dmatrixfile_double_1D(caoutfile,C1[i],ny0,nz0,30);
//MPI_Gatherv(states0, n_ryr, MPI_INT, global_states0, states_rec_count, states_rec_disp, MPI_INT, 0, comm);
// if(my_id==2) {
// for(i=0;i<*ryr_len;i++) printf("Thread2 states[%d]: %d\n",i,(*states0)[i]);
// }
if(DB_PF){
for(i=0;i<*ryr_len;i++){
if((*states0)[i]==1){
printf("Proc%d,ryr_len=%d,ryr[%d]:%d,%d,%d \n",my_id, *ryr_len,i,(*i0_ryr)[i],(*i1_ryr)[i],(*i2_ryr)[i]);
}
}
}
if(my_id==0){
free(ryr_rec_count);
free(ryr_rec_disp);
free(csqn_rec_count);
free(csqn_rec_disp);
free(cleft_rec_count);
free(cleft_rec_disp);
free(global_i0_ryr_reorder);
free(global_i1_ryr_reorder);
free(global_i2_ryr_reorder);
free(global_i0_csqn_reorder);
free(global_i1_csqn_reorder);
free(global_i2_csqn_reorder);
free(global_i0_cleft_reorder);
free(global_i1_cleft_reorder);
free(global_i2_cleft_reorder);
free(global_cleft_nb_reorder);
free(global_states0_reorder);
}
return 30/h;
}
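/* Distribution scheme: rank 0 loads all indices, bins each entry by its
owning rank (idxbl2rank), builds per-rank counts and displacements, reorders
the data into contiguous per-rank segments, then MPI_Scatter delivers each
rank its count and MPI_Scatterv the segments themselves. Coordinates are
shifted to rank-local indices (+1 for the ghost layer) during reordering. */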
//int* loadRyRindexfile_int(char* infile, CONDFN cf, int cond)
int* loadRyRindexfile_int(char* infile, int* count)
{
FILE *fpdata;
int* arreturn;
int i;
int temp_d;
*count=0;
if(DB_PF) printf("Load file name: %s\n", infile);
fpdata = fopen(infile, "r");
if(fpdata==NULL)
{
printf("\nFailure to open input file.\n");
exit(0);
}
while(fscanf(fpdata, "%d", &temp_d)!=EOF){
// if(cf(temp_d,cond)) count++;
(*count)++;
// printf("%d,",temp_d);
}
if(DB_PF) printf("There are %d indices satisfy the condition\n",*count);
arreturn = malloc((*count)*sizeof(int));
if (arreturn == NULL)
{
printf("\nFailure trying to allocate room for array.\n");
exit(0);
}
rewind(fpdata);
i=0;
while(fscanf(fpdata, "%d", &temp_d)!=EOF){
// if(cf(temp_d,cond)) {
arreturn[i]=temp_d;
i++;
// }
}
fclose(fpdata);
if (*count != i)
{
printf("Wrong indices number\n");
exit(0);
}
if(DB_PF) printf("load file %s over \n", infile);
return arreturn;
}
void readparam(int* iconf, double* conf)
{
FILE* file2;
char Data[MAX_LINE_LENGTH];
if((file2=fopen("param","r")) == NULL)
{ printf("Error opening param file\n");
return;
}
// h
fgets(Data,MAX_LINE_LENGTH,file2);
fscanf(file2,"%d\n",&iconf[0]);
// size_x
fgets(Data,MAX_LINE_LENGTH,file2);
fscanf(file2,"%d\n",&iconf[1]);
// size_y
fgets(Data,MAX_LINE_LENGTH,file2);
fscanf(file2,"%d\n",&iconf[2]);
// size_z
fgets(Data,MAX_LINE_LENGTH,file2);
fscanf(file2,"%d\n",&iconf[3]);
// x_domains
fgets(Data,MAX_LINE_LENGTH,file2);
fscanf(file2,"%d\n",&iconf[4]);
// y_domains
fgets(Data,MAX_LINE_LENGTH,file2);
fscanf(file2,"%d\n",&iconf[5]);
// z_domains
fgets(Data,MAX_LINE_LENGTH,file2);
fscanf(file2,"%d\n",&iconf[6]);
// save_data
fgets(Data,MAX_LINE_LENGTH,file2);
fscanf(file2,"%d\n",&iconf[7]);
// use_failing
fgets(Data,MAX_LINE_LENGTH,file2);
fscanf(file2,"%d\n",&iconf[8]);
// T
fgets(Data,MAX_LINE_LENGTH,file2);
fscanf(file2,"%le\n",&conf[0]);
// DT
fgets(Data,MAX_LINE_LENGTH,file2);
fscanf(file2,"%le\n",&conf[1]);
// save data in binary file
fgets(Data,MAX_LINE_LENGTH,file2);
fscanf(file2,"%d\n",&iconf[9]);
// save data in hdf5 format
fgets(Data,MAX_LINE_LENGTH,file2);
fscanf(file2,"%d\n",&iconf[10]);
// blocking_y_for_cache
fgets(Data,MAX_LINE_LENGTH,file2);
fscanf(file2,"%d",&iconf[11]);
fclose(file2);
}
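/* Expected "param" file layout (inferred from the reads above): pairs of
lines, a one-line label followed by its value, in this order: h, size_x,
size_y, size_z, x_domains, y_domains, z_domains, save_data, use_failing,
T, DT, binary-save flag, hdf5-save flag, blocking_y_for_cache. */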
void updateBound(double* C00, double* C01, double* C02, double* C03, double* C04,
int C_flag, int nx0, int ny0, int nz0,
double* yz_sbuf0,double* yz_rbuf0,
double* xz_sbuf0,double* xz_rbuf0,
double* xy_sbuf0,double* xy_rbuf0,
double* yz_sbuf1,double* yz_rbuf1,
double* xz_sbuf1,double* xz_rbuf1,
double* xy_sbuf1,double* xy_rbuf1,
int* neighbor, MPI_Status* ar_status, MPI_Request* ar_send_req, MPI_Request* ar_recv_req,
MPI_Comm comm, MPI_Comm comm3d)
{
int i,j,k;
int nx=nx0-2;
int ny=ny0-2;
int nz=nz0-2;
int dims[3];
int periods[3];
int coords[3];
int ZN=0, ZP=1, YN=2, YP=3, XN=4, XP=5;
MPI_Cart_get(comm3d, 3, dims, periods, coords);
// Ghost X end sheet
if(coords[2]==0){
i=0;
for (j=1; j<ny0-1; j++)
for (k=1; k<nz0-1; k++){
C00[i*nz0*ny0+j*nz0+k] = C00[(i+1)*nz0*ny0+j*nz0+k];
C01[i*nz0*ny0+j*nz0+k] = C01[(i+1)*nz0*ny0+j*nz0+k];
C02[i*nz0*ny0+j*nz0+k] = C02[(i+1)*nz0*ny0+j*nz0+k];
C03[i*nz0*ny0+j*nz0+k] = C03[(i+1)*nz0*ny0+j*nz0+k];
C04[i*nz0*ny0+j*nz0+k] = C04[(i+1)*nz0*ny0+j*nz0+k];
}
}
else
{
putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &yz_sbuf0[0*ny*nz],ny*nz);
putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &yz_sbuf0[1*ny*nz],ny*nz);
putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &yz_sbuf0[2*ny*nz],ny*nz);
putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &yz_sbuf0[3*ny*nz],ny*nz);
putin_sendbuffer_yz(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &yz_sbuf0[4*ny*nz],ny*nz);
}
MPI_Isend(yz_sbuf0,5*ny*nz,MPI_DOUBLE,neighbor[XN],C_flag+1000, comm, &ar_send_req[0]);
MPI_Irecv(yz_rbuf0,5*ny*nz,MPI_DOUBLE,neighbor[XN],C_flag+1000, comm, &ar_recv_req[0]);
// MPI_Sendrecv(yz_sbuf0,5*ny*nz,MPI_DOUBLE,neighbor[XN],C_flag+1000,
// yz_rbuf0,5*ny*nz,MPI_DOUBLE,neighbor[XN],C_flag+1000,comm,&status);
if(coords[2]==(dims[2]-1))
{
i=nx0-1;
for (j=1; j<ny0-1; j++)
for (k=1; k<nz0-1; k++){
C00[i*nz0*ny0+j*nz0+k] = C00[(i-1)*nz0*ny0+j*nz0+k];
C01[i*nz0*ny0+j*nz0+k] = C01[(i-1)*nz0*ny0+j*nz0+k];
C02[i*nz0*ny0+j*nz0+k] = C02[(i-1)*nz0*ny0+j*nz0+k];
C03[i*nz0*ny0+j*nz0+k] = C03[(i-1)*nz0*ny0+j*nz0+k];
C04[i*nz0*ny0+j*nz0+k] = C04[(i-1)*nz0*ny0+j*nz0+k];
}
}
else
{
putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &yz_sbuf1[0*ny*nz],ny*nz);
putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &yz_sbuf1[1*ny*nz],ny*nz);
putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &yz_sbuf1[2*ny*nz],ny*nz);
putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &yz_sbuf1[3*ny*nz],ny*nz);
putin_sendbuffer_yz( (nx0-2)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &yz_sbuf1[4*ny*nz],ny*nz);
}
MPI_Isend(yz_sbuf1,5*ny*nz,MPI_DOUBLE,neighbor[XP],C_flag+1000, comm, &ar_send_req[1]);
MPI_Irecv(yz_rbuf1,5*ny*nz,MPI_DOUBLE,neighbor[XP],C_flag+1000, comm, &ar_recv_req[1]);
// MPI_Sendrecv(yz_sbuf1,5*ny*nz,MPI_DOUBLE,neighbor[XP],C_flag+1000,
// yz_rbuf1,5*ny*nz,MPI_DOUBLE,neighbor[XP],C_flag+1000,comm,&status);
// printf("exchange X end sheet ok! coords[%d,%d,%d]\n",coords[0],coords[1],coords[2]);
// Ghost Y end sheet
if(coords[1]==0){
j=0;
for (i=1; i<nx0-1; i++)
for (k=1; k<nz0-1; k++){
C00[i*nz0*ny0+j*nz0+k] = C00[i*nz0*ny0+(j+1)*nz0+k];
C01[i*nz0*ny0+j*nz0+k] = C01[i*nz0*ny0+(j+1)*nz0+k];
C02[i*nz0*ny0+j*nz0+k] = C02[i*nz0*ny0+(j+1)*nz0+k];
C03[i*nz0*ny0+j*nz0+k] = C03[i*nz0*ny0+(j+1)*nz0+k];
C04[i*nz0*ny0+j*nz0+k] = C04[i*nz0*ny0+(j+1)*nz0+k];
}
}
else
{
putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xz_sbuf0[0*nx*nz],nx*nz);
putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xz_sbuf0[1*nx*nz],nx*nz);
putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xz_sbuf0[2*nx*nz],nx*nz);
putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xz_sbuf0[3*nx*nz],nx*nz);
putin_sendbuffer_xz( 1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xz_sbuf0[4*nx*nz],nx*nz);
}
MPI_Isend(xz_sbuf0,5*nx*nz,MPI_DOUBLE,neighbor[YN],C_flag+2000, comm, &ar_send_req[2]);
MPI_Irecv(xz_rbuf0,5*nx*nz,MPI_DOUBLE,neighbor[YN],C_flag+2000, comm, &ar_recv_req[2]);
// MPI_Sendrecv(xz_sbuf0,5*nx*nz,MPI_DOUBLE,neighbor[YN],C_flag+2000,
// xz_rbuf0,5*nx*nz,MPI_DOUBLE,neighbor[YN],C_flag+2000,comm,&status);
if(coords[1]==(dims[1]-1))
{
j=ny0-1;
for (i=1; i<nx0-1; i++)
for (k=1; k<nz0-1; k++){
C00[i*nz0*ny0+j*nz0+k] = C00[i*nz0*ny0+(j-1)*nz0+k];
C01[i*nz0*ny0+j*nz0+k] = C01[i*nz0*ny0+(j-1)*nz0+k];
C02[i*nz0*ny0+j*nz0+k] = C02[i*nz0*ny0+(j-1)*nz0+k];
C03[i*nz0*ny0+j*nz0+k] = C03[i*nz0*ny0+(j-1)*nz0+k];
C04[i*nz0*ny0+j*nz0+k] = C04[i*nz0*ny0+(j-1)*nz0+k];
}
}
else
{
putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xz_sbuf1[0*nx*nz],nx*nz);
putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xz_sbuf1[1*nx*nz],nx*nz);
putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xz_sbuf1[2*nx*nz],nx*nz);
putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xz_sbuf1[3*nx*nz],nx*nz);
putin_sendbuffer_xz( 1*nz0*ny0+(ny0-2)*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xz_sbuf1[4*nx*nz],nx*nz);
}
MPI_Isend(xz_sbuf1,5*nx*nz,MPI_DOUBLE,neighbor[YP],C_flag+2000, comm, &ar_send_req[3]);
MPI_Irecv(xz_rbuf1,5*nx*nz,MPI_DOUBLE,neighbor[YP],C_flag+2000, comm, &ar_recv_req[3]);
// MPI_Sendrecv(xz_sbuf1,5*nx*nz,MPI_DOUBLE,neighbor[YP],C_flag+2000,
// xz_rbuf1,5*nx*nz,MPI_DOUBLE,neighbor[YP],C_flag+2000,comm,&status);
// printf("exchange Y end sheet ok! coords[%d,%d,%d]\n",coords[0],coords[1],coords[2]);
// Ghost Z end sheet
if(coords[0]==0){
k=0;
for (i=1; i<nx0-1; i++)
for (j=1; j<ny0-1; j++){
C00[i*nz0*ny0+j*nz0+k] = C00[i*nz0*ny0+j*nz0+k+1];
C01[i*nz0*ny0+j*nz0+k] = C01[i*nz0*ny0+j*nz0+k+1];
C02[i*nz0*ny0+j*nz0+k] = C02[i*nz0*ny0+j*nz0+k+1];
C03[i*nz0*ny0+j*nz0+k] = C03[i*nz0*ny0+j*nz0+k+1];
C04[i*nz0*ny0+j*nz0+k] = C04[i*nz0*ny0+j*nz0+k+1];
}
}
else
{
putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xy_sbuf0[0*nx*ny],nx*ny);
putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xy_sbuf0[1*nx*ny],nx*ny);
putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xy_sbuf0[2*nx*ny],nx*ny);
putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xy_sbuf0[3*nx*ny],nx*ny);
putin_sendbuffer_xy(1*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xy_sbuf0[4*nx*ny],nx*ny);
}
MPI_Isend(xy_sbuf0,5*nx*ny,MPI_DOUBLE,neighbor[ZN],C_flag+3000, comm, &ar_send_req[4]);
MPI_Irecv(xy_rbuf0,5*nx*ny,MPI_DOUBLE,neighbor[ZN],C_flag+3000, comm, &ar_recv_req[4]);
// MPI_Sendrecv(xy_sbuf0,5*nx*ny,MPI_DOUBLE,neighbor[ZN],C_flag+3000,
// xy_rbuf0,5*nx*ny,MPI_DOUBLE,neighbor[ZN],C_flag+3000,comm,&status);
if(coords[0]==(dims[0]-1))
{
k=nz0-1;
for (i=1; i<nx0-1; i++)
for (j=1; j<ny0-1; j++){
C00[i*nz0*ny0+j*nz0+k] = C00[i*nz0*ny0+j*nz0+k-1];
C01[i*nz0*ny0+j*nz0+k] = C01[i*nz0*ny0+j*nz0+k-1];
C02[i*nz0*ny0+j*nz0+k] = C02[i*nz0*ny0+j*nz0+k-1];
C03[i*nz0*ny0+j*nz0+k] = C03[i*nz0*ny0+j*nz0+k-1];
C04[i*nz0*ny0+j*nz0+k] = C04[i*nz0*ny0+j*nz0+k-1];
}
}
else
{
putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C00, nx, ny, nz, &xy_sbuf1[0*nx*ny],nx*ny);
putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C01, nx, ny, nz, &xy_sbuf1[1*nx*ny],nx*ny);
putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C02, nx, ny, nz, &xy_sbuf1[2*nx*ny],nx*ny);
putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C03, nx, ny, nz, &xy_sbuf1[3*nx*ny],nx*ny);
putin_sendbuffer_xy( 1*nz0*ny0+nz0+nz0-2,nx0,ny0, nz0, C04, nx, ny, nz, &xy_sbuf1[4*nx*ny],nx*ny);
}
MPI_Isend(xy_sbuf1,5*nx*ny,MPI_DOUBLE,neighbor[ZP],C_flag+3000, comm, &ar_send_req[5]);
MPI_Irecv(xy_rbuf1,5*nx*ny,MPI_DOUBLE,neighbor[ZP],C_flag+3000, comm, &ar_recv_req[5]);
// MPI_Sendrecv(xy_sbuf1,5*nx*ny,MPI_DOUBLE,neighbor[ZP],C_flag+3000,
// xy_rbuf1,5*nx*ny,MPI_DOUBLE,neighbor[ZP],C_flag+3000,comm,&status);
MPI_Waitall(6, ar_recv_req, ar_status);
if(coords[2]!=0){
getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &yz_rbuf0[0*ny*nz],ny*nz);
getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &yz_rbuf0[1*ny*nz],ny*nz);
getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &yz_rbuf0[2*ny*nz],ny*nz);
getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &yz_rbuf0[3*ny*nz],ny*nz);
getout_recvbuffer_yz( 1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &yz_rbuf0[4*ny*nz],ny*nz);
}
if(coords[2]!=(dims[2]-1)){
getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &yz_rbuf1[0*ny*nz],ny*nz);
getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &yz_rbuf1[1*ny*nz],ny*nz);
getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &yz_rbuf1[2*ny*nz],ny*nz);
getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &yz_rbuf1[3*ny*nz],ny*nz);
getout_recvbuffer_yz((nx0-1)*nz0*ny0+1*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &yz_rbuf1[4*ny*nz],ny*nz);
}
if(coords[1]!=0){
getout_recvbuffer_xz( 1*nz0*ny0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xz_rbuf0[0*nx*nz],nx*nz);
getout_recvbuffer_xz( 1*nz0*ny0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xz_rbuf0[1*nx*nz],nx*nz);
getout_recvbuffer_xz( 1*nz0*ny0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xz_rbuf0[2*nx*nz],nx*nz);
getout_recvbuffer_xz( 1*nz0*ny0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xz_rbuf0[3*nx*nz],nx*nz);
getout_recvbuffer_xz( 1*nz0*ny0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xz_rbuf0[4*nx*nz],nx*nz);
}
if(coords[1]!=(dims[1]-1)){
getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C00, nx, ny, nz, &xz_rbuf1[0*nx*nz],nx*nz);
getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C01, nx, ny, nz, &xz_rbuf1[1*nx*nz],nx*nz);
getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C02, nx, ny, nz, &xz_rbuf1[2*nx*nz],nx*nz);
getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C03, nx, ny, nz, &xz_rbuf1[3*nx*nz],nx*nz);
getout_recvbuffer_xz(1*nz0*ny0+(ny0-1)*nz0+1,nx0,ny0, nz0, C04, nx, ny, nz, &xz_rbuf1[4*nx*nz],nx*nz);
}
if(coords[0]!=0){
getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C00, nx, ny, nz, &xy_rbuf0[0*nx*ny],nx*ny);
getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C01, nx, ny, nz, &xy_rbuf0[1*nx*ny],nx*ny);
getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C02, nx, ny, nz, &xy_rbuf0[2*nx*ny],nx*ny);
getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C03, nx, ny, nz, &xy_rbuf0[3*nx*ny],nx*ny);
getout_recvbuffer_xy( 1*nz0*ny0+1*nz0, nx0,ny0, nz0, C04, nx, ny, nz, &xy_rbuf0[4*nx*ny],nx*ny);
}
if(coords[0]!=(dims[0]-1)){
getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C00, nx, ny, nz, &xy_rbuf1[0*nx*ny],nx*ny);
getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C01, nx, ny, nz, &xy_rbuf1[1*nx*ny],nx*ny);
getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C02, nx, ny, nz, &xy_rbuf1[2*nx*ny],nx*ny);
getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C03, nx, ny, nz, &xy_rbuf1[3*nx*ny],nx*ny);
getout_recvbuffer_xy( 1*nz0*ny0+nz0+nz0-1,nx0,ny0, nz0, C04, nx, ny, nz, &xy_rbuf1[4*nx*ny],nx*ny);
}
// printf("exchange Z end sheet ok! coords[%d,%d,%d]\n",coords[0],coords[1],coords[2]);
}
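/* Halo-exchange pattern used above: on a physical domain face (coords at 0 or
dims-1) the ghost sheet is filled by mirroring the adjacent interior sheet,
giving a zero-flux boundary; otherwise the interior face of all five species
is packed into a send buffer and exchanged with the neighbour through
nonblocking MPI_Isend/MPI_Irecv. After MPI_Waitall on the six receives, the
incoming sheets are unpacked into the ghost layers. */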
void putin_sendbuffer_yz(int base_addr,int nx0,int ny0, int nz0, double* arr,
int nx, int ny, int nz, double* sbuf, int sbuf_len)
{
int i;
if(sbuf_len!=ny*nz)
{
printf("yz sbuf_len error!\n");
exit(0);
}
for ( i = 0; i < ny; i += 1 ) {
memcpy(&sbuf[i*nz],&arr[base_addr+i*nz0],nz*sizeof(double));
}
}
void putin_sendbuffer_xz(int base_addr,int nx0,int ny0, int nz0, double* arr,
int nx, int ny, int nz, double* sbuf, int sbuf_len)
{
int i;
if(sbuf_len!=nx*nz)
{
printf("xz sbuf_len error!\n");
exit(0);
}
for ( i = 0; i < nx; i += 1 ) {
memcpy(&sbuf[i*nz],&arr[base_addr+i*ny0*nz0],nz*sizeof(double));
}
}
void putin_sendbuffer_xy(int base_addr,int nx0,int ny0, int nz0, double* arr,
int nx, int ny, int nz, double* sbuf, int sbuf_len)
{
int i, j;
if(sbuf_len!=nx*ny)
{
printf("xy sbuf_len error!\n");
exit(0);
}
for ( i = 0; i < nx; i += 1 ) {
for ( j = 0; j < ny; j += 1 ) {
sbuf[i*ny+j]=arr[base_addr+i*ny0*nz0+j*nz0];
}
}
}
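/* Note: a yz or xz face is a set of contiguous runs of nz doubles, so those
packers use memcpy; an xy sheet is strided in memory (z is the fastest
varying index), so it must be copied element by element. */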
void getout_recvbuffer_yz(int base_addr,int nx0,int ny0, int nz0, double* arr,
int nx, int ny, int nz, double* sbuf, int sbuf_len)
{
int i;
if(sbuf_len!=ny*nz)
{
printf("yz rbuf_len error!\n");
exit(0);
}
for ( i = 0; i < ny; i += 1 ) {
memcpy(&arr[base_addr+i*nz0],&sbuf[i*nz],nz*sizeof(double));
}
}
void getout_recvbuffer_xz(int base_addr,int nx0,int ny0, int nz0, double* arr,
int nx, int ny, int nz, double* sbuf, int sbuf_len)
{
int i;
if(sbuf_len!=nx*nz)
{
printf("xz rbuf_len error!\n");
exit(0);
}
for ( i = 0; i < nx; i += 1 ) {
memcpy(&arr[base_addr+i*ny0*nz0],&sbuf[i*nz],nz*sizeof(double));
}
}
void getout_recvbuffer_xy(int base_addr,int nx0,int ny0, int nz0, double* arr,
int nx, int ny, int nz, double* sbuf, int sbuf_len)
{
int i, j;
if(sbuf_len!=nx*ny)
{
printf("xy rbuf_len error!\n");
exit(0);
}
for ( i = 0; i < nx; i += 1 ) {
for ( j = 0; j < ny; j += 1 ) {
arr[base_addr+i*ny0*nz0+j*nz0]=sbuf[i*ny+j];
}
}
}
void BinarySort_two(int* pData, int* vData, int Count)
{
dichotomy_two(pData,vData,0,Count-1);
}
void dichotomy_two(int* pData,int* vData, int left,int right) {
int i,j;
int middle,iTemp;
i = left;
j = right;
middle = pData[(left+right)/2];
do{
while((pData[i]<middle) && (i<right))
i++;
while((pData[j]>middle) && (j>left))
j--;
if(i<=j)
{
iTemp = pData[i];
pData[i] = pData[j];
pData[j] = iTemp;
iTemp =vData[i];
vData[i]=vData[j];
vData[j]=iTemp;
i++;
j--;
}
}while(i<=j);
if(left<j)
dichotomy_two(pData,vData,left,j);
if(right>i)
dichotomy_two(pData,vData,i,right);
}
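/* Usage sketch (illustrative values): sort a key array while keeping a
companion value array aligned with it. Despite the name, this is a recursive
quicksort on the keys with every swap mirrored into the values:
int keys[] = {3, 1, 2};
int vals[] = {30, 10, 20};
BinarySort_two(keys, vals, 3); // keys -> {1,2,3}, vals -> {10,20,30}
*/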
void *mpi_malloc (
int id, /* IN - Process rank */
int bytes) /* IN - Bytes to allocate */
{
void *buffer;
if ((buffer = malloc ((size_t) bytes)) == NULL) {
printf ("Error: Malloc failed for process %d\n", id);
fflush (stdout);
MPI_Abort (MPI_COMM_WORLD, 4);
}
return buffer;
}
void compute_pde_ode(int nx0, int ny0, int nz0, double dt,double gamma, double fudge,
double* alpha, double* B_tot, double* k_on, double* k_off,
double** C0, double** C1, int div_y)
{
// Main kernel
int i,j,k,jj,idx;
int ny;
double J;
double Ca_ijk;
double buff_ijk;
double Ca_i2_ijk;
double Ca_SR2_ijk;
ny=ny0-2;
for (i=1; i<nx0-1; i++)
{
for (jj=0; jj<ny/div_y; jj++)
{
//blocking for cache size on y line
for (j=jj*div_y+1; j<(jj+1)*div_y+1; j++)
{
//Laplace diffusion process five array together
for(idx=0;idx<5;idx++)
{
#pragma ivdep
#pragma prefetch
for (k=1; k<nz0-1; k++)
{
C1[idx][i*nz0*ny0+j*nz0+k] =alpha[idx]*(
C0[idx][i*nz0*ny0+j*nz0+k]*(-6)+
C0[idx][(i-1)*nz0*ny0+j*nz0+k] + C0[idx][(i+1)*nz0*ny0+j*nz0+k] +
C0[idx][i*nz0*ny0+(j-1)*nz0+k] + C0[idx][i*nz0*ny0+(j+1)*nz0+k] +
C0[idx][i*nz0*ny0+j*nz0+k-1] + C0[idx][i*nz0*ny0+j*nz0+k+1]) +
C0[idx][i*nz0*ny0+j*nz0+k];
}
}
//Reaction
for(idx=2;idx<6;idx++)
{
#pragma ivdep
#pragma prefetch
for (k=1; k<nz0-1; k++)
{
Ca_ijk = C1[0][i*nz0*ny0+j*nz0+k];
buff_ijk = C1[idx][i*nz0*ny0+j*nz0+k];
J = k_on[idx]*(B_tot[idx] - buff_ijk)*Ca_ijk - k_off[idx]*buff_ijk;
C1[0][i*nz0*ny0+j*nz0+k] -= dt*J;
C1[idx][i*nz0*ny0+j*nz0+k] += dt*J;
}
}
// serca3D
#pragma ivdep
#pragma prefetch
for (k=1; k<nz0-1; k++)
{
// Main kernel
Ca_i2_ijk = C1[0][i*nz0*ny0+j*nz0+k];
Ca_SR2_ijk = C1[1][i*nz0*ny0+j*nz0+k];
Ca_i2_ijk *= Ca_i2_ijk;
Ca_SR2_ijk *= Ca_SR2_ijk;
J = fudge*(570997802.885875*Ca_i2_ijk - 0.0425239333622699*Ca_SR2_ijk)/(106720651.206402*Ca_i2_ijk + 182.498197548666*Ca_SR2_ijk + 5.35062954944879);
C1[0][i*nz0*ny0+j*nz0+k] -= dt*J;
C1[1][i*nz0*ny0+j*nz0+k] += dt*J/gamma;
}
}
}
}
}
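/* Scheme summary: every species advances by one explicit Euler step per call.
Diffusion applies the 7-point Laplacian stencil,
C1 = C0 + alpha*(sum of the 6 face neighbours - 6*C0);
each buffer species idx in 2..5 then reacts with free Ca via
J = k_on[idx]*(B_tot[idx]-buff)*Ca - k_off[idx]*buff, moving dt*J between
C1[0] and C1[idx]; finally the SERCA pump flux, a rational function of Ca^2
and Ca_SR^2 scaled by fudge, transfers dt*J from cytosol (C1[0]) to SR
(C1[1]), with the SR side divided by the volume ratio gamma. */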
|
GB_unop__identity_int8_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int8_fp64)
// op(A') function: GB (_unop_tran__identity_int8_fp64)
// C type: int8_t
// A type: double
// cast: int8_t cij = GB_cast_to_int8_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = GB_cast_to_int8_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int8_fp64)
(
int8_t *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int8_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__ldexp_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ldexp_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__ldexp_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__ldexp_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__ldexp_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ldexp_fp64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__ldexp_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__ldexp_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ldexp_fp64)
// C=scalar+B GB (_bind1st__ldexp_fp64)
// C=scalar+B' GB (_bind1st_tran__ldexp_fp64)
// C=A+scalar GB (_bind2nd__ldexp_fp64)
// C=A'+scalar GB (_bind2nd_tran__ldexp_fp64)
// C type: double
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = ldexp (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ldexp (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LDEXP || GxB_NO_FP64 || GxB_NO_LDEXP_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__ldexp_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ldexp_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ldexp_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ldexp_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__ldexp_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ldexp_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__ldexp_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ldexp_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ldexp_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = ldexp (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ldexp_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = ldexp (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = ldexp (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__ldexp_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = ldexp (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__ldexp_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
add.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB SP code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
//---------------------------------------------------------------------
// addition of update to the vector u
//---------------------------------------------------------------------
void add()
{
int i, j, k, m;
#pragma omp parallel for default(shared) private(i,j,k,m)
for (k = 1; k <= nz2; k++) {
for (j = 1; j <= ny2; j++) {
for (i = 1; i <= nx2; i++) {
for (m = 0; m < 5; m++) {
u[k][j][i][m] = u[k][j][i][m] + rhs[k][j][i][m];
}
}
}
}
}
|
8321.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
static
void init_array (int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
// printf("Initializing Array\n");
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++)
{
A[i][j] = ((DATA_TYPE) (i + j) / nj);
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
#pragma scop
#pragma omp parallel for private(j)
for (i = 1; i < _PB_NI - 1; ++i)
{
for (j = 1; j < _PB_NJ - 1; ++j)
{
B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}
}
#pragma endscop
// printf("Kernal computation complete !!\n");
}
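/* The loop nest applies a fixed 3x3 convolution stencil to the interior of A:
each B[i][j] is the weighted sum of its 3x3 neighbourhood with weights
( 0.2 0.5 -0.8 / -0.3 0.6 -0.9 / 0.4 0.7 0.1 ); the one-pixel border of B
is left unwritten, and rows are distributed across OpenMP threads. */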
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size and even
% shape, to an image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology, one that is very common for image blurring and sharpening
% effects.  Not only 2D Gaussian blurring, but also 2-pass 1D blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments. Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/color-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor-private.h"
#include "magick/morphology.h"
#include "magick/morphology-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/prepress.h"
#include "magick/quantize.h"
#include "magick/registry.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/token.h"
#include "magick/utility.h"
/*
** The following test is for the special floating point value NaN (not a
** number), which may be used within a Kernel Definition.  NaN's are defined
** as part of the IEEE standard for floating point number representation.
**
** These are used as a Kernel value to mean that this kernel position is not
** part of the kernel neighbourhood for convolution or morphology processing,
** and thus should be ignored.  This allows the use of 'shaped' kernels.
**
** The special property that two NaN's are never equal, even if they come
** from the same variable, allows you to test if a value is the special NaN
** value.
**
** The macro IsNan() is thus only true if the value given is NaN.
*/
#define IsNan(a) ((a)!=(a))
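/*
** A minimal usage sketch (illustrative only, not part of the module):
** skipping 'shaped' kernel positions during processing, as the morphology
** loops elsewhere in this module do.
**
**   for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
**     if ( IsNan(kernel->values[i]) )
**       continue;    (position is not in the kernel neighbourhood)
*/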
/*
Other global definitions used by module.
*/
static inline double MagickMin(const double x,const double y)
{
return( x < y ? x : y);
}
static inline double MagickMax(const double x,const double y)
{
return( x > y ? x : y);
}
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
while (kernel->next != (KernelInfo *) NULL)
kernel = kernel->next;
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square kernel of odd size, using the
% exact center as the origin. This is no longer the case; any rectangular
% kernel may be used, with any element declared as the origin. This in turn
% allows the use of highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shape the kernel within its
% rectangular area. That is, 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for the kernel
% to work correctly.
%
% The returned kernel should be freed using DestroyKernelInfo() when you
% are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can optionally be defined at +X+Y (such that +0+0
% is the top left corner). If not defined, the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes,
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators. A list is defined as a semi-colon separated list of kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel definitions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating an odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
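/*
** A minimal usage sketch (illustrative only, not part of the module): the
** three kernel string forms described above, each parsed into a KernelInfo
** that is later freed with DestroyKernelInfo().
**
**   KernelInfo
**     *named,    (built-in kernel, selected by name and geometry args)
**     *user,     (new-style WxH kernel with an explicit +1+1 origin)
**     *old;      (old-style odd-sized square list of numbers)
**
**   named = AcquireKernelInfo("Octagon:3");
**   user  = AcquireKernelInfo("3x3+1+1: 0,1,0 1,1,1 0,1,0");
**   old   = AcquireKernelInfo("0,1,0 1,1,1 0,1,0");
**   ...
**   named = DestroyKernelInfo(named);
**   user  = DestroyKernelInfo(user);
**   old   = DestroyKernelInfo(old);
*/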
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
KernelInfo
*kernel;
char
token[MaxTextExtent];
const char
*p,
*end;
register ssize_t
i;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
MagickStatusType
flags;
GeometryInfo
args;
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *)NULL)
return(kernel);
(void) ResetMagickMemory(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = UserDefinedKernel;
kernel->next = (KernelInfo *) NULL;
kernel->signature = MagickSignature;
/* find end of this specific kernel definition string */
end = strchr(kernel_string, ';');
if ( end == (char *) NULL )
end = strchr(kernel_string, '\0');
/* clear flags - for Expanding kernel lists through rotations */
flags = NoValue;
/* Has a ':' in argument - New user kernel specification */
p = strchr(kernel_string, ':');
if ( p != (char *) NULL && p < end)
{
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, kernel_string, (size_t) (p-kernel_string));
token[p-kernel_string] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
/* Size handling and checks of geometry settings */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 1.0; /* then width = 1 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
kernel->width = (size_t)args.rho;
kernel->height = (size_t)args.sigma;
/* Offset Handling and Checks */
if ( args.xi < 0.0 || args.psi < 0.0 )
return(DestroyKernelInfo(kernel));
kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
: (ssize_t) (kernel->width-1)/2;
kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
: (ssize_t) (kernel->height-1)/2;
if ( kernel->x >= (ssize_t) kernel->width ||
kernel->y >= (ssize_t) kernel->height )
return(DestroyKernelInfo(kernel));
p++; /* advance beyond the ':' */
}
else
{ /* ELSE - Old style specification, forming an odd-square kernel */
/* count up number of values given */
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
for (i=0; p < end; i++)
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
}
/* set the size of the kernel - old sized square */
kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
}
/* Read in the kernel values from rest of input string argument */
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
kernel->minimum = +MagickHuge;
kernel->maximum = -MagickHuge;
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
if ( LocaleCompare("nan",token) == 0
|| LocaleCompare("-",token) == 0 ) {
kernel->values[i] = nan; /* do not include this value in kernel */
}
else {
kernel->values[i] = InterpretLocaleValue(token,(char **) NULL);
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
}
/* sanity check -- no more values in kernel definition */
GetMagickToken(p,&p,token);
if ( *token != '\0' && *token != ';' && *token != '\'' )
return(DestroyKernelInfo(kernel));
#if 0
/* this was the old method of handling an incomplete kernel */
if ( i < (ssize_t) (kernel->width*kernel->height) ) {
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
kernel->values[i]=0.0;
}
#else
/* Number of values for kernel was not enough - Report Error */
if ( i < (ssize_t) (kernel->width*kernel->height) )
return(DestroyKernelInfo(kernel));
#endif
/* check that we received at least one real (non-nan) value! */
if ( kernel->minimum == MagickHuge )
return(DestroyKernelInfo(kernel));
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */
ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */
return(kernel);
}
static KernelInfo *ParseKernelName(const char *kernel_string)
{
char
token[MaxTextExtent];
const char
*p,
*end;
GeometryInfo
args;
KernelInfo
*kernel;
MagickStatusType
flags;
ssize_t
type;
/* Parse special 'named' kernel */
GetMagickToken(kernel_string,&p,token);
type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
if ( type < 0 || type == UserDefinedKernel )
return((KernelInfo *)NULL); /* not a valid named kernel */
while (((isspace((int) ((unsigned char) *p)) != 0) ||
(*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
p++;
end = strchr(p, ';'); /* end of this kernel definition */
if ( end == (char *) NULL )
end = strchr(p, '\0');
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, p, (size_t) (end-p));
token[end-p] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
#if 0
/* For Debugging Geometry Input */
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
/* special handling of missing values in input string */
switch( type ) {
/* Shape Kernel Defaults */
case UnityKernel:
if ( (flags & WidthValue) == 0 )
args.rho = 1.0; /* Default scale = 1.0, zero is valid */
break;
case SquareKernel:
case DiamondKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
if ( (flags & HeightValue) == 0 )
args.sigma = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RingKernel:
if ( (flags & XValue) == 0 )
args.xi = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RectangleKernel: /* Rectangle - set size defaults */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 3; /* then width = 3 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
if ( (flags & XValue) == 0 ) /* center offset if not defined */
args.xi = (double)(((ssize_t)args.rho-1)/2);
if ( (flags & YValue) == 0 )
args.psi = (double)(((ssize_t)args.sigma-1)/2);
break;
/* Distance Kernel Defaults */
case ChebyshevKernel:
case ManhattanKernel:
case OctagonalKernel:
case EuclideanKernel:
if ( (flags & HeightValue) == 0 ) /* no distance scale */
args.sigma = 100.0; /* default distance scaling */
else if ( (flags & AspectValue ) != 0 ) /* '!' flag */
args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
else if ( (flags & PercentValue ) != 0 ) /* '%' flag */
args.sigma *= QuantumRange/100.0; /* percentage of color range */
break;
default:
break;
}
kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args);
if ( kernel == (KernelInfo *) NULL )
return(kernel);
/* global expand to rotated kernel list - only for single kernels */
if ( kernel->next == (KernelInfo *) NULL ) {
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 45.0);
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0);
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel);
}
return(kernel);
}
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string)
{
KernelInfo
*kernel,
*new_kernel;
char
token[MaxTextExtent];
const char
*p;
size_t
kernel_number;
p = kernel_string;
kernel = NULL;
kernel_number = 0;
while ( GetMagickToken(p,NULL,token), *token != '\0' ) {
/* ignore extra or multiple ';' kernel separators */
if ( *token != ';' ) {
/* tokens starting with alpha are Named kernels */
if (isalpha((int) *token) != 0)
new_kernel = ParseKernelName(p);
else /* otherwise a user defined kernel array */
new_kernel = ParseKernelArray(p);
/* Error handling -- this is not proper error handling! */
if ( new_kernel == (KernelInfo *) NULL ) {
(void) FormatLocaleFile(stderr, "Failed to parse kernel number #%.20g\n",
(double) kernel_number);
if ( kernel != (KernelInfo *) NULL )
kernel=DestroyKernelInfo(kernel);
return((KernelInfo *) NULL);
}
/* initialise or append the kernel list */
if ( kernel == (KernelInfo *) NULL )
kernel = new_kernel;
else
LastKernelInfo(kernel)->next = new_kernel;
}
/* look for the next kernel in list */
p = strchr(p, ';');
if ( p == (char *) NULL )
break;
p++;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returns one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernelBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% A no-op or scaling single-element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% severe clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usually much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexician Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alternative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to Wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (currently restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other is equivalent to a far larger "Gaussian" kernel with the
% same sigma value; however it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet-like trail. The Kernel is actually half a gaussian curve;
% adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled variants of each of the kernels.
%
% Laplacian:{type}
% Discrete Laplacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surrounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is, it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-normalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2), -1 |
%
% However this kernel is also at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernels. These 9
% kernels are not normalized, but directly applied to the image. The
% results are then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and with at least 2 such runs at 90
% degrees from each other, both the direction and the strength of the edge
% can be determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | 1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add an average component to the results.
%
% Using the special type '10' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was laid out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate an octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in a "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a floating-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: Low-radius Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernels are not good general morphological kernels, but are used
% more for highlighting and marking any single pixels in an image using
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximately radius 3 in a 7x7 kernel.
% These are the 'edge' pixels of the default "Disk" kernel.
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels that fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeleton)
% Two types of line ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-connected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeleton)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Find single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Traditional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a research paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve connectivity.
% Many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling an anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information on how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One way
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric) is the distance needed when you can
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in
% diamond-like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleaving of Manhattan and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances match those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flies' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight'-like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get an octagonal-like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without losing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
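/*
** A minimal usage sketch (illustrative only, not part of the module):
** building the equivalent of "Gaussian:0,1.5" directly from geometry
** arguments rather than via AcquireKernelInfo() string parsing.
**
**   GeometryInfo
**     args;
**   KernelInfo
**     *kernel;
**
**   SetGeometryInfo(&args);
**   args.rho   = 0.0;    (radius 0: auto-select the optimal width)
**   args.sigma = 1.5;    (the gaussian sigma)
**   kernel = AcquireKernelBuiltIn(GaussianKernel, &args);
**   if ( kernel != (KernelInfo *) NULL )
**     kernel = DestroyKernelInfo(kernel);
*/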
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *)NULL);
break;
case LaplacianKernel: /* Named Discrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctagonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) ResetMagickMemory(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature = MagickSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(double *) AcquireQuantumMemory(1,sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this I don't know, but it appears to be based on the
* Error Function 'erf()' (integral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (< 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such, while weird, it is preferred.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (< 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is also less complex.
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*sizeof(double));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] =
exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3] = +MagickSQ2;
kernel->values[5] = kernel->values[7] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19");
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +MagickSQ2;
kernel->values[7] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +MagickSQ2;
kernel->values[8] = -MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -MagickSQ2;
kernel->values[6] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) > MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axes to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along diagonals to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>"));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>"));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
          /* Unfortunately we cannot yet rotate a non-square kernel */
          /* But then we can't flip a non-symmetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
          ** but they retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;");
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo(
"ThinSE:41; ThinSE:42; ThinSE:43");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
      case 81: /* SE_8_1 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
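        /* Illustrative note (an editorial sketch, not part of the original
        ** source): with the default radius 1 and sigma 1.0 the Chebyshev
        ** distance max(|u|,|v|) yields the 3x3 value grid
        **
        **    1 1 1
        **    1 0 1
        **    1 1 1
        **
        ** while the Manhattan metric |u|+|v| below would give
        ** 2 1 2 / 1 0 1 / 2 1 2 for the same neighbourhood.
        */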
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original.  The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
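% A minimal usage sketch (editorial illustration; it assumes 'kernel' was
% obtained earlier, e.g. from AcquireKernelInfo()):
%
%    KernelInfo
%      *clone;
%
%    clone=CloneKernelInfo(kernel);      /* deep copy, including ->next list */
%    if (clone != (KernelInfo *) NULL)
%      {
%        /* ... modify the clone's values here ... */
%        clone=DestroyKernelInfo(clone); /* free the whole cloned list */
%      }
%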
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
register ssize_t
i;
KernelInfo
*new_kernel;
assert(kernel != (KernelInfo *) NULL);
new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (new_kernel == (KernelInfo *) NULL)
return(new_kernel);
*new_kernel=(*kernel); /* copy values in structure */
/* replace the values with a copy of the values */
new_kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (new_kernel->values == (double *) NULL)
return(DestroyKernelInfo(new_kernel));
for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
new_kernel->values[i]=kernel->values[i];
/* Also clone the next kernel in the kernel list */
if ( kernel->next != (KernelInfo *) NULL ) {
new_kernel->next = CloneKernelInfo(kernel->next);
if ( new_kernel->next == (KernelInfo *) NULL )
return(DestroyKernelInfo(new_kernel));
}
return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
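% For example, the usual assign-back idiom used throughout this module (a
% sketch; DestroyKernelInfo() frees the entire kernel list and returns NULL):
%
%    kernel=DestroyKernelInfo(kernel);
%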
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
assert(kernel != (KernelInfo *) NULL);
if ( kernel->next != (KernelInfo *) NULL )
kernel->next = DestroyKernelInfo(kernel->next);
kernel->values = (double *)RelinquishMagickMemory(kernel->values);
kernel = (KernelInfo *) RelinquishMagickMemory(kernel);
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels, but providing a reflected 180-degree
% rotation before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
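%
% For example (as can be read from the function body below), a single kernel
% K expands into the four-kernel list:
%
%    K,  K rotated 180,  that result rotated 90,  that result rotated 180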
*/
#if 0
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
size_t
y;
register ssize_t
x,r;
register double
*k,t;
for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
t=k[x], k[x]=k[r], k[r]=t;
kernel->x = kernel->width - kernel->x - 1;
  kernel->angle = fmod(kernel->angle+180.0, 360.0);
}
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
KernelInfo
*clone,
*last;
last = kernel;
clone = CloneKernelInfo(last);
RotateKernelInfo(clone, 180); /* flip */
LastKernelInfo(last)->next = clone;
last = clone;
clone = CloneKernelInfo(last);
RotateKernelInfo(clone, 90); /* transpose */
LastKernelInfo(last)->next = clone;
last = clone;
clone = CloneKernelInfo(last);
RotateKernelInfo(clone, 180); /* flop */
LastKernelInfo(last)->next = clone;
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only work for 3x3 kernels,
% while 90 degree rotations only work for linear and square kernels.
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
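% For example, a 3x3 kernel expanded with a 45 degree angle yields up to 8
% rotated kernels (expansion stops as soon as a rotation repeats the first
% kernel), while a symmetrical 3x1 kernel expanded by 90 degrees stops after
% just 2 kernels.
%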
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
const KernelInfo *kernel2)
{
register size_t
i;
/* check size and origin location */
if ( kernel1->width != kernel2->width
|| kernel1->height != kernel2->height
|| kernel1->x != kernel2->x
|| kernel1->y != kernel2->y )
return MagickFalse;
/* check actual kernel values */
for (i=0; i < (kernel1->width*kernel1->height); i++) {
    /* Test for NaN equivalence */
if ( IsNan(kernel1->values[i]) && !IsNan(kernel2->values[i]) )
return MagickFalse;
if ( IsNan(kernel2->values[i]) && !IsNan(kernel1->values[i]) )
return MagickFalse;
/* Test actual values are equivalent */
if ( fabs(kernel1->values[i] - kernel2->values[i]) > MagickEpsilon )
return MagickFalse;
}
return MagickTrue;
}
static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
KernelInfo
*clone,
*last;
last = kernel;
while(1) {
clone = CloneKernelInfo(last);
RotateKernelInfo(clone, angle);
if ( SameKernelInfo(kernel, clone) == MagickTrue )
break;
LastKernelInfo(last)->next = clone;
last = clone;
}
clone = DestroyKernelInfo(clone); /* kernel has repeated - junk the clone */
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+     C a l c K e r n e l M e t a D a t a                                     %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
% only, using the kernel values.  This should only be used if it is not
% possible to calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
%      void CalcKernelMetaData(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
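%
% As a worked example (editorial illustration): for a 3x1 kernel with values
% { -1, 0, 1 } this function sets negative_range = -1, positive_range = 1,
% minimum = -1 and maximum = 1.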
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
register size_t
i;
kernel->minimum = kernel->maximum = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; i < (kernel->width*kernel->height); i++)
{
if ( fabs(kernel->values[i]) < MagickEpsilon )
kernel->values[i] = 0.0;
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels.
%
% It is basically equivalent to MorphologyImageChannel() (see below) but
% without any user controls.  This allows internal programs to use this
% function, to actually perform a specific task without possible interference
% by any API user-supplied settings.
%
% It is MorphologyImageChannel()'s task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically, kernels are not normalized/scaled/blended by the
% 'convolve:scale' Image Artifact (setting), nor is the convolve bias
% (-bias setting or image->bias) looked at; these must be supplied from the
% function arguments.
%
% The format of the MorphologyApply method is:
%
%      Image *MorphologyApply(const Image *image,const ChannelType channel,
%        const MorphologyMethod method, const ssize_t iterations,
%        const KernelInfo *kernel, const CompositeOperator compose,
%        const double bias, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o channel: the channels to which the operations are applied
% The channel 'sync' flag determines if 'alpha weighting' is
% applied for convolution style operations.
%
%    o iterations: apply the operation this many times, or until no change.
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
%    o kernel: the Morphology/Convolution kernel list to be applied.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
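% A minimal usage sketch (editorial illustration; the kernel string and the
% DefaultChannels constant are merely plausible choices, not requirements):
%
%    KernelInfo
%      *kernel;
%
%    kernel=AcquireKernelInfo("ThinSE:482");
%    if (kernel != (KernelInfo *) NULL)
%      {
%        Image
%          *thin_image;
%
%        thin_image=MorphologyApply(image,DefaultChannels,ThinningMorphology,
%          -1,kernel,UndefinedCompositeOp,0.0,exception);
%        kernel=DestroyKernelInfo(kernel);
%      }
%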
*/
/* Apply a Morphology Primitive to an image using the given kernel.
** Two pre-created images must be provided, and no image is created.
** It returns the number of pixels that changed between the images
** for result convergence determination.
*/
static ssize_t MorphologyPrimitive(const Image *image, Image *result_image,
const MorphologyMethod method, const ChannelType channel,
const KernelInfo *kernel,const double bias,ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"
CacheView
*p_view,
*q_view;
ssize_t
y, offx, offy;
size_t
virt_width,
changed;
MagickBooleanType
status;
MagickOffsetType
progress;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(result_image != (Image *) NULL);
assert(result_image->signature == MagickSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
status=MagickTrue;
changed=0;
progress=0;
p_view=AcquireCacheView(image);
q_view=AcquireCacheView(result_image);
virt_width=image->columns+kernel->width-1;
  /* Some methods (including convolve) need to use a reflected kernel.
   * Adjust 'origin' offsets to loop through the kernel as a reflection.
*/
offx = kernel->x;
offy = kernel->y;
switch(method) {
case ConvolveMorphology:
case DilateMorphology:
case DilateIntensityMorphology:
/*case DistanceMorphology:*/
      /* kernel needs to be used with reflection about origin */
offx = (ssize_t) kernel->width-offx-1;
offy = (ssize_t) kernel->height-offy-1;
break;
case ErodeMorphology:
case ErodeIntensityMorphology:
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
/* kernel is used as is, without reflection */
break;
default:
assert("Not a Primitive Morphology Method" != (char *) NULL);
break;
}
if ( method == ConvolveMorphology && kernel->width == 1 )
{ /* Special handling (for speed) of vertical (blur) kernels.
** This performs its handling in columns rather than in rows.
** This is only done for convolve as it is the only method that
** generates very large 1-D vertical kernels (such as a 'BlurKernel')
**
** Timing tests (on single CPU laptop)
    ** Using a vertical 1-d Blur with normal row-by-row (below)
** time convert logo: -morphology Convolve Blur:0x10+90 null:
** 0.807u
** Using this column method
** time convert logo: -morphology Convolve Blur:0x10+90 null:
** 0.620u
**
** Anthony Thyssen, 14 June 2010
*/
register ssize_t
x;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
register const PixelPacket
*restrict p;
register const IndexPacket
*restrict p_indexes;
register PixelPacket
*restrict q;
register IndexPacket
*restrict q_indexes;
register ssize_t
y;
ssize_t
r;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(p_view, x, -offy,1,
image->rows+kernel->height-1, exception);
q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
p_indexes=GetCacheViewVirtualIndexQueue(p_view);
q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
      /* offset to origin in 'p', while 'q' points to it directly */
r = offy;
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
v;
register const double
*restrict k;
register const PixelPacket
*restrict k_pixels;
register const IndexPacket
*restrict k_indexes;
MagickPixelPacket
result;
        /* Copy input image to the output image for unused channels
         * This removes the need for 'cloning' a new image every iteration
*/
*q = p[r];
if (image->colorspace == CMYKColorspace)
SetIndexPixelComponent(q_indexes+y,GetIndexPixelComponent(
p_indexes+r));
/* Set the bias of the weighted average output */
result.red =
result.green =
result.blue =
result.opacity =
result.index = bias;
/* Weighted Average of pixels using reflected kernel
**
        ** NOTE for correct working of this operation for asymmetrical
        ** kernels, the kernel needs to be applied in its reflected form.
        ** That is, its values need to be reversed.
*/
k = &kernel->values[ kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
if ( ((channel & SyncChannels) == 0 ) ||
(image->matte == MagickFalse) )
{ /* No 'Sync' involved.
** Convolution is simple greyscale channel operation
*/
for (v=0; v < (ssize_t) kernel->height; v++) {
if ( IsNan(*k) ) continue;
result.red += (*k)*GetRedPixelComponent(k_pixels);
result.green += (*k)*GetGreenPixelComponent(k_pixels);
result.blue += (*k)*GetBluePixelComponent(k_pixels);
result.opacity += (*k)*GetOpacityPixelComponent(k_pixels);
if ( image->colorspace == CMYKColorspace)
result.index += (*k)*(*k_indexes);
k--;
k_pixels++;
k_indexes++;
}
if ((channel & RedChannel) != 0)
SetRedPixelComponent(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetGreenPixelComponent(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetBluePixelComponent(q,ClampToQuantum(result.blue));
if ((channel & OpacityChannel) != 0
&& image->matte == MagickTrue )
SetOpacityPixelComponent(q,ClampToQuantum(result.opacity));
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
              SetIndexPixelComponent(q_indexes+y,ClampToQuantum(result.index));
}
else
{ /* Channel 'Sync' Flag, and Alpha Channel enabled.
** Weight the color channels with Alpha Channel so that
** transparent pixels are not part of the results.
*/
MagickRealType
alpha, /* alpha weighting of colors : kernel*alpha */
gamma; /* divisor, sum of color weighting values */
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++) {
if ( IsNan(*k) ) continue;
alpha=(*k)*(QuantumScale*(QuantumRange-GetOpacityPixelComponent(k_pixels)));
gamma += alpha;
result.red += alpha*GetRedPixelComponent(k_pixels);
result.green += alpha*GetGreenPixelComponent(k_pixels);
result.blue += alpha*GetBluePixelComponent(k_pixels);
result.opacity += (*k)*GetOpacityPixelComponent(k_pixels);
if ( image->colorspace == CMYKColorspace)
result.index += alpha*(*k_indexes);
k--;
k_pixels++;
k_indexes++;
}
/* Sync'ed channels, all channels are modified */
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
SetRedPixelComponent(q,ClampToQuantum(gamma*result.red));
SetGreenPixelComponent(q,ClampToQuantum(gamma*result.green));
SetBluePixelComponent(q,ClampToQuantum(gamma*result.blue));
SetOpacityPixelComponent(q,ClampToQuantum(result.opacity));
if (image->colorspace == CMYKColorspace)
              SetIndexPixelComponent(q_indexes+y,ClampToQuantum(gamma*
                result.index));
}
/* Count up changed pixels */
if ( ( p[r].red != GetRedPixelComponent(q))
|| ( p[r].green != GetGreenPixelComponent(q))
|| ( p[r].blue != GetBluePixelComponent(q))
|| ( p[r].opacity != GetOpacityPixelComponent(q))
|| ( image->colorspace == CMYKColorspace &&
               GetIndexPixelComponent(p_indexes+r) != GetIndexPixelComponent(q_indexes+y) ) )
changed++; /* The pixel was changed in some way! */
p++;
q++;
} /* y */
if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MorphologyImage)
#endif
proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
} /* x */
result_image->type=image->type;
q_view=DestroyCacheView(q_view);
p_view=DestroyCacheView(p_view);
      return(status ? (ssize_t) changed : -1);
}
/*
** Normal handling of horizontal or rectangular kernels (row by row)
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register const IndexPacket
*restrict p_indexes;
register PixelPacket
*restrict q;
register IndexPacket
*restrict q_indexes;
register ssize_t
x;
size_t
r;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width,
kernel->height, exception);
q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
p_indexes=GetCacheViewVirtualIndexQueue(p_view);
q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
    /* offset to origin in 'p', while 'q' points to it directly */
r = virt_width*offy + offx;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
v;
register ssize_t
u;
register const double
*restrict k;
register const PixelPacket
*restrict k_pixels;
register const IndexPacket
*restrict k_indexes;
MagickPixelPacket
result,
min,
max;
      /* Copy input image to the output image for unused channels
       * This removes the need for 'cloning' a new image every iteration
*/
*q = p[r];
if (image->colorspace == CMYKColorspace)
SetIndexPixelComponent(q_indexes+x,GetIndexPixelComponent(p_indexes+r));
/* Defaults */
min.red =
min.green =
min.blue =
min.opacity =
min.index = (MagickRealType) QuantumRange;
max.red =
max.green =
max.blue =
max.opacity =
max.index = (MagickRealType) 0;
/* default result is the original pixel value */
result.red = (MagickRealType) p[r].red;
result.green = (MagickRealType) p[r].green;
result.blue = (MagickRealType) p[r].blue;
result.opacity = QuantumRange - (MagickRealType) p[r].opacity;
result.index = 0.0;
if ( image->colorspace == CMYKColorspace)
result.index = (MagickRealType) GetIndexPixelComponent(p_indexes+r);
switch (method) {
case ConvolveMorphology:
/* Set the bias of the weighted average output */
result.red =
result.green =
result.blue =
result.opacity =
result.index = bias;
break;
case DilateIntensityMorphology:
case ErodeIntensityMorphology:
/* use a boolean flag indicating when first match found */
result.red = 0.0; /* result is not used otherwise */
break;
default:
break;
}
switch ( method ) {
case ConvolveMorphology:
/* Weighted Average of pixels using reflected kernel
**
          ** NOTE for correct working of this operation for asymmetrical
          ** kernels, the kernel needs to be applied in its reflected form.
          ** That is, its values need to be reversed.
          **
          ** Correlation is actually the same as this but without reflecting
          ** the kernel, and thus 'lower-level' than Convolution.  However
          ** as Convolution is the more common method used, and it does not
          ** really cost us much in terms of processing to use a reflected
          ** kernel, it is Convolution that is implemented.
**
** Correlation will have its kernel reflected before calling
** this function to do a Convolve.
**
** For more details of Correlation vs Convolution see
** http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
if ( ((channel & SyncChannels) == 0 ) ||
(image->matte == MagickFalse) )
{ /* No 'Sync' involved.
** Convolution is simple greyscale channel operation
*/
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
result.red += (*k)*k_pixels[u].red;
result.green += (*k)*k_pixels[u].green;
result.blue += (*k)*k_pixels[u].blue;
result.opacity += (*k)*k_pixels[u].opacity;
if ( image->colorspace == CMYKColorspace)
result.index += (*k)*GetIndexPixelComponent(k_indexes+u);
}
k_pixels += virt_width;
k_indexes += virt_width;
}
if ((channel & RedChannel) != 0)
SetRedPixelComponent(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetGreenPixelComponent(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetBluePixelComponent(q,ClampToQuantum(result.blue));
if ((channel & OpacityChannel) != 0
&& image->matte == MagickTrue )
SetOpacityPixelComponent(q,ClampToQuantum(result.opacity));
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
SetIndexPixelComponent(q_indexes+x,ClampToQuantum(
result.index));
}
else
{ /* Channel 'Sync' Flag, and Alpha Channel enabled.
** Weight the color channels with Alpha Channel so that
** transparent pixels are not part of the results.
*/
MagickRealType
alpha, /* alpha weighting of colors : kernel*alpha */
gamma; /* divisor, sum of color weighting values */
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
alpha=(*k)*(QuantumScale*(QuantumRange-
k_pixels[u].opacity));
gamma += alpha;
result.red += alpha*k_pixels[u].red;
result.green += alpha*k_pixels[u].green;
result.blue += alpha*k_pixels[u].blue;
result.opacity += (*k)*k_pixels[u].opacity;
if ( image->colorspace == CMYKColorspace)
result.index+=alpha*GetIndexPixelComponent(k_indexes+u);
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* Sync'ed channels, all channels are modified */
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
SetRedPixelComponent(q,ClampToQuantum(gamma*result.red));
SetGreenPixelComponent(q,ClampToQuantum(gamma*result.green));
SetBluePixelComponent(q,ClampToQuantum(gamma*result.blue));
SetOpacityPixelComponent(q,ClampToQuantum(result.opacity));
if (image->colorspace == CMYKColorspace)
SetIndexPixelComponent(q_indexes+x,ClampToQuantum(gamma*
result.index));
}
break;
case ErodeMorphology:
/* Minimum Value within kernel neighbourhood
**
** NOTE that the kernel is not reflected for this operation!
**
** NOTE: in normal Greyscale Morphology, the kernel value should
          ** be added to the real value; this is currently not done, due to
** the nature of the boolean kernels being used.
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNan(*k) || (*k) < 0.5 ) continue;
Minimize(min.red, (double) k_pixels[u].red);
Minimize(min.green, (double) k_pixels[u].green);
Minimize(min.blue, (double) k_pixels[u].blue);
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(min.index,(double) GetIndexPixelComponent(
k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case DilateMorphology:
/* Maximum Value within kernel neighbourhood
**
          ** NOTE for correct working of this operation for asymmetrical
          ** kernels, the kernel needs to be applied in its reflected form.
          ** That is, its values need to be reversed.
**
** NOTE: in normal Greyscale Morphology, the kernel value should
          ** be added to the real value; this is currently not done, due to
** the nature of the boolean kernels being used.
**
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) || (*k) < 0.5 ) continue;
Maximize(max.red, (double) k_pixels[u].red);
Maximize(max.green, (double) k_pixels[u].green);
Maximize(max.blue, (double) k_pixels[u].blue);
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Maximize(max.index, (double) GetIndexPixelComponent(
k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
          /* Minimum of Foreground Pixel minus Maximum of Background Pixels
**
** NOTE that the kernel is not reflected for this operation,
** and consists of both foreground and background pixel
** neighbourhoods, 0.0 for background, and 1.0 for foreground
          ** with either NaN or 0.5 values for don't care.
**
** Note that this will never produce a meaningless negative
** result. Such results can cause Thinning/Thicken to not work
** correctly when used against a greyscale image.
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNan(*k) ) continue;
if ( (*k) > 0.7 )
                { /* minimum of foreground pixels */
Minimize(min.red, (double) k_pixels[u].red);
Minimize(min.green, (double) k_pixels[u].green);
Minimize(min.blue, (double) k_pixels[u].blue);
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(min.index,(double) GetIndexPixelComponent(
k_indexes+u));
}
else if ( (*k) < 0.3 )
{ /* maximum of background pixels */
Maximize(max.red, (double) k_pixels[u].red);
Maximize(max.green, (double) k_pixels[u].green);
Maximize(max.blue, (double) k_pixels[u].blue);
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Maximize(max.index, (double) GetIndexPixelComponent(
k_indexes+u));
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* Pattern Match if difference is positive */
min.red -= max.red; Maximize( min.red, 0.0 );
min.green -= max.green; Maximize( min.green, 0.0 );
min.blue -= max.blue; Maximize( min.blue, 0.0 );
min.opacity -= max.opacity; Maximize( min.opacity, 0.0 );
min.index -= max.index; Maximize( min.index, 0.0 );
break;
case ErodeIntensityMorphology:
/* Select Pixel with Minimum Intensity within kernel neighbourhood
**
** WARNING: the intensity test fails for CMYK and does not
** take into account the moderating effect of the alpha channel
** on the intensity.
**
** NOTE that the kernel is not reflected for this operation!
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNan(*k) || (*k) < 0.5 ) continue;
if ( result.red == 0.0 ||
PixelIntensity(&(k_pixels[u])) < PixelIntensity(q) ) {
/* copy the whole pixel - no channel selection */
*q = k_pixels[u];
if ( result.red > 0.0 ) changed++;
result.red = 1.0;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case DilateIntensityMorphology:
/* Select Pixel with Maximum Intensity within kernel neighbourhood
**
** WARNING: the intensity test fails for CMYK and does not
** take into account the moderating effect of the alpha channel
** on the intensity (yet).
**
** NOTE for correct working of this operation for asymetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is its values needs to be reversed.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) || (*k) < 0.5 ) continue; /* boolean kernel */
if ( result.red == 0.0 ||
PixelIntensity(&(k_pixels[u])) > PixelIntensity(q) ) {
/* copy the whole pixel - no channel selection */
*q = k_pixels[u];
if ( result.red > 0.0 ) changed++;
result.red = 1.0;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
#if 0
      This code has been obsoleted by the MorphologyPrimitiveDirect() function.
      However it is still (almost) correct coding for Grayscale Morphology.
      That is...
        GrayErode is equivalent, but with kernel values subtracted from pixels,
        without the kernel rotation.
        GreyDilate is equivalent, but using Maximum() instead of Minimum(),
        using kernel rotation.
case DistanceMorphology:
/* Add kernel Value and select the minimum value found.
          ** The result is an iterative distance from the edge of the image shape.
**
          ** All Distance Kernels are symmetrical, but that may not always
          ** be the case.  For example, how about a distance from left edges?
          ** To work correctly with asymmetrical kernels the reflected kernel
          ** needs to be applied.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index,(*k)+GetIndexPixelComponent(
k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
#endif
case UndefinedMorphology:
default:
break; /* Do nothing */
}
/* Final mathematics of results (combine with original image?)
**
        ** NOTE: Difference Morphology operators Edge* and *Hat could also
        ** be done here but work better with iteration as an image difference
        ** in the controlling function (below).  Thicken and Thinning however
        ** should be done here so they can be iterated correctly.
*/
switch ( method ) {
case HitAndMissMorphology:
case ErodeMorphology:
result = min; /* minimum of neighbourhood */
break;
case DilateMorphology:
result = max; /* maximum of neighbourhood */
break;
case ThinningMorphology:
/* subtract pattern match from original */
result.red -= min.red;
result.green -= min.green;
result.blue -= min.blue;
result.opacity -= min.opacity;
result.index -= min.index;
break;
case ThickenMorphology:
            /* Add the pattern matches to the original */
result.red += min.red;
result.green += min.green;
result.blue += min.blue;
result.opacity += min.opacity;
result.index += min.index;
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case UndefinedMorphology:
case ConvolveMorphology:
case DilateIntensityMorphology:
case ErodeIntensityMorphology:
break; /* full pixel was directly assigned - not a channel method */
default:
if ((channel & RedChannel) != 0)
SetRedPixelComponent(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetGreenPixelComponent(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetBluePixelComponent(q,ClampToQuantum(result.blue));
if ((channel & OpacityChannel) != 0
&& image->matte == MagickTrue )
SetAlphaPixelComponent(q,ClampToQuantum(result.opacity));
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
SetIndexPixelComponent(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetRedPixelComponent(q) )
|| ( p[r].green != GetGreenPixelComponent(q) )
|| ( p[r].blue != GetBluePixelComponent(q) )
|| ( p[r].opacity != GetOpacityPixelComponent(q) )
|| ( image->colorspace == CMYKColorspace &&
GetIndexPixelComponent(p_indexes+r) != GetIndexPixelComponent(q_indexes+x) ) )
changed++; /* The pixel was changed in some way! */
p++;
q++;
} /* x */
if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MorphologyImage)
#endif
proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
} /* y */
q_view=DestroyCacheView(q_view);
p_view=DestroyCacheView(p_view);
return(status ? (ssize_t)changed : -1);
}
/* This is almost identical to the MorphologyPrimitive() function above,
** but will apply the primitive directly to the image in two passes.
**
** That is, after each row is 'Sync'ed' into the image, the next row will
** make use of those values as part of the calculation of the next row.
** It then repeats, but going in the opposite (bottom-up) direction.
**
** Because of this 'iterative' handling this function cannot make use
** of multi-threaded, parallel processing.
*/
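/* Editorial sketch (not from the original source) of the two-pass idea:
** the forward (top-down) pass propagates minimum distances from pixels
** already processed above and to the left, then the backward (bottom-up)
** pass does the same from below and to the right.  For a 1-D row whose
** only edge pixel has value 0, with unit kernel increments:
**
**    after the forward pass:   ... 0 1 2 3 4 ...
**    after the backward pass:  each pixel holds the minimum of the
**                              forward result and distances propagated
**                              back from any later edge pixels.
*/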
static ssize_t MorphologyPrimitiveDirect(Image *image,
const MorphologyMethod method, const ChannelType channel,
const KernelInfo *kernel,ExceptionInfo *exception)
{
CacheView
*auth_view,
*virt_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y, offx, offy;
size_t
virt_width,
changed;
status=MagickTrue;
changed=0;
progress=0;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
  /* Some methods (including convolve) need to use a reflected kernel.
   * Adjust 'origin' offsets to loop through the kernel as a reflection.
*/
offx = kernel->x;
offy = kernel->y;
switch(method) {
case DistanceMorphology:
case VoronoiMorphology:
      /* kernel needs to be used with reflection about origin */
offx = (ssize_t) kernel->width-offx-1;
offy = (ssize_t) kernel->height-offy-1;
break;
#if 0
case ?????Morphology:
/* kernel is used as is, without reflection */
break;
#endif
default:
assert("Not a PrimativeDirect Morphology Method" != (char *) NULL);
break;
}
/* DO NOT THREAD THIS CODE! */
/* two views into same image (virtual, and actual) */
virt_view=AcquireCacheView(image);
auth_view=AcquireCacheView(image);
virt_width=image->columns+kernel->width-1;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register const IndexPacket
*restrict p_indexes;
register PixelPacket
*restrict q;
register IndexPacket
*restrict q_indexes;
register ssize_t
x;
ssize_t
r;
/* NOTE read virtual pixels, and authentic pixels, from the same image!
** we read using virtual to get virtual pixel handling, but write back
** into the same image.
**
** Only top half of kernel is processed as we do a single pass downward
** through the image iterating the distance function as we go.
*/
if (status == MagickFalse)
break;
p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width, (size_t) offy+1,
exception);
q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
status=MagickFalse;
if (status == MagickFalse)
break;
p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
    /* offset to origin in 'p', while 'q' points to it directly */
r = (ssize_t) virt_width*offy + offx;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
v;
register ssize_t
u;
register const double
*restrict k;
register const PixelPacket
*restrict k_pixels;
register const IndexPacket
*restrict k_indexes;
MagickPixelPacket
result;
/* Starting Defaults */
GetMagickPixelPacket(image,&result);
SetMagickPixelPacket(image,q,q_indexes,&result);
if ( method != VoronoiMorphology )
result.opacity = QuantumRange - result.opacity;
switch ( method ) {
case DistanceMorphology:
/* Add kernel Value and select the minimum value found. */
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v <= (ssize_t) offy; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetIndexPixelComponent(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=0; u < (ssize_t) offx; u++, k--) {
if ( x+u-offx < 0 ) continue; /* off the edge! */
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetIndexPixelComponent(k_indexes+u));
}
break;
case VoronoiMorphology:
          /* Apply Distance to 'Matte' channel, copying the closest color.
          **
          ** This is experimental, and really the 'alpha' component should
          ** be a completely separate 'masking' channel.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v <= (ssize_t) offy; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=0; u < (ssize_t) offx; u++, k--) {
if ( x+u-offx < 0 ) continue; /* off the edge! */
if ( IsNan(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case VoronoiMorphology:
SetPixelPacket(image,&result,q,q_indexes);
break;
default:
if ((channel & RedChannel) != 0)
SetRedPixelComponent(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetGreenPixelComponent(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetBluePixelComponent(q,ClampToQuantum(result.blue));
if ((channel & OpacityChannel) != 0 && image->matte == MagickTrue )
SetAlphaPixelComponent(q,ClampToQuantum(result.opacity));
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
SetIndexPixelComponent(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetRedPixelComponent(q) )
|| ( p[r].green != GetGreenPixelComponent(q) )
|| ( p[r].blue != GetBluePixelComponent(q) )
|| ( p[r].opacity != GetOpacityPixelComponent(q) )
|| ( image->colorspace == CMYKColorspace &&
GetIndexPixelComponent(p_indexes+r) != GetIndexPixelComponent(q_indexes+x) ) )
changed++; /* The pixel was changed in some way! */
p++; /* increment pixel buffers */
q++;
} /* x */
if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
== MagickFalse )
status=MagickFalse;
} /* y */
/* Do the reversed pass through the image */
for (y=(ssize_t)image->rows-1; y >= 0; y--)
{
register const PixelPacket
*restrict p;
register const IndexPacket
*restrict p_indexes;
register PixelPacket
*restrict q;
register IndexPacket
*restrict q_indexes;
register ssize_t
x;
ssize_t
r;
if (status == MagickFalse)
break;
/* NOTE read virtual pixels, and authentic pixels, from the same image!
** we read using virtual to get virtual pixel handling, but write back
** into the same image.
**
    ** Only the bottom half of the kernel will be processed as we move
    ** up the image.
*/
p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width, (size_t) kernel->y+1,
exception);
q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
status=MagickFalse;
if (status == MagickFalse)
break;
p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
/* adjust positions to end of row */
p += image->columns-1;
q += image->columns-1;
    /* offset to origin in 'p', while 'q' points to it directly */
r = offx;
for (x=(ssize_t)image->columns-1; x >= 0; x--)
{
ssize_t
v;
register ssize_t
u;
register const double
*restrict k;
register const PixelPacket
*restrict k_pixels;
register const IndexPacket
*restrict k_indexes;
MagickPixelPacket
result;
/* Default - previously modified pixel */
GetMagickPixelPacket(image,&result);
SetMagickPixelPacket(image,q,q_indexes,&result);
if ( method != VoronoiMorphology )
result.opacity = QuantumRange - result.opacity;
switch ( method ) {
case DistanceMorphology:
/* Add kernel Value and select the minimum value found. */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=offy; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index,(*k)+GetIndexPixelComponent(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetIndexPixelComponent(k_indexes+u));
}
break;
case VoronoiMorphology:
          /* Apply Distance to 'Matte' channel, copying the closest color.
          **
          ** This is experimental, and really the 'alpha' component should
          ** be a completely separate 'masking' channel.
*/
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=offy; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
if ( IsNan(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case VoronoiMorphology:
SetPixelPacket(image,&result,q,q_indexes);
break;
default:
if ((channel & RedChannel) != 0)
SetRedPixelComponent(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetGreenPixelComponent(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetBluePixelComponent(q,ClampToQuantum(result.blue));
if ((channel & OpacityChannel) != 0 && image->matte == MagickTrue )
SetAlphaPixelComponent(q,ClampToQuantum(result.opacity));
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
SetIndexPixelComponent(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetRedPixelComponent(q) )
|| ( p[r].green != GetGreenPixelComponent(q) )
|| ( p[r].blue != GetBluePixelComponent(q) )
|| ( p[r].opacity != GetOpacityPixelComponent(q) )
|| ( image->colorspace == CMYKColorspace &&
GetIndexPixelComponent(p_indexes+r) != GetIndexPixelComponent(q_indexes+x) ) )
changed++; /* The pixel was changed in some way! */
p--; /* go backward through pixel buffers */
q--;
} /* x */
if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
== MagickFalse )
status=MagickFalse;
} /* y */
auth_view=DestroyCacheView(auth_view);
virt_view=DestroyCacheView(virt_view);
return(status ? (ssize_t) changed : -1);
}
/* Apply a Morphology by calling the above low-level primitive application
** functions.  This function handles any iteration loops, composition or
** re-iteration of results, and compound morphology methods that are based
** on multiple low-level (staged) morphology methods.
**
** Basically this provides the complex glue between the requested morphology
** method and the raw low-level implementation (above).
*/
MagickExport Image *MorphologyApply(const Image *image, const ChannelType
channel,const MorphologyMethod method, const ssize_t iterations,
const KernelInfo *kernel, const CompositeOperator compose,
const double bias, ExceptionInfo *exception)
{
CompositeOperator
curr_compose;
Image
*curr_image, /* Image we are working with or iterating */
*work_image, /* secondary image for primitive iteration */
*save_image, /* saved image - for 'edge' method only */
*rslt_image; /* resultant image - after multi-kernel handling */
KernelInfo
*reflected_kernel, /* A reflected copy of the kernel (if needed) */
*norm_kernel, /* the current normal un-reflected kernel */
*rflt_kernel, /* the current reflected kernel (if needed) */
*this_kernel; /* the kernel being applied */
MorphologyMethod
primitive; /* the current morphology primitive being applied */
CompositeOperator
rslt_compose; /* multi-kernel compose method for results to use */
MagickBooleanType
special, /* do we use a direct modify function? */
verbose; /* verbose output of results */
size_t
method_loop, /* Loop 1: number of compound method iterations (norm 1) */
method_limit, /* maximum number of compound method iterations */
kernel_number, /* Loop 2: the kernel number being applied */
stage_loop, /* Loop 3: primitive loop for compound morphology */
stage_limit, /* how many primitives are in this compound */
kernel_loop, /* Loop 4: iterate the kernel over image */
kernel_limit, /* number of times to iterate kernel */
count, /* total count of primitive steps applied */
kernel_changed, /* total count of changed using iterated kernel */
method_changed; /* total count of changed over method iteration */
ssize_t
changed; /* number pixels changed by last primitive operation */
char
v_info[80];
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
count = 0; /* number of low-level morphology primitives performed */
if ( iterations == 0 )
return((Image *)NULL); /* null operation - nothing to do! */
kernel_limit = (size_t) iterations;
if ( iterations < 0 ) /* negative iterations = infinite (well, almost) */
kernel_limit = image->columns>image->rows ? image->columns : image->rows;
verbose = IsMagickTrue(GetImageArtifact(image,"verbose"));
/* initialise for cleanup */
curr_image = (Image *) image;
curr_compose = image->compose;
(void) curr_compose;
work_image = save_image = rslt_image = (Image *) NULL;
reflected_kernel = (KernelInfo *) NULL;
/* Initialize specific methods
* + which loop should use the given iterations
* + how many primitives make up the compound morphology
* + multi-kernel compose method to use (by default)
*/
method_limit = 1; /* just do method once, unless otherwise set */
stage_limit = 1; /* assume method is not a compound */
special = MagickFalse; /* assume it is NOT a direct modify primitive */
rslt_compose = compose; /* and we are composing multi-kernels as given */
switch( method ) {
case SmoothMorphology: /* 4 primitive compound morphology */
stage_limit = 4;
break;
case OpenMorphology: /* 2 primitive compound morphology */
case OpenIntensityMorphology:
case TopHatMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case EdgeMorphology:
stage_limit = 2;
break;
case HitAndMissMorphology:
rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */
/* FALL THRU */
case ThinningMorphology:
case ThickenMorphology:
method_limit = kernel_limit; /* iterate the whole method */
kernel_limit = 1; /* do not do kernel iteration */
break;
case DistanceMorphology:
case VoronoiMorphology:
special = MagickTrue;
break;
default:
break;
}
/* Apply special methods with special requirements
** For example, single run only, or post-processing requirements
*/
if ( special == MagickTrue )
{
rslt_image=CloneImage(image,0,0,MagickTrue,exception);
if (rslt_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(rslt_image,DirectClass) == MagickFalse)
{
InheritException(exception,&rslt_image->exception);
goto error_cleanup;
}
changed = MorphologyPrimitiveDirect(rslt_image, method,
channel, kernel, exception);
if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr,
"%s:%.20g.%.20g #%.20g => Changed %.20g\n",
CommandOptionToMnemonic(MagickMorphologyOptions, method),
1.0,0.0,1.0, (double) changed);
if ( changed < 0 )
goto error_cleanup;
if ( method == VoronoiMorphology ) {
/* Preserve the alpha channel of the input image - but leave it turned off */
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
(void) CompositeImageChannel(rslt_image, DefaultChannels,
CopyOpacityCompositeOp, image, 0, 0);
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
}
goto exit_cleanup;
}
/* Handle user (caller) specified multi-kernel composition method */
if ( compose != UndefinedCompositeOp )
rslt_compose = compose; /* override default composition for method */
if ( rslt_compose == UndefinedCompositeOp )
rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */
/* Some methods require a reflected kernel to use with primitives.
* Create the reflected kernel for those methods. */
switch ( method ) {
case CorrelateMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case SmoothMorphology:
reflected_kernel = CloneKernelInfo(kernel);
if (reflected_kernel == (KernelInfo *) NULL)
goto error_cleanup;
RotateKernelInfo(reflected_kernel,180);
break;
default:
break;
}
/* Loops around more primitive morphology methods
** erode, dilate, open, close, smooth, edge, etc...
*/
/* Loop 1: iterate the compound method */
method_loop = 0;
method_changed = 1;
while ( method_loop < method_limit && method_changed > 0 ) {
method_loop++;
method_changed = 0;
/* Loop 2: iterate over each kernel in a multi-kernel list */
norm_kernel = (KernelInfo *) kernel;
this_kernel = (KernelInfo *) kernel;
rflt_kernel = reflected_kernel;
kernel_number = 0;
while ( norm_kernel != NULL ) {
/* Loop 3: Compound Morphology Staging - Select Primitive to apply */
stage_loop = 0; /* the compound morphology stage number */
while ( stage_loop < stage_limit ) {
stage_loop++; /* The stage of the compound morphology */
/* Select primitive morphology for this stage of compound method */
this_kernel = norm_kernel; /* default use unreflected kernel */
primitive = method; /* Assume method is a primitive */
switch( method ) {
case ErodeMorphology: /* just erode */
case EdgeInMorphology: /* erode and image difference */
primitive = ErodeMorphology;
break;
case DilateMorphology: /* just dilate */
case EdgeOutMorphology: /* dilate and image difference */
primitive = DilateMorphology;
break;
case OpenMorphology: /* erode then dilate */
case TopHatMorphology: /* open and image difference */
primitive = ErodeMorphology;
if ( stage_loop == 2 )
primitive = DilateMorphology;
break;
case OpenIntensityMorphology:
primitive = ErodeIntensityMorphology;
if ( stage_loop == 2 )
primitive = DilateIntensityMorphology;
break;
case CloseMorphology: /* dilate, then erode */
case BottomHatMorphology: /* close and image difference */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
if ( stage_loop == 2 )
primitive = ErodeMorphology;
break;
case CloseIntensityMorphology:
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateIntensityMorphology;
if ( stage_loop == 2 )
primitive = ErodeIntensityMorphology;
break;
case SmoothMorphology: /* open, close */
switch ( stage_loop ) {
case 1: /* start an open method, which starts with Erode */
primitive = ErodeMorphology;
break;
case 2: /* now Dilate the Erode */
primitive = DilateMorphology;
break;
case 3: /* Reflect kernel to start a Close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
break;
case 4: /* Finish the Close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ErodeMorphology;
break;
}
break;
case EdgeMorphology: /* dilate and erode difference */
primitive = DilateMorphology;
if ( stage_loop == 2 ) {
save_image = curr_image; /* save the image difference */
curr_image = (Image *) image;
primitive = ErodeMorphology;
}
break;
case CorrelateMorphology:
/* A Correlation is a Convolution with a reflected kernel; the
** Convolve primitive already performs its weighted sum using a
** reflected kernel. It may seem strange to convert a Correlation
** into a Convolution, as Correlation is the simpler method, but
** Convolution is much more commonly used, and it makes sense to
** implement it directly so as to avoid the need to duplicate the
** kernel when it is not required (which is typically the
** default).
*/
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ConvolveMorphology;
break;
default:
break;
}
assert( this_kernel != (KernelInfo *) NULL );
/* Extra information for debugging compound operations */
if ( verbose == MagickTrue ) {
if ( stage_limit > 1 )
(void) FormatLocaleString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
method_loop,(double) stage_loop);
else if ( primitive != method )
(void) FormatLocaleString(v_info, MaxTextExtent, "%s:%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
method_loop);
else
v_info[0] = '\0';
}
/* Loop 4: Iterate the kernel with primitive */
kernel_loop = 0;
kernel_changed = 0;
changed = 1;
while ( kernel_loop < kernel_limit && changed > 0 ) {
kernel_loop++; /* the iteration of this kernel */
/* Create a clone as the destination image, if not yet defined */
if ( work_image == (Image *) NULL )
{
work_image=CloneImage(image,0,0,MagickTrue,exception);
if (work_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(work_image,DirectClass) == MagickFalse)
{
InheritException(exception,&work_image->exception);
goto error_cleanup;
}
/* work_image->type=image->type; ??? */
}
/* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
count++;
changed = MorphologyPrimitive(curr_image, work_image, primitive,
channel, this_kernel, bias, exception);
if ( verbose == MagickTrue ) {
if ( kernel_loop > 1 )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
(void) FormatLocaleFile(stderr,
"%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
primitive),(this_kernel == rflt_kernel ) ? "*" : "",
(double) (method_loop+kernel_loop-1),(double) kernel_number,
(double) count,(double) changed);
}
if ( changed < 0 )
goto error_cleanup;
kernel_changed += changed;
method_changed += changed;
/* prepare next loop */
{ Image *tmp = work_image; /* swap images for iteration */
work_image = curr_image;
curr_image = tmp;
}
if ( work_image == image )
work_image = (Image *) NULL; /* replace input 'image' */
} /* End Loop 4: Iterate the kernel with primitive */
if ( verbose == MagickTrue && kernel_changed != (size_t)changed )
(void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
if ( verbose == MagickTrue && stage_loop < stage_limit )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */
#if 0
(void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
(void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
(void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
(void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
(void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif
} /* End Loop 3: Primitive (staging) Loop for Compound Methods */
/* Final Post-processing for some Compound Methods
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** Turn off SVG composition 'alpha blending'.
*/
switch( method ) {
case EdgeOutMorphology:
case EdgeInMorphology:
case TopHatMorphology:
case BottomHatMorphology:
if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr, "\n%s: Difference with original image",
CommandOptionToMnemonic(MagickMorphologyOptions, method) );
(void) CompositeImageChannel(curr_image,
(ChannelType) (channel & ~SyncChannels),
DifferenceCompositeOp, image, 0, 0);
break;
case EdgeMorphology:
if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr, "\n%s: Difference of Dilate and Erode",
CommandOptionToMnemonic(MagickMorphologyOptions, method) );
(void) CompositeImageChannel(curr_image,
(ChannelType) (channel & ~SyncChannels),
DifferenceCompositeOp, save_image, 0, 0);
save_image = DestroyImage(save_image); /* finished with save image */
break;
default:
break;
}
/* multi-kernel handling: re-iterate, or compose results */
if ( kernel->next == (KernelInfo *) NULL )
rslt_image = curr_image; /* just return the resulting image */
else if ( rslt_compose == NoCompositeOp )
{ if ( verbose == MagickTrue ) {
if ( this_kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " (re-iterate)");
else
(void) FormatLocaleFile(stderr, " (done)");
}
rslt_image = curr_image; /* return result, and re-iterate */
}
else if ( rslt_image == (Image *) NULL)
{ if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr, " (save for compose)");
rslt_image = curr_image;
curr_image = (Image *) image; /* continue with original image */
}
else
{ /* Add the new 'current' result to the composition
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** IE: Turn off SVG composition 'alpha blending'.
*/
if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr, " (compose \"%s\")",
CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
(void) CompositeImageChannel(rslt_image,
(ChannelType) (channel & ~SyncChannels), rslt_compose,
curr_image, 0, 0);
curr_image = DestroyImage(curr_image);
curr_image = (Image *) image; /* continue with original image */
}
if ( verbose == MagickTrue )
(void) FormatLocaleFile(stderr, "\n");
/* loop to the next kernel in a multi-kernel list */
norm_kernel = norm_kernel->next;
if ( rflt_kernel != (KernelInfo *) NULL )
rflt_kernel = rflt_kernel->next;
kernel_number++;
} /* End Loop 2: Loop over each kernel */
} /* End Loop 1: compound method iteration */
goto exit_cleanup;
/* Yes goto's are bad, but they make cleanup a lot more efficient */
error_cleanup:
if ( curr_image == rslt_image )
curr_image = (Image *) NULL;
if ( rslt_image != (Image *) NULL )
rslt_image = DestroyImage(rslt_image);
exit_cleanup:
if ( curr_image == rslt_image || curr_image == image )
curr_image = (Image *) NULL;
if ( curr_image != (Image *) NULL )
curr_image = DestroyImage(curr_image);
if ( work_image != (Image *) NULL )
work_image = DestroyImage(work_image);
if ( save_image != (Image *) NULL )
save_image = DestroyImage(save_image);
if ( reflected_kernel != (KernelInfo *) NULL )
reflected_kernel = DestroyKernelInfo(reflected_kernel);
return(rslt_image);
}
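/* Usage sketch (illustrative only, not part of the library): one way a
** caller might invoke MorphologyApply() directly for a single dilation.
** The "Octagon:3" kernel string, and the in-scope 'image' and 'exception'
** variables, are assumptions for the example.
*/
#if 0
  {
    KernelInfo
      *kernel;

    Image
      *dilate_image;

    kernel = AcquireKernelInfo("Octagon:3");
    if ( kernel != (KernelInfo *) NULL ) {
      dilate_image = MorphologyApply(image, DefaultChannels, DilateMorphology,
        1, kernel, UndefinedCompositeOp, image->bias, exception);
      kernel = DestroyKernelInfo(kernel);
    }
  }
#endif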
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImageChannel() applies a user supplied kernel to the image
% according to the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-bias")
% * Kernel Scale/normalize settings ("-set 'option:convolve:scale'")
% This can also include the addition of a scaled unity kernel.
% * Show Kernel being applied ("-set option:showkernel 1")
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,const MorphologyMethod method,
% const ssize_t iterations,const KernelInfo *kernel,
% ExceptionInfo *exception)
%
% Image *MorphologyImageChannel(const Image *image, const ChannelType
% channel,const MorphologyMethod method,const ssize_t iterations,
% const KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or until no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImageChannel(const Image *image,
const ChannelType channel,const MorphologyMethod method,
const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception)
{
KernelInfo
*curr_kernel;
CompositeOperator
compose;
Image
*morphology_image;
/* Apply Convolve/Correlate Normalization and Scaling Factors.
* This is done BEFORE the ShowKernelInfo() function is called so that
* users can see the results of the 'option:convolve:scale' option.
*/
curr_kernel = (KernelInfo *) kernel;
if ( method == ConvolveMorphology || method == CorrelateMorphology )
{
const char
*artifact;
artifact = GetImageArtifact(image,"convolve:scale");
if ( artifact != (const char *)NULL ) {
if ( curr_kernel == kernel )
curr_kernel = CloneKernelInfo(kernel);
if (curr_kernel == (KernelInfo *) NULL) {
curr_kernel=DestroyKernelInfo(curr_kernel);
return((Image *) NULL);
}
ScaleGeometryKernelInfo(curr_kernel, artifact);
}
}
/* display the (normalized) kernel via stderr */
if ( IsMagickTrue(GetImageArtifact(image,"showkernel"))
|| IsMagickTrue(GetImageArtifact(image,"convolve:showkernel"))
|| IsMagickTrue(GetImageArtifact(image,"morphology:showkernel")) )
ShowKernelInfo(curr_kernel);
/* Override the default handling of multi-kernel morphology results
* If 'Undefined' use the default method
* If 'None' (default for 'Convolve') re-iterate previous result
* Otherwise merge resulting images using compose method given.
* Default for 'HitAndMiss' is 'Lighten'.
*/
{ const char
*artifact;
artifact = GetImageArtifact(image,"morphology:compose");
compose = UndefinedCompositeOp; /* use default for method */
if ( artifact != (const char *) NULL)
compose = (CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,artifact);
}
/* Apply the Morphology */
morphology_image = MorphologyApply(image, channel, method, iterations,
curr_kernel, compose, image->bias, exception);
/* Cleanup and Exit */
if ( curr_kernel != kernel )
curr_kernel=DestroyKernelInfo(curr_kernel);
return(morphology_image);
}
MagickExport Image *MorphologyImage(const Image *image, const MorphologyMethod
method, const ssize_t iterations,const KernelInfo *kernel, ExceptionInfo
*exception)
{
Image
*morphology_image;
morphology_image=MorphologyImageChannel(image,DefaultChannels,method,
iterations,kernel,exception);
return(morphology_image);
}
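/* Usage sketch (illustrative only): the "convolve:scale" artifact described
** above may also be set programmatically before calling MorphologyImage(),
** so the kernel is normalized without touching the KernelInfo directly.
** The in-scope 'image', 'kernel' and 'exception' variables are assumptions
** for the example.
*/
#if 0
  {
    Image
      *convolve_image;

    (void) SetImageArtifact(image, "convolve:scale", "100%!");
    convolve_image = MorphologyImage(image, ConvolveMorphology, 1, kernel,
      exception);
  }
#endif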
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree rotations of either 1D kernels
% or square kernels, plus 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
/* rotate the lower kernels first */
if ( kernel->next != (KernelInfo *) NULL)
RotateKernelInfo(kernel->next, angle);
/* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
**
** TODO: expand beyond simple 90 degree rotates, flips and flops
*/
/* reduce the angle modulo 360 degrees */
angle = fmod(angle, 360.0);
if ( angle < 0 )
angle += 360.0;
if ( 337.5 < angle || angle <= 22.5 )
return; /* Near zero angle - no change! - At least not at this time */
/* Handle special cases */
switch (kernel->type) {
/* These built-in kernels are cylindrical kernels, rotating is useless */
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case DiskKernel:
case PeaksKernel:
case LaplacianKernel:
case ChebyshevKernel:
case ManhattanKernel:
case EuclideanKernel:
return;
/* These may be rotatable at non-90 angles in the future */
/* but simply rotating them in multiples of 90 degrees is useless */
case SquareKernel:
case DiamondKernel:
case PlusKernel:
case CrossKernel:
return;
/* These only allow a +/-90 degree rotation (by transpose) */
/* A 180 degree rotation is useless */
case BlurKernel:
case RectangleKernel:
if ( 135.0 < angle && angle <= 225.0 )
return;
if ( 225.0 < angle && angle <= 315.0 )
angle -= 180;
break;
default:
break;
}
/* Attempt rotations by 45 degrees */
if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
{
if ( kernel->width == 3 && kernel->height == 3 )
{ /* Rotate a 3x3 square by 45 degree angle */
MagickRealType t = kernel->values[0];
kernel->values[0] = kernel->values[3];
kernel->values[3] = kernel->values[6];
kernel->values[6] = kernel->values[7];
kernel->values[7] = kernel->values[8];
kernel->values[8] = kernel->values[5];
kernel->values[5] = kernel->values[2];
kernel->values[2] = kernel->values[1];
kernel->values[1] = t;
/* rotate non-centered origin */
if ( kernel->x != 1 || kernel->y != 1 ) {
ssize_t x,y;
x = (ssize_t) kernel->x-1;
y = (ssize_t) kernel->y-1;
if ( x == y ) x = 0;
else if ( x == 0 ) x = -y;
else if ( x == -y ) y = 0;
else if ( y == 0 ) y = x;
kernel->x = (ssize_t) x+1;
kernel->y = (ssize_t) y+1;
}
angle = fmod(angle+315.0, 360.0); /* angle reduced 45 degrees */
kernel->angle = fmod(kernel->angle+45.0, 360.0);
}
else
perror("Unable to rotate non-3x3 kernel by 45 degrees");
}
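/* Worked example of the 3x3 cycle above (numbers are the OLD value
** indices, showing where each lands; the center stays put):
**
**    old:  0 1 2        new:  3 0 1
**          3 4 5              6 4 2
**          6 7 8              7 8 5
*/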
if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
{
if ( kernel->width == 1 || kernel->height == 1 )
{ /* Do a transpose of a 1-dimensional kernel,
** which results in a fast 90 degree rotation of some type.
*/
ssize_t
t;
t = (ssize_t) kernel->width;
kernel->width = kernel->height;
kernel->height = (size_t) t;
t = kernel->x;
kernel->x = kernel->y;
kernel->y = t;
if ( kernel->width == 1 ) {
angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
kernel->angle = fmod(kernel->angle+90.0, 360.0);
} else {
angle = fmod(angle+90.0, 360.0); /* angle increased 90 degrees */
kernel->angle = fmod(kernel->angle+270.0, 360.0);
}
}
else if ( kernel->width == kernel->height )
{ /* Rotate a square array of values by 90 degrees */
{ register size_t
i,j,x,y;
register MagickRealType
*k,t;
k=kernel->values;
for( i=0, x=kernel->width-1; i<=x; i++, x--)
for( j=0, y=kernel->height-1; j<y; j++, y--)
{ t = k[i+j*kernel->width];
k[i+j*kernel->width] = k[j+x*kernel->width];
k[j+x*kernel->width] = k[x+y*kernel->width];
k[x+y*kernel->width] = k[y+i*kernel->width];
k[y+i*kernel->width] = t;
}
}
/* rotate the origin - relative to center of array */
{ register ssize_t x,y;
x = (ssize_t) (kernel->x*2-kernel->width+1);
y = (ssize_t) (kernel->y*2-kernel->height+1);
kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
}
angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
kernel->angle = fmod(kernel->angle+90.0, 360.0);
}
else
perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
}
if ( 135.0 < angle && angle <= 225.0 )
{
/* For a 180 degree rotation - also known as a reflection -
* this is actually a very common operation!
* Basically all that is needed is a reversal of the kernel data
* and a reflection of the origin.
*/
size_t
i,j;
register double
*k,t;
k=kernel->values;
for ( i=0, j=kernel->width*kernel->height-1; i<j; i++, j--)
t=k[i], k[i]=k[j], k[j]=t;
kernel->x = (ssize_t) kernel->width - kernel->x - 1;
kernel->y = (ssize_t) kernel->height - kernel->y - 1;
angle = fmod(angle-180.0, 360.0); /* angle reduced 180 degrees */
kernel->angle = fmod(kernel->angle+180.0, 360.0);
}
/* At this point the angle should be between -45 (315) and +45 degrees.
* In the future some form of non-orthogonal angled rotates could be
* performed here, possibly with a linear kernel restriction.
*/
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel,
%   const char *geometry)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
const char *geometry)
{
GeometryFlags
flags;
GeometryInfo
args;
SetGeometryInfo(&args);
flags = (GeometryFlags) ParseGeometry(geometry, &args);
#if 0
/* For Debugging Geometry Input */
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
if ( (flags & PercentValue) != 0 ) /* Handle Percentage flag*/
args.rho *= 0.01, args.sigma *= 0.01;
if ( (flags & RhoValue) == 0 ) /* Set Defaults for missing args */
args.rho = 1.0;
if ( (flags & SigmaValue) == 0 )
args.sigma = 0.0;
/* Scale/Normalize the input kernel */
ScaleKernelInfo(kernel, args.rho, flags);
/* Add Unity Kernel, for blending with original */
if ( (flags & SigmaValue) != 0 )
UnityAddKernelInfo(kernel, args.sigma);
return;
}
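/* Usage sketch (illustrative only): per the NOTE in ScaleKernelInfo() below,
** '!' in the geometry string requests NormalizeValue, so this normalizes the
** kernel and then halves it. The 'kernel' variable is an assumption for the
** example.
*/
#if 0
  ScaleGeometryKernelInfo(kernel, "50%!");
#endif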
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel are scaled
% directly using the given scaling factor without further change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into the -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically: kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For kernels that contain some negative values (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute value of the sum of values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero (such as 'Laplacian' kernels) the
% kernel will be scaled by just the sum of the positive values, so its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/background
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernel's creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
%   const GeometryFlags normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after any requested normalization) by this
% factor.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
const double scaling_factor,const GeometryFlags normalize_flags)
{
register ssize_t
i;
register double
pos_scale,
neg_scale;
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);
/* Normalization of Kernel */
pos_scale = 1.0;
if ( (normalize_flags&NormalizeValue) != 0 ) {
if ( fabs(kernel->positive_range + kernel->negative_range) > MagickEpsilon )
/* non-zero-summing kernel (generally positive) */
pos_scale = fabs(kernel->positive_range + kernel->negative_range);
else
/* zero-summing kernel */
pos_scale = kernel->positive_range;
}
/* Force kernel into a normalized zero-summing kernel */
if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
pos_scale = ( fabs(kernel->positive_range) > MagickEpsilon )
? kernel->positive_range : 1.0;
neg_scale = ( fabs(kernel->negative_range) > MagickEpsilon )
? -kernel->negative_range : 1.0;
}
else
neg_scale = pos_scale;
/* finalize scaling_factor for positive and negative components */
pos_scale = scaling_factor/pos_scale;
neg_scale = scaling_factor/neg_scale;
for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
if ( ! IsNan(kernel->values[i]) )
kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;
/* convolution output range */
kernel->positive_range *= pos_scale;
kernel->negative_range *= neg_scale;
/* maximum and minimum values in kernel */
kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;
/* swap kernel settings if user's scaling factor is negative */
if ( scaling_factor < MagickEpsilon ) {
double t;
t = kernel->positive_range;
kernel->positive_range = kernel->negative_range;
kernel->negative_range = t;
t = kernel->maximum;
kernel->maximum = kernel->minimum;
kernel->minimum = t;
}
return;
}
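/* Worked example (comment only): a 3x3 averaging kernel of nine 1.0 values
** has positive_range 9.0 and negative_range 0.0, so NormalizeValue gives
** pos_scale = 1.0/9.0 and every value becomes 1/9 - a kernel summing to
** +1.0, exactly as described above.
*/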
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a user's 'showkernel' option request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ShowKernelInfo(KernelInfo *kernel)
{
KernelInfo
*k;
size_t
c, i, u, v;
for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) {
(void) FormatLocaleFile(stderr, "Kernel");
if ( kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c );
(void) FormatLocaleFile(stderr, " \"%s",
CommandOptionToMnemonic(MagickKernelOptions, k->type) );
if ( fabs(k->angle) > MagickEpsilon )
(void) FormatLocaleFile(stderr, "@%lg", k->angle);
(void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
k->width,(unsigned long) k->height,(long) k->x,(long) k->y);
(void) FormatLocaleFile(stderr,
" with values from %.*lg to %.*lg\n",
GetMagickPrecision(), k->minimum,
GetMagickPrecision(), k->maximum);
(void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
GetMagickPrecision(), k->negative_range,
GetMagickPrecision(), k->positive_range);
if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon )
(void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
else if ( fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon )
(void) FormatLocaleFile(stderr, " (Normalized)\n");
else
(void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
GetMagickPrecision(), k->positive_range+k->negative_range);
for (i=v=0; v < k->height; v++) {
(void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v );
for (u=0; u < k->width; u++, i++)
if ( IsNan(k->values[i]) )
(void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
else
(void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
GetMagickPrecision(), k->values[i]);
(void) FormatLocaleFile(stderr,"\n");
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n i t y A d d K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
% void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
const double scale)
{
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
UnityAddKernelInfo(kernel->next, scale);
/* Add the scaled unity kernel to the existing kernel */
kernel->values[kernel->x+kernel->y*kernel->width] += scale;
CalcKernelMetaData(kernel); /* recalculate the meta-data */
return;
}
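/* Usage sketch (illustrative only): the classic sharpening construction
** sharpen = 2*Unity - blur, built by negating a normalized blur kernel and
** then blending in twice the unity kernel. The 'kernel' variable is an
** assumption for the example.
*/
#if 0
  ScaleKernelInfo(kernel, -1.0, NormalizeValue);
  UnityAddKernelInfo(kernel, 2.0);
#endif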
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simplify
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ZeroKernelNans(KernelInfo *kernel)
{
register size_t
i;
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
ZeroKernelNans(kernel->next);
for (i=0; i < (kernel->width*kernel->height); i++)
if ( IsNan(kernel->values[i]) )
kernel->values[i] = 0.0;
return;
}
|
laplace2d.c | /*
* Copyright 2012 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <stdio.h>
#include <string.h>
#include "timer.h"
#define NN 4096
#define NM 4096
double A[NN][NM];
double Anew[NN][NM];
int main(int argc, char** argv)
{
const int n = NN;
const int m = NM;
const int iter_max = 1000;
const double tol = 1.0e-6;
double error = 1.0;
memset(A, 0, n * m * sizeof(double));
memset(Anew, 0, n * m * sizeof(double));
for (int j = 0; j < n; j++)
{
A[j][0] = 1.0;
Anew[j][0] = 1.0;
}
printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m);
StartTimer();
int iter = 0;
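/* Jacobi relaxation: each sweep replaces every interior point with the
   average of its four neighbours; "error" tracks the largest change of
   any point so the loop can stop once the grid has converged to tol. */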
while ( error > tol && iter < iter_max )
{
error = 0.0;
#pragma omp parallel for shared(m, n, Anew, A) reduction(max:error)
#pragma acc kernels
for( int j = 1; j < n-1; j++)
{
for( int i = 1; i < m-1; i++ )
{
Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1]
+ A[j-1][i] + A[j+1][i]);
error = fmax( error, fabs(Anew[j][i] - A[j][i]));
}
}
#pragma omp parallel for shared(m, n, Anew, A)
#pragma acc kernels
for( int j = 1; j < n-1; j++)
{
for( int i = 1; i < m-1; i++ )
{
A[j][i] = Anew[j][i];
}
}
if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error);
iter++;
}
double runtime = GetTimer();
printf(" total: %f s\n", runtime / 1000);
}
|
GB_unaryop__lnot_uint64_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint64_fp32
// op(A') function: GB_tran__lnot_uint64_fp32
// C type: uint64_t
// A type: float
// cast: uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
float
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint64_t z ; GB_CAST_UNSIGNED(z,aij,64) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
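// Worked example (comment only): the cast runs before the operator, so
// aij = 3.5f becomes z = 3 and cij = !(3 != 0) = 0, while aij = 0.0f (or
// anything truncating to 0, such as 0.25f) gives z = 0 and cij = 1.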
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint64_fp32
(
uint64_t *Cx, // Cx and Ax may be aliased
float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint64_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
target_data-3.c | /* { dg-do run } */
#include <stdlib.h>
const int ROWS = 5;
const int COLS = 5;
void init (int Q[][COLS], const int rows, const int cols)
{
int i, j;
for (i = 0; i < rows; i++)
for (j = 0; j < cols; j++)
Q[i][j] = (i + 1) * 100 + (j + 1);
}
void check (int a[][COLS], int b[][COLS], const int rows, const int cols)
{
int i, j;
for (i = 0; i < rows; i++)
for (j = 0; j < cols; j++)
if (a[i][j] != b[i][j])
abort ();
}
void gramSchmidt_ref (int Q[][COLS], const int rows, const int cols)
{
int i, k;
for (k = 0; k < cols; k++)
{
int tmp = 0;
for (i = 0; i < rows; i++)
tmp += (Q[i][k] * Q[i][k]);
for (i = 0; i < rows; i++)
Q[i][k] *= tmp;
}
}
void gramSchmidt (int Q[][COLS], const int rows, const int cols)
{
int i, k;
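  /* The target data region keeps Q mapped on the device for the whole k
     loop, so the two target regions below reuse one device copy instead of
     remapping Q on every kernel launch. */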
#pragma omp target data map(Q[0:rows][0:cols]) map(to:COLS)
for (k = 0; k < cols; k++)
{
int tmp = 0;
#pragma omp target map(tofrom:tmp)
#pragma omp parallel for reduction(+:tmp)
for (i = 0; i < rows; i++)
tmp += (Q[i][k] * Q[i][k]);
#pragma omp target
#pragma omp parallel for
for (i = 0; i < rows; i++)
Q[i][k] *= tmp;
}
}
int main ()
{
int (*Q1)[COLS] = (int(*)[COLS]) malloc (ROWS * COLS * sizeof (int));
int (*Q2)[COLS] = (int(*)[COLS]) malloc (ROWS * COLS * sizeof (int));
init (Q1, ROWS, COLS);
init (Q2, ROWS, COLS);
gramSchmidt_ref (Q1, ROWS, COLS);
gramSchmidt (Q2, ROWS, COLS);
check (Q1, Q2, ROWS, COLS);
free (Q1);
free (Q2);
return 0;
}
|
generator_spgemm_csc_bsparse.c | /******************************************************************************
** Copyright (c) 2015-2017, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
/**
* @file
* This file is part of GemmCodeGenerator.
*
* @author Alexander Heinecke (alexander.heinecke AT mytum.de, http://www5.in.tum.de/wiki/index.php/Alexander_Heinecke,_M.Sc.,_M.Sc._with_honors)
*
* @section LICENSE
* Copyright (c) 2012-2014, Technische Universitaet Muenchen
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @section DESCRIPTION
* <DESCRIPTION>
*/
#include "generator_spgemm_csc_bsparse.h"
#include "generator_common.h"
#include <libxsmm_macros.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
LIBXSMM_INTERNAL_API_DEFINITION
void libxsmm_generator_spgemm_csc_bsparse( libxsmm_generated_code* io_generated_code,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const char* i_arch,
const unsigned int* i_row_idx,
const unsigned int* i_column_idx,
const double* i_values ) {
unsigned int l_n;
unsigned int l_z;
unsigned int l_column_elements;
unsigned int l_flop_count = 0;
char l_new_code[512];
int l_max_code_length = 511;
int l_code_length = 0;
LIBXSMM_UNUSED(i_values);
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_m = 0;\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* reset C if beta is zero */
if ( i_xgemm_desc->beta == 0 ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_n = 0;\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_n = 0; l_n < %u; l_n++) {\n", (unsigned int)i_xgemm_desc->n);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
if ( i_xgemm_desc->m > 1 ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma vector aligned\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
if ( (LIBXSMM_GEMM_FLAG_F32PREC & i_xgemm_desc->flags) == 0 ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) { C[(l_n*%u)+l_m] = 0.0; }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc);
} else {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) { C[(l_n*%u)+l_m] = 0.0f; }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc);
}
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* determine the correct simd pragma for each architecture */
if ( ( strcmp( i_arch, "noarch" ) == 0 ) ||
( strcmp( i_arch, "wsm" ) == 0 ) ||
( strcmp( i_arch, "snb" ) == 0 ) ||
( strcmp( i_arch, "hsw" ) == 0 ) ) {
if ( i_xgemm_desc->m > 7 ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(8)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
} else if ( i_xgemm_desc->m > 3 ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(4)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
} else if ( i_xgemm_desc->m > 1 ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(2)\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
} else {}
if ( (i_xgemm_desc->m > 1) &&
((LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0) &&
((LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0) ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma vector aligned\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
} else if ( ( strcmp( i_arch, "knc" ) == 0 ) ||
( strcmp( i_arch, "knl" ) == 0 ) ||
( strcmp( i_arch, "skx" ) == 0 ) ) {
if ( (i_xgemm_desc->m > 1) &&
((LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0) &&
((LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0) ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(32)\n #pragma vector aligned\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
} else {
libxsmm_handle_error( io_generated_code, LIBXSMM_ERR_ARCH );
return;
}
/* generate the actual kernel */
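/* Illustrative (assumption): for CSC column l_n = 0 of B with stored rows
 * {0,2} and lda = 8, the loop below would emit
 *   C[0+l_m] += A[0+l_m] * B[0];
 *   C[0+l_m] += A[16+l_m] * B[1];
 * one scalar statement per stored nonzero, inside the generated l_m loop. */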
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) {\n", (unsigned int)i_xgemm_desc->m);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
for ( l_n = 0; l_n < (unsigned int)i_xgemm_desc->n; l_n++ ) {
l_column_elements = i_column_idx[l_n+1] - i_column_idx[l_n];
for ( l_z = 0; l_z < l_column_elements; l_z++ ) {
/* check k such that we just use rows which actually need to be multiplied */
if ( i_row_idx[i_column_idx[l_n] + l_z] < (unsigned int)i_xgemm_desc->k ) {
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " C[%u+l_m] += A[%u+l_m] * B[%u];\n", l_n * i_xgemm_desc->ldc, i_row_idx[i_column_idx[l_n] + l_z]*i_xgemm_desc->lda, i_column_idx[l_n] + l_z);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_flop_count += 2;
}
}
}
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n");
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
/* add flop counter */
l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n#ifndef NDEBUG\n#ifdef _OPENMP\n#pragma omp atomic\n#endif\nlibxsmm_num_total_flops += %u;\n#endif\n", l_flop_count * (unsigned int)i_xgemm_desc->m);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
|
GB_binop__eq_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__eq_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__eq_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__eq_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__eq_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_int32)
// A*D function (colscale): GB (_AxD__eq_int32)
// D*A function (rowscale): GB (_DxB__eq_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__eq_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__eq_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_int32)
// C=scalar+B GB (_bind1st__eq_int32)
// C=scalar+B' GB (_bind1st_tran__eq_int32)
// C=A+scalar GB (_bind2nd__eq_int32)
// C=A'+scalar GB (_bind2nd_tran__eq_int32)
// C type: bool
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
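// Worked example (comment only): aij = 3, bij = 3 yields cij = true; the
// i,j index arguments are accepted but unused by this operator.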
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_INT32 || GxB_NO_EQ_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__eq_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__eq_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__eq_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__eq_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__eq_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__eq_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
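// Editorial note (not generated code): for eWiseUnion (is_eWiseUnion == true),
// alpha_scalar presumably stands in for missing entries of A and beta_scalar
// for missing entries of B, e.g. C(i,j) = (alpha_scalar == B(i,j)) where only
// B(i,j) is present; for eWiseAdd, an entry present in only one input is
// copied (typecast) to C unmodified.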
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__eq_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__eq_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__eq_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__eq_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__eq_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
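// Editorial sketch (not generated code) of what the loop above computes,
// assuming a dense B with no bitmap (Bb == NULL):
//      int32_t x = 42 ;
//      int32_t Bx [3] = { 41, 42, 43 } ;
//      bool Cx [3] ;
//      for (int p = 0 ; p < 3 ; p++) Cx [p] = (x == Bx [p]) ;
//      // Cx is now { false, true, false }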
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__eq_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB (_bind1st_tran__eq_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB (_bind2nd_tran__eq_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
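// Editorial usage sketch (not generated code): at the user level, kernels in
// this family are reached through calls such as the following, assuming the
// standard polymorphic SuiteSparse:GraphBLAS API:
//
//      GrB_Matrix A, B, C ;    // A and B of type GrB_INT32, already built
//      GrB_Matrix_new (&C, GrB_BOOL, nrows, ncols) ;
//      // C(i,j) = (A(i,j) == B(i,j)) wherever both entries are present
//      GrB_eWiseMult (C, NULL, NULL, GrB_EQ_INT32, A, B, NULL) ;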
|
re_model_template.h | /*!
 * This file is part of GPBoost, a C++ library for combining
* boosting with Gaussian process and mixed effects models
*
* Copyright (c) 2020 Fabio Sigrist. All rights reserved.
*
* Licensed under the Apache License Version 2.0. See LICENSE file in the project root for license information.
*/
#ifndef GPB_RE_MODEL_TEMPLATE_H_
#define GPB_RE_MODEL_TEMPLATE_H_
#define _USE_MATH_DEFINES // for M_PI
#include <cmath>
#include <GPBoost/type_defs.h>
#include <GPBoost/re_comp.h>
#include <GPBoost/sparse_matrix_utils.h>
#include <GPBoost/Vecchia_utils.h>
#include <GPBoost/GP_utils.h>
#include <GPBoost/likelihoods.h>
//#include <Eigen/src/misc/lapack.h>
#include <memory>
#include <mutex>
#include <vector>
#include <algorithm> // std::shuffle
#include <random> // std::default_random_engine
//#include <typeinfo> // Only needed for debugging
#include <chrono> // only needed for debugging
#include <thread> // only needed for debugging
//std::this_thread::sleep_for(std::chrono::milliseconds(200));// Only for debugging
//std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();// Only for debugging
//std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();// Only for debugging
//double el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count()) / 1000000.;// Only for debugging
//Log::REInfo("Time for : %g", el_time);// Only for debugging
#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795029
#endif
#include <LightGBM/utils/log.h>
using LightGBM::Log;
namespace GPBoost {
/*!
* \brief Template class used in the wrapper class REModel
* The template parameters <T_mat, T_chol> can be either <den_mat_t, chol_den_mat_t> or <sp_mat_t, chol_sp_mat_t>
* depending on whether dense or sparse linear matrix algebra is used
*/
template<typename T_mat, typename T_chol>
class REModelTemplate {
public:
/*! \brief Null constructor */
REModelTemplate();
/*!
* \brief Constructor
* \param num_data Number of data points
* \param cluster_ids_data IDs / labels indicating independent realizations of random effects / Gaussian processes (same values = same process realization)
* \param re_group_data Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param num_re_group Number of grouped (intercept) random effects
* \param re_group_rand_coef_data Covariate data for grouped random coefficients
* \param ind_effect_group_rand_coef Indices that relate every random coefficient to a "base" intercept grouped random effect. Counting starts at 1.
* \param num_re_group_rand_coef Number of grouped random coefficients
* \param num_gp Number of (intercept) Gaussian processes
* \param gp_coords_data Coordinates (features) for Gaussian process
* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
* \param num_gp_rand_coef Number of Gaussian process random coefficients
* \param cov_fct Type of covariance (kernel) function for Gaussian process. We follow the notation and parametrization of Diggle and Ribeiro (2007) except for the Matern covariance, where we follow Rasmussen and Williams (2006)
* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance, irrelevant for some covariance functions such as the exponential or Gaussian)
* \param vecchia_approx If true, the Vecchia approximation is used for the Gaussian process
* \param num_neighbors The number of neighbors used in the Vecchia approximation
* \param vecchia_ordering Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering
* \param vecchia_pred_type Type of Vecchia approximation for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions, "latent_order_obs_first_cond_obs_only" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are only observed points, "latent_order_obs_first_cond_all" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are selected among all points
* \param num_neighbors_pred The number of neighbors used in the Vecchia approximation for making predictions
* \param likelihood Likelihood function for the observed response variable. Default = "gaussian"
*/
REModelTemplate(data_size_t num_data,
const gp_id_t* cluster_ids_data = nullptr,
const char* re_group_data = nullptr,
data_size_t num_re_group = 0,
const double* re_group_rand_coef_data = nullptr,
const int32_t* ind_effect_group_rand_coef = nullptr,
data_size_t num_re_group_rand_coef = 0,
data_size_t num_gp = 0,
const double* gp_coords_data = nullptr,
int dim_gp_coords = 2,
const double* gp_rand_coef_data = nullptr,
data_size_t num_gp_rand_coef = 0,
const char* cov_fct = nullptr,
double cov_fct_shape = 0.,
bool vecchia_approx = false,
int num_neighbors = 30,
const char* vecchia_ordering = nullptr,
const char* vecchia_pred_type = nullptr,
int num_neighbors_pred = 30,
const char* likelihood = nullptr) {
CHECK(num_data > 0);
num_data_ = num_data;
vecchia_approx_ = vecchia_approx;
//Set up likelihood
string_t likelihood_strg;
if (likelihood == nullptr) {
likelihood_strg = "gaussian";
}
else {
likelihood_strg = std::string(likelihood);
}
gauss_likelihood_ = likelihood_strg == "gaussian";
//Set up GP IDs
SetUpGPIds(num_data_, cluster_ids_data, num_data_per_cluster_, data_indices_per_cluster_, unique_clusters_, num_clusters_);
num_comps_total_ = 0;
//Do some checks for grouped RE components and set meta data (number of components etc.)
std::vector<std::vector<string_t>> re_group_levels;//Matrix with group levels for the grouped random effects (re_group_levels[j] contains the levels for RE number j)
if (num_re_group > 0) {
if (vecchia_approx) {
Log::REFatal("The Veccia approximation cannot be used when there are grouped random effects (in the current implementation).");
}
num_re_group_ = num_re_group;
CHECK(re_group_data != nullptr);
if (num_re_group_rand_coef > 0) {
num_re_group_rand_coef_ = num_re_group_rand_coef;
CHECK(re_group_rand_coef_data != nullptr);
CHECK(ind_effect_group_rand_coef != nullptr);
for (int j = 0; j < num_re_group_rand_coef_; ++j) {
CHECK(0 < ind_effect_group_rand_coef[j] && ind_effect_group_rand_coef[j] <= num_re_group_);
}
ind_effect_group_rand_coef_ = std::vector<int>(ind_effect_group_rand_coef, ind_effect_group_rand_coef + num_re_group_rand_coef_);
}
num_re_group_total_ = num_re_group_ + num_re_group_rand_coef_;
num_comps_total_ += num_re_group_total_;
// Convert characters in 'const char* re_group_data' to matrix (num_re_group_ x num_data_) with strings of group labels
re_group_levels = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_));
if (num_re_group_ > 0) {
ConvertCharToStringGroupLevels(num_data_, num_re_group_, re_group_data, re_group_levels);
}
}
//Do some checks for GP components and set meta data (number of components etc.)
if (num_gp > 0) {
if (num_gp > 1) {
Log::REFatal("num_gp can only be either 0 or 1 in the current implementation");
}
num_gp_ = num_gp;
ind_intercept_gp_ = num_comps_total_;
CHECK(dim_gp_coords > 0);
CHECK(gp_coords_data != nullptr);
CHECK(cov_fct != nullptr);
dim_gp_coords_ = dim_gp_coords;
cov_fct_ = std::string(cov_fct);
cov_fct_shape_ = cov_fct_shape;
if (vecchia_approx) {
Log::REInfo("Starting nearest neighbor search for Vecchia approximation");
CHECK(num_neighbors > 0);
num_neighbors_ = num_neighbors;
CHECK(num_neighbors_pred > 0);
num_neighbors_pred_ = num_neighbors_pred;
if (vecchia_ordering == nullptr) {
vecchia_ordering_ = "none";
}
else {
vecchia_ordering_ = std::string(vecchia_ordering);
CHECK(vecchia_ordering_ == "none" || vecchia_ordering_ == "random");
}
if (vecchia_pred_type == nullptr) {
vecchia_pred_type_ = "order_obs_first_cond_obs_only";
}
else {
vecchia_pred_type_ = std::string(vecchia_pred_type);
if (SUPPORTED_VECCHIA_PRED_TYPES_.find(vecchia_pred_type_) == SUPPORTED_VECCHIA_PRED_TYPES_.end()) {
Log::REFatal("Prediction type '%s' is not supported for the Veccia approximation.", vecchia_pred_type_.c_str());
}
}
}
if (num_gp_rand_coef > 0) {//Random slopes
CHECK(gp_rand_coef_data != nullptr);
num_gp_rand_coef_ = num_gp_rand_coef;
}
num_gp_total_ = num_gp_ + num_gp_rand_coef_;
num_comps_total_ += num_gp_total_;
if (vecchia_approx) {
double num_mem_d = ((double)num_gp_total_) * ((double)num_data_) * ((double)num_neighbors_) * ((double)num_neighbors_);
int mem_size = (int)(num_mem_d * 8. / 1000000.);
if (mem_size > 8000) {
Log::REWarning("The current implementation of the Vecchia approximation is not optimized for memory usage. In your case (num. obs. = %d and num. neighbors = %d), at least approximately %d mb of memory is needed. If this is a problem, contact the developer of this package and ask to implement this feature.", num_data_, num_neighbors_, mem_size);
}
}
}
DetermineSpecialCasesModelsEstimationPrediction();
//Create RE/GP component models
for (const auto& cluster_i : unique_clusters_) {
std::vector<std::shared_ptr<RECompBase<T_mat>>> re_comps_cluster_i;
if (vecchia_approx_) {
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
std::vector<Triplet_t> entries_init_B_cluster_i;
std::vector<Triplet_t> entries_init_B_grad_cluster_i;
std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
CreateREComponentsVecchia(num_data_,
data_indices_per_cluster_,
cluster_i,
num_data_per_cluster_,
gp_coords_data,
dim_gp_coords_,
gp_rand_coef_data,
num_gp_rand_coef_,
cov_fct_,
cov_fct_shape_,
re_comps_cluster_i,
nearest_neighbors_cluster_i,
dist_obs_neighbors_cluster_i,
dist_between_neighbors_cluster_i,
entries_init_B_cluster_i,
entries_init_B_grad_cluster_i,
z_outer_z_obs_neighbors_cluster_i,
vecchia_ordering_,
num_neighbors_);
nearest_neighbors_.insert({ cluster_i, nearest_neighbors_cluster_i });
dist_obs_neighbors_.insert({ cluster_i, dist_obs_neighbors_cluster_i });
dist_between_neighbors_.insert({ cluster_i, dist_between_neighbors_cluster_i });
entries_init_B_.insert({ cluster_i, entries_init_B_cluster_i });
entries_init_B_grad_.insert({ cluster_i, entries_init_B_grad_cluster_i });
z_outer_z_obs_neighbors_.insert({ cluster_i, z_outer_z_obs_neighbors_cluster_i });
}//end vecchia_approx_
else {//not vecchia_approx_
CreateREComponents(num_data_,
num_re_group_,
data_indices_per_cluster_,
cluster_i,
re_group_levels,
num_data_per_cluster_,
num_re_group_rand_coef_,
re_group_rand_coef_data,
ind_effect_group_rand_coef_,
num_gp_, gp_coords_data,
dim_gp_coords_,
gp_rand_coef_data,
num_gp_rand_coef_,
cov_fct_,
cov_fct_shape_,
ind_intercept_gp_,
!only_grouped_REs_use_woodbury_identity_,
re_comps_cluster_i);
}//end not vecchia_approx_
re_comps_.insert({ cluster_i, re_comps_cluster_i });
}//end loop over clusters
//Create matrices Z and ZtZ if Woodbury identity is used (used only if there are only grouped REs and no GPs)
if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
InitializeMatricesForOnlyGroupedREsUseWoodburyIdentity();
}
InitializeIdentityMatricesForGaussianData();
if (vecchia_approx_) {
Log::REInfo("Nearest neighbors for Vecchia approximation found");
}
CheckCompatibilitySpecialOptions();
InitializeLikelihoods(likelihood_strg);
DetermineCovarianceParameterIndicesNumCovPars();
////Following only prints things for debugging
//Log::REInfo("********************** Meta data ********************************");
//Log::REInfo("num_data_ : %d", num_data_);
//Log::REInfo("num_clusters_ : %d", num_clusters_);
//Log::REInfo("num_re_group_ : %d", num_re_group_);
//Log::REInfo("num_re_group_rand_coef_ : %d", num_re_group_rand_coef_);
//Log::REInfo("num_re_group_total_ : %d", num_re_group_total_);
//Log::REInfo("num_gp_rand_coef_ : %d", num_gp_rand_coef_);
//Log::REInfo("num_gp_total_ : %d", num_gp_total_);
//Log::REInfo("num_cov_par_: %d", num_cov_par_);
//for (unsigned i = 0; i < ind_par_.size(); i++) { Log::REInfo("ind_par_[%d]: %d", i, ind_par_[i]); }
//Log::REInfo("******************************************************");
//int ii = 0;
//for (const auto& cluster_i : unique_clusters_) {
// Log::REInfo("unique_clusters_[%d]: %d", ii, cluster_i);
// Log::REInfo("num_data_per_cluster_[%d]: %d", cluster_i, num_data_per_cluster_[cluster_i]);
// //for (int j = 0; j < std::min((int)data_indices_per_cluster_[cluster_i].size(), 10); ++j) { Log::REInfo("data_indices_per_cluster_[%d][%d]: %d", cluster_i, j, data_indices_per_cluster_[cluster_i][j]); }
// if (num_re_group_ > 0) {
// Log::REInfo("*********************** Grouped REs *******************************");
// //Log::REInfo("re_comps_[cluster_i] %s ", typeid(re_comps_[cluster_i]).name());
// //Log::REInfo("re_comps_[cluster_i].size(): %d", re_comps_[cluster_i].size());
// //for (const auto& re_comp : re_comps_[cluster_i]) {
// for (int j = 0; j < re_comps_[cluster_i].size(); ++j) {
// std::shared_ptr<RECompGroup<T_mat>> re_comp_group = std::dynamic_pointer_cast<RECompGroup<T_mat>>(re_comps_[cluster_i][j]);
// //for (const auto& el : re_comp_group->group_data_) { Log::REInfo("re_comps_[%d][j].group_data_[i]: %d", cluster_i, el); }
// if (!re_comp_group->is_rand_coef_) {
// for (int i = 0; i < std::min((int)(*re_comp_group->group_data_).size(), 10); i++) { Log::REInfo("re_comps_[%d][%d].group_data_[%d]: %s", cluster_i, j, i, (*re_comp_group->group_data_)[i]); }
// }
// else if (re_comp_group->is_rand_coef_) {
// for (int i = 0; i < std::min(num_data_per_cluster_[cluster_i], 10); i++) { Log::REInfo("re_comps_[%d][%d].group_data_ref_[%d]: %s", cluster_i, j, i, (*re_comp_group->group_data_)[i]); }
// for (int i = 0; i < std::min(num_data_per_cluster_[cluster_i], 10); i++) { Log::REInfo("re_comps_[%d][%d].rand_coef_data_[%d]: %g", cluster_i, j, i, re_comp_group->rand_coef_data_[i]); }
// }
// }
// }
// ii++;
//}
}//end REModelTemplate
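////Editorial usage sketch (commented out; 'n' and 'group_labels' are hypothetical):
////a model with a single grouped (intercept) random effect and dense matrix algebra
//REModelTemplate<den_mat_t, chol_den_mat_t> model(n,
//	nullptr,		//cluster_ids_data: one common realization
//	group_labels,	//re_group_data: '\0'-terminated group labels
//	1);			//num_re_group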
/*! \brief Destructor */
~REModelTemplate() {
}
/*! \brief Disable copy */
REModelTemplate& operator=(const REModelTemplate&) = delete;
/*! \brief Disable copy */
REModelTemplate(const REModelTemplate&) = delete;
/*!
* \brief Returns the type of likelihood
*/
string_t GetLikelihood() {
return(likelihood_[unique_clusters_[0]]->GetLikelihood());
}
/*!
* \brief Set / change the type of likelihood
* \param likelihood Likelihood name
*/
void SetLikelihood(const string_t& likelihood) {
bool gauss_likelihood_before = gauss_likelihood_;
bool only_one_grouped_RE_calculations_on_RE_scale_before = only_one_grouped_RE_calculations_on_RE_scale_;
bool only_grouped_REs_use_woodbury_identity_before = only_grouped_REs_use_woodbury_identity_;
gauss_likelihood_ = likelihood == "gaussian";
DetermineSpecialCasesModelsEstimationPrediction();
CheckCompatibilitySpecialOptions();
//Make adaptions in re_comps_ for special options when switching between Gaussian and non-Gaussian likelihoods
if (gauss_likelihood_before && !gauss_likelihood_) {
if (only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_) {
for (const auto& cluster_i : unique_clusters_) {
re_comps_[cluster_i][0]->DropZ();
}
}
}
else if (!gauss_likelihood_before && gauss_likelihood_) {
if (only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_) {
for (const auto& cluster_i : unique_clusters_) {
re_comps_[cluster_i][0]->AddZ();
}
}
}
//Matrices used when only_grouped_REs_use_woodbury_identity_==true
if ((only_grouped_REs_use_woodbury_identity_ && !only_grouped_REs_use_woodbury_identity_before) ||
(only_grouped_REs_use_woodbury_identity_ && only_one_grouped_RE_calculations_on_RE_scale_before && !only_one_grouped_RE_calculations_on_RE_scale_)) {
InitializeMatricesForOnlyGroupedREsUseWoodburyIdentity();
}
else if (!only_grouped_REs_use_woodbury_identity_) {
//Delete not required matrices
Zt_ = std::map<gp_id_t, sp_mat_t>();
ZtZ_ = std::map<gp_id_t, sp_mat_t>();
cum_num_rand_eff_ = std::map<gp_id_t, std::vector<data_size_t>>();
Zj_square_sum_ = std::map<gp_id_t, std::vector<double>>();
ZtZj_ = std::map<gp_id_t, std::vector<sp_mat_t>>();
}
//Identity matrices for Gaussian data
if (!gauss_likelihood_before && gauss_likelihood_) {
InitializeIdentityMatricesForGaussianData();
}
else if (gauss_likelihood_before && !gauss_likelihood_) {
//Delete not required matrices
Id_ = std::map<gp_id_t, T_mat>();
Id_cs_ = std::map<gp_id_t, cs>();
}
InitializeLikelihoods(likelihood);
DetermineCovarianceParameterIndicesNumCovPars();
}
/*!
* \brief Find linear regression coefficients and covariance parameters that minimize the negative log-likelihood (=MLE) using (Nesterov accelerated) gradient descent
* Note: You should pre-allocate memory for optim_cov_pars and optim_coef. Their lengths equal the number of covariance parameters and the number of regression coefficients, respectively
* If calc_std_dev=true, you also need to pre-allocate memory for std_dev_cov_par and std_dev_coef of the same length for the standard deviations
* \param y_data Response variable data
* \param covariate_data Covariate data (=independent variables, features). Set to nullptr if there is no covariate data
* \param num_covariates Number of covariates
* \param[out] optim_cov_pars Optimal covariance parameters
* \param[out] optim_coef Optimal regression coefficients
* \param[out] num_it Number of iterations
* \param init_cov_pars Initial values for covariance parameters of RE components
* \param init_coef Initial values for the regression coefficients
* \param lr_coef Learning rate for fixed-effect linear coefficients
* \param lr_cov Learning rate for covariance parameters. If lr_cov < 0, default values are used. Default value = 0.1 for "gradient_descent" and 1. for "fisher_scoring"
* \param acc_rate_coef Acceleration rate for coefficients for Nesterov acceleration (only relevant if nesterov_schedule_version == 0).
* \param acc_rate_cov Acceleration rate for covariance parameters for Nesterov acceleration (only relevant if nesterov_schedule_version == 0).
* \param momentum_offset Number of iterations for which no momentum is applied in the beginning
* \param max_iter Maximal number of iterations
* \param delta_rel_conv Convergence criterion: stop iterating if the relative change in parameters is below this value
* \param use_nesterov_acc Indicates whether Nesterov acceleration is used in the gradient descent for finding the covariance parameters. Default = true, only used for "gradient_descent"
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
* \param optimizer_cov Optimizer for covariance parameters. Options: "gradient_descent" or "fisher_scoring" (default)
* \param optimizer_coef Optimizer for coefficients. Options: "gradient_descent" or "wls" (coordinate descent using weighted least squares, default)
* \param[out] std_dev_cov_par Standard deviations for the covariance parameters
* \param[out] std_dev_coef Standard deviations for the coefficients
* \param calc_std_dev If true, asymptotic standard deviations for the MLE of the covariance parameters are calculated as the diagonal of the inverse Fisher information
* \param convergence_criterion The convergence criterion used for terminating the optimization algorithm. Options: "relative_change_in_log_likelihood" (default) or "relative_change_in_parameters"
* \param fixed_effects Fixed effects component of location parameter (only used for non-Gaussian data)
* \param learn_covariance_parameters If true, covariance parameters are estimated (default = true)
*/
void OptimLinRegrCoefCovPar(const double* y_data,
const double* covariate_data,
int num_covariates,
double* optim_cov_pars,
double* optim_coef,
int& num_it,
double* init_cov_pars,
double* init_coef = nullptr,
double lr_coef = 0.1,
double lr_cov = -1.,
double acc_rate_coef = 0.5,
double acc_rate_cov = 0.5,
int momentum_offset = 2,
int max_iter = 1000,
double delta_rel_conv = 1.0e-6,
bool use_nesterov_acc = true,
int nesterov_schedule_version = 0,
string_t optimizer_cov = "fisher_scoring",
string_t optimizer_coef = "wls",
double* std_dev_cov_par = nullptr,
double* std_dev_coef = nullptr,
bool calc_std_dev = false,
string_t convergence_criterion = "relative_change_in_log_likelihood",
const double* fixed_effects = nullptr,
bool learn_covariance_parameters = true) {
// Some checks
if (SUPPORTED_OPTIM_COV_PAR_.find(optimizer_cov) == SUPPORTED_OPTIM_COV_PAR_.end()) {
Log::REFatal("Optimizer option '%s' is not supported for covariance parameters.", optimizer_cov.c_str());
}
if (SUPPORTED_CONV_CRIT_.find(convergence_criterion) == SUPPORTED_CONV_CRIT_.end()) {
Log::REFatal("Convergence criterion '%s' is not supported.", convergence_criterion.c_str());
}
if (!gauss_likelihood_) {
if (optimizer_cov != "gradient_descent") {
Log::REFatal("Optimizer option '%s' is not supported for covariance parameters for non-Gaussian data. Only 'gradient_descent' is supported.", optimizer_cov.c_str());
}
if (calc_std_dev) {
Log::REFatal("Calculation of standard deviations is not supported for non-Gaussian data.");
}
}
if (covariate_data != nullptr) {
if (SUPPORTED_OPTIM_COEF_.find(optimizer_coef) == SUPPORTED_OPTIM_COEF_.end()) {
Log::REFatal("Optimizer option '%s' is not supported for regression coefficients.", optimizer_coef.c_str());
}
if (!gauss_likelihood_ && optimizer_coef != "gradient_descent") {
Log::REFatal("Optimizer option '%s' is not supported for linear regression coefficients for non-Gaussian data. Only 'gradient_descent' is supported.", optimizer_coef.c_str());
}
}
if (gauss_likelihood_ && fixed_effects != nullptr) {
Log::REFatal("Additional external fixed effects in 'fixed_effects' can currently only be used for non-Gaussian data");
}
// Initialization of variables
if (covariate_data == nullptr) {
has_covariates_ = false;
}
else {
has_covariates_ = true;
}
bool use_nesterov_acc_coef = use_nesterov_acc;
if (optimizer_cov != "gradient_descent") {
use_nesterov_acc = false;//Nesterov acceleration is only used for gradient descent, not for Fisher scoring
}
if (optimizer_coef != "gradient_descent") {
use_nesterov_acc_coef = false;//Nesterov acceleration is only used for gradient descent, not for wls
}
bool terminate_optim = false;
num_it = max_iter;
bool profile_out_marginal_variance = (optimizer_cov == "gradient_descent" && gauss_likelihood_);
// Profiling out sigma (=using the closed-form expression for the error / nugget variance) is better for gradient descent for Gaussian data (the parameters usually live on different scales; the nugget needs a small learning rate but the others do not)
const double* fixed_effects_ptr = fixed_effects;
// Initialization of covariance parameters related variables
if (lr_cov < 0.) {//a value below 0 indicates that the default values should be used
if (optimizer_cov == "fisher_scoring") {
lr_cov = 1.;
}
else if (optimizer_cov == "gradient_descent") {
lr_cov = 0.1;
}
}
vec_t cov_pars = Eigen::Map<const vec_t>(init_cov_pars, num_cov_par_);
vec_t cov_pars_lag1 = vec_t(num_cov_par_);//used only if convergence_criterion == "relative_change_in_parameters"
vec_t cov_pars_after_grad_aux;//auxiliary variable used only if use_nesterov_acc == true
vec_t cov_pars_after_grad_aux_lag1 = cov_pars;//auxiliary variable used only if use_nesterov_acc == true
// Set response variable data (if needed)
if ((!has_covariates_ || !gauss_likelihood_) && y_data != nullptr) {
SetY(y_data);
}
if (!has_covariates_ || !gauss_likelihood_) {
CHECK(y_has_been_set_);//response variable data needs to have been set at this point for non-Gaussian data and for Gaussian data without covariates
}
// Initialization of linear regression coefficients related variables
vec_t beta, beta_lag1, beta_after_grad_aux, beta_after_grad_aux_lag1, resid, fixed_effects_vec;
if (has_covariates_) {
num_coef_ = num_covariates;
X_ = Eigen::Map<const den_mat_t>(covariate_data, num_data_, num_coef_);
//Check whether one of the columns contains only 1's and, if not, issue a warning
vec_t vec_ones(num_data_);
vec_ones.setOnes();
bool has_intercept = false;
for (int icol = 0; icol < num_coef_; ++icol) {
if ((X_.col(icol) - vec_ones).cwiseAbs().sum() < 0.001) {
has_intercept = true;
break;
}
}
if (!has_intercept) {
Log::REWarning("The covariate data contains no column of ones. This means that there is no intercept included.");
}
beta = vec_t(num_covariates);
if (init_coef == nullptr) {
beta.setZero();
}
else {
beta = Eigen::Map<const vec_t>(init_coef, num_covariates);
}
beta_after_grad_aux_lag1 = beta;
if (gauss_likelihood_) {
CHECK(y_data != nullptr);
// Copy of response data (used only for Gaussian data and if there are also linear covariates since then y_ is modified during the optimization algorithm and this contains the original data)
y_vec_ = Eigen::Map<const vec_t>(y_data, num_data_);
y_has_been_set_ = true;
resid = y_vec_ - (X_ * beta);
SetY(resid.data());
}
else {
fixed_effects_vec = X_ * beta;
if (fixed_effects != nullptr) {//add external fixed effects to linear predictor
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_; ++i) {
fixed_effects_vec[i] += fixed_effects[i];
}
}
fixed_effects_ptr = fixed_effects_vec.data();
}
}//end if has_covariates_
Log::REDebug("Initial covariance parameters");
for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::REDebug("cov_pars[%d]: %g", i, cov_pars[i]); }
if (has_covariates_) {
Log::REDebug("Initial linear regression coefficients");
for (int i = 0; i < std::min((int)beta.size(), 3); ++i) { Log::REDebug("beta[%d]: %g", i, beta[i]); }
}
// Initialize optimizer:
// - factorize the covariance matrix (Gaussian data) or calculate the posterior mode of the random effects for use in the Laplace approximation (non-Gaussian data)
// - calculate initial value of objective function
CalcCovFactorOrModeAndNegLL(cov_pars, fixed_effects_ptr);
// TODO: for likelihood evaluation we don't need y_aux = Psi^-1 * y but only Psi^-0.5 * y. So, if has_covariates_==true, we might skip this step here and save some time
if (gauss_likelihood_) {
Log::REDebug("Initial negative log-likelihood: %g", neg_log_likelihood_);
}
else {
Log::REDebug("Initial approximate negative marginal log-likelihood: %g", neg_log_likelihood_);
}
// Start optimization
for (int it = 0; it < max_iter; ++it) {
neg_log_likelihood_lag1_ = neg_log_likelihood_;
cov_pars_lag1 = cov_pars;
// Update linear regression coefficients using gradient descent or generalized least squares (the latter option only for Gaussian data)
if (has_covariates_) {
beta_lag1 = beta;
if (optimizer_coef == "gradient_descent") {// one step of gradient descent
vec_t grad_beta;
// Calculate gradient for linear regression coefficients
CalcLinCoefGrad(cov_pars[0], beta, grad_beta, fixed_effects_ptr);
// Update linear regression coefficients, apply step size safeguard, and recalculate mode for Laplace approx. (only for non-Gaussian data)
UpdateLinCoef(beta, grad_beta, lr_coef, cov_pars, use_nesterov_acc_coef, it, beta_after_grad_aux, beta_after_grad_aux_lag1,
acc_rate_coef, nesterov_schedule_version, momentum_offset, fixed_effects, fixed_effects_vec);
fixed_effects_ptr = fixed_effects_vec.data();
}
else if (optimizer_coef == "wls") {// coordinate descent using generalized least squares (only for Gaussian data)
CHECK(gauss_likelihood_);
SetY(y_vec_.data());
CalcYAux();
UpdateCoefGLS(X_, beta);
// Set resid for updating covariance parameters
resid = y_vec_ - (X_ * beta);
SetY(resid.data());
// Calculate y_aux = Psi^-1 * y (if not only_grouped_REs_use_woodbury_identity_) or y_tilde and y_tilde2 (if only_grouped_REs_use_woodbury_identity_) for covariance parameter update (only for Gaussian data)
if (only_grouped_REs_use_woodbury_identity_) {
CalcYtilde<T_mat>(true);//y_tilde = L^-1 * Z^T * y and y_tilde2 = Z * L^-T * L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z)
}
else {
CalcYAux();//y_aux = Psi^-1 * y
}
EvalNegLogLikelihood(nullptr, cov_pars.data(), neg_log_likelihood_after_lin_coef_update_, true, true, true);
}
}
else {
neg_log_likelihood_after_lin_coef_update_ = neg_log_likelihood_lag1_;
}
// end update regression coefficients
// Update covariance parameters using one step of gradient descent or Fisher scoring
if (learn_covariance_parameters) {
// Calculate gradient or natural gradient = FI^-1 * grad (for Fisher scoring)
vec_t nat_grad; // nat_grad = grad for gradient descent and nat_grad = FI^-1 * grad for Fisher scoring (="natural" gradient)
if (optimizer_cov == "gradient_descent") {//gradient descent
if (gauss_likelihood_) {
// First, profile out sigma (=use the closed-form expression for the error / nugget variance) since this is better for gradient descent (the parameters usually live on different scales; the nugget needs a small learning rate but the others do not)
CalcYTPsiIInvY<T_mat>(cov_pars[0], true, 1, true, true);
cov_pars[0] /= num_data_;
sigma2_ = cov_pars[0];
}
CalcCovParGrad(cov_pars, nat_grad, false, false, fixed_effects_ptr);
}
else if (optimizer_cov == "fisher_scoring") {//Fisher scoring
// We don't profile out sigma (=don't use the closed-form expression for the error / nugget variance) since this is better for Fisher scoring (otherwise many more iterations are needed)
vec_t grad;
den_mat_t FI;
CalcCovParGrad(cov_pars, grad, true, true, fixed_effects_ptr);
CalcFisherInformation(cov_pars, FI, true, true, true);
nat_grad = FI.llt().solve(grad);
}
// Update covariance parameters, apply step size safeguard, factorize covariance matrix, and calculate new value of objective function
UpdateCovPars(cov_pars, nat_grad, lr_cov, profile_out_marginal_variance, use_nesterov_acc, it, optimizer_cov,
cov_pars_after_grad_aux, cov_pars_after_grad_aux_lag1, acc_rate_cov, nesterov_schedule_version, momentum_offset, fixed_effects_ptr);
// Check for NA or Inf
if (std::isnan(cov_pars[0]) || std::isinf(cov_pars[0])) {
Log::REFatal("NaN or Inf occurred in covariance parameters. If this is a problem, consider doing the following. If you have used Fisher scoring, try using gradient descent. If you have used gradient descent, consider using a smaller learning rate.");
}
}
else {
neg_log_likelihood_ = neg_log_likelihood_after_lin_coef_update_;
}
// end update covariance parameters
// Check convergence
bool likelihood_is_na = std::isnan(neg_log_likelihood_) || std::isinf(neg_log_likelihood_);//if the likelihood is NA, we monitor the parameters instead of the likelihood
if (convergence_criterion == "relative_change_in_parameters" || likelihood_is_na) {
if (has_covariates_) {
if (((beta - beta_lag1).norm() < delta_rel_conv * beta_lag1.norm()) && ((cov_pars - cov_pars_lag1).norm() < delta_rel_conv * cov_pars_lag1.norm())) {
terminate_optim = true;
}
}
else {
if ((cov_pars - cov_pars_lag1).norm() < delta_rel_conv * cov_pars_lag1.norm()) {
terminate_optim = true;
}
}
}
else if (convergence_criterion == "relative_change_in_log_likelihood") {
if (std::abs(neg_log_likelihood_ - neg_log_likelihood_lag1_) < delta_rel_conv * std::abs(neg_log_likelihood_lag1_)) {
terminate_optim = true;
}
}
// Output for debugging
if (it < 10 || ((it + 1) % 10 == 0 && (it + 1) < 100) || ((it + 1) % 100 == 0 && (it + 1) < 1000) || ((it + 1) % 1000 == 0 && (it + 1) < 10000) || ((it + 1) % 10000 == 0)) {
Log::REDebug("GPModel parameter optimization iteration number %d", it + 1);
for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::REDebug("cov_pars[%d]: %g", i, cov_pars[i]); }
for (int i = 0; i < std::min((int)beta.size(), 5); ++i) { Log::REDebug("beta[%d]: %g", i, beta[i]); }
if (has_covariates_ && beta.size() > 5) {
Log::REDebug("Note: only the first 5 linear regression coefficients are shown");
}
if (gauss_likelihood_) {
Log::REDebug("Negative log-likelihood: %g", neg_log_likelihood_);
}
else {
Log::REDebug("Approximate negative marginal log-likelihood: %g", neg_log_likelihood_);
}
}
// Check whether to terminate
if (terminate_optim) {
num_it = it + 1;
break;
}
}//end for loop for optimization
if (num_it == max_iter) {
Log::REDebug("GPModel: no convergence after the maximal number of iterations");
}
else {
Log::REDebug("GPModel parameter estimation finished after %d iteration", num_it);
}
for (int i = 0; i < num_cov_par_; ++i) {
optim_cov_pars[i] = cov_pars[i];
}
if (calc_std_dev) {
vec_t std_dev_cov(num_cov_par_);
CalcStdDevCovPar(cov_pars, std_dev_cov);//TODO: maybe another call to CalcCovFactor can be avoided in CalcStdDevCovPar (need to take care of cov_pars[0])
for (int i = 0; i < num_cov_par_; ++i) {
std_dev_cov_par[i] = std_dev_cov[i];
}
}
if (has_covariates_) {
for (int i = 0; i < num_covariates; ++i) {
optim_coef[i] = beta[i];
}
if (calc_std_dev) {
vec_t std_dev_beta(num_covariates);
CalcStdDevCoef(cov_pars, X_, std_dev_beta);
for (int i = 0; i < num_covariates; ++i) {
std_dev_coef[i] = std_dev_beta[i];
}
}
}
}//end OptimLinRegrCoefCovPar
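////Editorial usage sketch (commented out; 'y', 'X', 'num_covariates', and the
////pre-allocated buffers 'opt_cov_pars', 'opt_coefs', and 'init_cov_pars' are hypothetical):
//int num_it;
//model.OptimLinRegrCoefCovPar(y, X, num_covariates, opt_cov_pars, opt_coefs, num_it, init_cov_pars);
////all remaining arguments keep their defaults (Fisher scoring, wls, max_iter = 1000, ...)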
/*!
* \brief Calculate the value of the negative log-likelihood
* \param y_data Response variable data
* \param cov_pars Values for covariance parameters of RE components
* \param[out] negll Negative log-likelihood
* \param CalcCovFactor_already_done If true, it is assumed that the covariance matrix has already been factorized
* \param CalcYAux_already_done If true, it is assumed that y_aux_ = Psi^-1 * y_ has already been calculated (only relevant if not only_grouped_REs_use_woodbury_identity_)
* \param CalcYtilde_already_done If true, it is assumed that y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z), has already been calculated (only relevant for only_grouped_REs_use_woodbury_identity_)
*/
void EvalNegLogLikelihood(const double* y_data, const double* cov_pars, double& negll,
bool CalcCovFactor_already_done = false, bool CalcYAux_already_done = false, bool CalcYtilde_already_done = false) {
CHECK(!(CalcYAux_already_done && !CalcCovFactor_already_done));// CalcYAux_already_done && !CalcCovFactor_already_done makes no sense
if (y_data != nullptr) {
SetY(y_data);
}
if (!CalcCovFactor_already_done) {
const vec_t cov_pars_vec = Eigen::Map<const vec_t>(cov_pars, num_cov_par_);
SetCovParsComps(cov_pars_vec);
CalcCovFactor(false, true, 1., false);//Create covariance matrix and factorize it
}
//Calculate quadratic form y^T Psi^-1 y
double yTPsiInvy;
CalcYTPsiIInvY<T_mat>(yTPsiInvy, true, 1, CalcYAux_already_done, CalcYtilde_already_done);
//Calculate log determinant
double log_det = 0;
for (const auto& cluster_i : unique_clusters_) {
if (vecchia_approx_) {
log_det -= D_inv_[cluster_i].diagonal().array().log().sum();
}
else {
if (only_grouped_REs_use_woodbury_identity_) {
log_det += (2. * chol_facts_[cluster_i].diagonal().array().log().sum());
for (int j = 0; j < num_comps_total_; ++j) {
int num_rand_eff = cum_num_rand_eff_[cluster_i][j + 1] - cum_num_rand_eff_[cluster_i][j];
log_det += (num_rand_eff * std::log(re_comps_[cluster_i][j]->cov_pars_[0]));
}
}
else {
log_det += (2. * chol_facts_[cluster_i].diagonal().array().log().sum());
}
}
}
negll = yTPsiInvy / 2. / cov_pars[0] + log_det / 2. + num_data_ / 2. * (std::log(cov_pars[0]) + std::log(2 * M_PI));
}//end EvalNegLogLikelihood
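//Editorial note: the expression above is the exact Gaussian negative log-likelihood
//with the marginal variance sigma2 = cov_pars[0] factored out of Psi:
//	-log L = y^T Psi^-1 y / (2 * sigma2) + log|Psi| / 2 + (n / 2) * (log(sigma2) + log(2 * pi)),
//where log|Psi| is accumulated per cluster from the Cholesky factors
//(or from D_inv_ in the case of the Vecchia approximation).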
/*!
* \brief Calculate the value of the approximate negative marginal log-likelihood obtained when using the Laplace approximation
* \param y_data Response variable data
* \param cov_pars Values for covariance parameters of RE components
* \param[out] negll Approximate negative marginal log-likelihood
* \param fixed_effects Fixed effects component of location parameter
* \param InitializeModeCovMat If true, posterior mode is initialized to 0 and the covariance matrix is calculated. Otherwise, existing values are used
* \param CalcModePostRandEff_already_done If true, it is assumed that the posterior mode of the random effects has already been calculated
*/
void EvalLAApproxNegLogLikelihood(const double* y_data, const double* cov_pars, double& negll,
const double* fixed_effects = nullptr, bool InitializeModeCovMat = true, bool CalcModePostRandEff_already_done = false) {
if (y_data != nullptr) {
SetY(y_data);
}
else {
if (!CalcModePostRandEff_already_done) {
CHECK(y_has_been_set_);
}
}
if (InitializeModeCovMat) {
CHECK(cov_pars != nullptr);
}
if (CalcModePostRandEff_already_done) {
negll = neg_log_likelihood_;//Whenever the mode is calculated, the likelihood is calculated as well, so we can simply return the saved neg_log_likelihood_
}
else {//not CalcModePostRandEff_already_done
if (InitializeModeCovMat) {
//We reset the initial modes to 0 so that repeated calls to EvalLAApproxNegLogLikelihood do not lead to (very small) differences.
for (const auto& cluster_i : unique_clusters_) {
likelihood_[cluster_i]->InitializeModeAvec();//TODO: maybe omit this step?
}
const vec_t cov_pars_vec = Eigen::Map<const vec_t>(cov_pars, num_cov_par_);
SetCovParsComps(cov_pars_vec);
if (vecchia_approx_) {
CalcCovFactor(true, true, 1., false);
}
else {
CalcSigmaComps();
CalcCovMatrixNonGauss();
}
}//end InitializeModeCovMat
negll = -CalcModePostRandEff(fixed_effects);
}//end not CalcModePostRandEff_already_done
}//end EvalLAApproxNegLogLikelihood
/*!
* \brief Set the data used for making predictions (useful if the same data is used repeatedly, e.g., in validation of GPBoost)
* \param num_data_pred Number of data points for which predictions are made
* \param cluster_ids_data_pred IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization) for which predictions are to be made
* \param re_group_data_pred Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param re_group_rand_coef_data_pred Covariate data for grouped random coefficients
* \param gp_coords_data_pred Coordinates (features) for Gaussian process
* \param gp_rand_coef_data_pred Covariate data for Gaussian process random coefficients
* \param covariate_data_pred Covariate data (=independent variables, features) for prediction
*/
void SetPredictionData(int num_data_pred,
const gp_id_t* cluster_ids_data_pred = nullptr, const char* re_group_data_pred = nullptr,
const double* re_group_rand_coef_data_pred = nullptr, double* gp_coords_data_pred = nullptr,
const double* gp_rand_coef_data_pred = nullptr, const double* covariate_data_pred = nullptr) {
CHECK(num_data_pred > 0);
if (cluster_ids_data_pred == nullptr) {
cluster_ids_data_pred_.clear();
}
else {
cluster_ids_data_pred_ = std::vector<gp_id_t>(cluster_ids_data_pred, cluster_ids_data_pred + num_data_pred);
}
if (re_group_data_pred == nullptr) {
re_group_levels_pred_.clear();
}
else {
//For grouped random effects: create matrix 're_group_levels_pred' (vector of vectors, dimension: num_re_group_ x num_data_pred) with strings of group levels from characters in 'const char* re_group_data_pred'
re_group_levels_pred_ = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_pred));
ConvertCharToStringGroupLevels(num_data_pred, num_re_group_, re_group_data_pred, re_group_levels_pred_);
}
if (re_group_rand_coef_data_pred == nullptr) {
re_group_rand_coef_data_pred_.clear();
}
else {
re_group_rand_coef_data_pred_ = std::vector<double>(re_group_rand_coef_data_pred, re_group_rand_coef_data_pred + num_data_pred * num_re_group_rand_coef_);
}
if (gp_coords_data_pred == nullptr) {
gp_coords_data_pred_.clear();
}
else {
gp_coords_data_pred_ = std::vector<double>(gp_coords_data_pred, gp_coords_data_pred + num_data_pred * dim_gp_coords_);
}
if (gp_rand_coef_data_pred == nullptr) {
gp_rand_coef_data_pred_.clear();
}
else {
gp_rand_coef_data_pred_ = std::vector<double>(gp_rand_coef_data_pred, gp_rand_coef_data_pred + num_data_pred * num_gp_rand_coef_);
}
if (covariate_data_pred == nullptr) {
covariate_data_pred_.clear();
}
else {
covariate_data_pred_ = std::vector<double>(covariate_data_pred, covariate_data_pred + num_data_pred * num_coef_);
}
num_data_pred_ = num_data_pred;
}//end SetPredictionData
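//Editorial note: the data stored here is consumed by Predict() when it is called
//with use_saved_data = true, in which case the corresponding arguments of Predict()
//are ignored.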
/*!
* \brief Make predictions: calculate conditional mean and variances or covariance matrix
* Note: You should pre-allocate memory for out_predict
* Its length is equal to num_data_pred if only the conditional mean is predicted (predict_cov_mat==false && predict_var==false)
* or num_data_pred * (1 + num_data_pred) if the predictive covariance matrix is also calculated (predict_cov_mat==true)
* or num_data_pred * 2 if predictive variances are also calculated (predict_var==true)
* \param cov_pars_pred Covariance parameters of components
* \param y_obs Response variable for observed data
* \param num_data_pred Number of data points for which predictions are made
* \param[out] out_predict Predictive/conditional mean at prediction points followed by the predictive covariance matrix in column-major format (if predict_cov_mat==true) or the predictive variances (if predict_var==true)
* \param calc_cov_factor If true, the covariance matrix of the observed data is factorized otherwise a previously done factorization is used (default=true)
* \param predict_cov_mat If true, the predictive/conditional covariance matrix is calculated (default=false) (predict_var and predict_cov_mat cannot be both true)
* \param predict_var If true, the predictive/conditional variances are calculated (default=false) (predict_var and predict_cov_mat cannot be both true)
* \param predict_response If true, the response variable (label) is predicted, otherwise the latent random effects (this is only relevant for non-Gaussian data) (default=false)
* \param covariate_data_pred Covariate data (=independent variables, features) for prediction
* \param coef_pred Coefficients for linear covariates
* \param cluster_ids_data_pred IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization) for which predictions are to be made
* \param re_group_data_pred Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param re_group_rand_coef_data_pred Covariate data for grouped random coefficients
* \param gp_coords_data_pred Coordinates (features) for Gaussian process
* \param gp_rand_coef_data_pred Covariate data for Gaussian process random coefficients
* \param use_saved_data If true, saved data is used and some arguments are ignored
* \param vecchia_pred_type Type of Vecchia approximation for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions, "latent_order_obs_first_cond_obs_only" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are only observed points, "latent_order_obs_first_cond_all" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are selected among all points
* \param num_neighbors_pred The number of neighbors used in the Vecchia approximation for making predictions (-1 means that the value already set at initialization is used)
* \param fixed_effects Fixed effects component of location parameter for observed data (only used for non-Gaussian data)
* \param fixed_effects_pred Fixed effects component of location parameter for predicted data (only used for non-Gaussian data)
*/
void Predict(const double* cov_pars_pred, const double* y_obs, data_size_t num_data_pred,
double* out_predict, bool calc_cov_factor = true, bool predict_cov_mat = false, bool predict_var = false, bool predict_response = false,
const double* covariate_data_pred = nullptr, const double* coef_pred = nullptr,
const gp_id_t* cluster_ids_data_pred = nullptr, const char* re_group_data_pred = nullptr,
const double* re_group_rand_coef_data_pred = nullptr, double* gp_coords_data_pred = nullptr,
const double* gp_rand_coef_data_pred = nullptr, bool use_saved_data = false,
const char* vecchia_pred_type = nullptr, int num_neighbors_pred = -1,
const double* fixed_effects = nullptr, const double* fixed_effects_pred = nullptr) {
//First check whether previously set data should be used and load it if required
std::vector<std::vector<string_t>> re_group_levels_pred;//Matrix with group levels for the grouped random effects (re_group_levels_pred[j] contains the levels for RE number j)
if (use_saved_data) {
if (num_data_pred > 0) {
CHECK(num_data_pred == num_data_pred_);
}
else {
num_data_pred = num_data_pred_;
}
re_group_levels_pred = re_group_levels_pred_;
if (cluster_ids_data_pred_.empty()) {
cluster_ids_data_pred = nullptr;
}
else {
cluster_ids_data_pred = cluster_ids_data_pred_.data();
}
if (re_group_rand_coef_data_pred_.empty()) {
re_group_rand_coef_data_pred = nullptr;
}
else {
re_group_rand_coef_data_pred = re_group_rand_coef_data_pred_.data();
}
if (gp_coords_data_pred_.empty()) {
gp_coords_data_pred = nullptr;
}
else {
gp_coords_data_pred = gp_coords_data_pred_.data();
}
if (gp_rand_coef_data_pred_.empty()) {
gp_rand_coef_data_pred = nullptr;
}
else {
gp_rand_coef_data_pred = gp_rand_coef_data_pred_.data();
}
if (covariate_data_pred_.empty()) {
covariate_data_pred = nullptr;
}
else {
covariate_data_pred = covariate_data_pred_.data();
}
}
else {
if (re_group_data_pred != nullptr) {
//For grouped random effects: create matrix 're_group_levels_pred' (vector of vectors, dimension: num_re_group_ x num_data_pred) with strings of group levels from characters in 'const char* re_group_data_pred'
re_group_levels_pred = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_pred));
ConvertCharToStringGroupLevels(num_data_pred, num_re_group_, re_group_data_pred, re_group_levels_pred);
}
}
//Some checks
CHECK(num_data_pred > 0);
//Check whether required data is missing
if (re_group_rand_coef_data_pred == nullptr && num_re_group_rand_coef_ > 0) {
Log::REFatal("Missing covariate data for random coefficients for grouped random effects for making predictions");
}
if (gp_coords_data_pred == nullptr && num_gp_ > 0) {
Log::REFatal("Missing coordinate data for Gaussian process for making predictions");
}
if (gp_rand_coef_data_pred == nullptr && num_gp_rand_coef_ > 0) {
Log::REFatal("Missing covariate data for random coefficients for Gaussian process for making predictions");
}
if (cluster_ids_data_pred == nullptr && num_clusters_ > 1) {
Log::REFatal("Missing cluster_id data for making predictions");
}
if (!gauss_likelihood_ && predict_response && predict_cov_mat) {
Log::REFatal("Calculation of the predictive covariance matrix is not supported when predicting the response variable (label) for non-Gaussian data");
}
if (predict_cov_mat && predict_var) {
Log::REFatal("Calculation of both the predictive covariance matrix and variances is not supported. Choose one of these option (predict_cov_mat or predict_var)");
}
if (vecchia_approx_ && gauss_likelihood_ && predict_var) {
Log::REDebug("Calculation of only predictive variances is currently not optimized for the Vecchia approximation. If you need only variances and this takes too much time or memory, contact the developer or open a GitHub issue.");
}
if (has_covariates_) {
CHECK(covariate_data_pred != nullptr);
CHECK(coef_pred != nullptr);
}
if (y_obs == nullptr) {
if (!y_has_been_set_) {
Log::REFatal("Response variable data is not provided and has not been set before");
}
}
if (num_data_pred > 10000 && predict_cov_mat) {
double num_mem_d = ((double)num_data_pred) * ((double)num_data_pred);
int mem_size = (int)(num_mem_d * 8. / 1000000.);
Log::REWarning("The covariance matrix can be very large for large sample sizes which might lead to memory limitations. In your case (n = %d), the covariance needs at least approximately %d mb of memory. If you only need variances or covariances for linear combinations, contact the developer of this package or open a GitHub issue and ask to implement this feature.", num_data_pred, mem_size);
}
if (vecchia_approx_) {
if (vecchia_pred_type != nullptr) {
string_t vecchia_pred_type_S = std::string(vecchia_pred_type);
if (SUPPORTED_VECCHIA_PRED_TYPES_.find(vecchia_pred_type_S) == SUPPORTED_VECCHIA_PRED_TYPES_.end()) {
Log::REFatal("Prediction type '%s' is not supported for the Veccia approximation.", vecchia_pred_type_S.c_str());
}
vecchia_pred_type_ = vecchia_pred_type_S;
}
if (num_neighbors_pred > 0) {
num_neighbors_pred_ = num_neighbors_pred;
}
}
// Initialize linear predictor related terms and covariance parameters
vec_t coef, mu;//mu = linear regression predictor
if (has_covariates_) {//calculate linear regression term
coef = Eigen::Map<const vec_t>(coef_pred, num_coef_);
den_mat_t X_pred = Eigen::Map<const den_mat_t>(covariate_data_pred, num_data_pred, num_coef_);
mu = X_pred * coef;
}
vec_t cov_pars = Eigen::Map<const vec_t>(cov_pars_pred, num_cov_par_);
//Set up cluster IDs
std::map<gp_id_t, int> num_data_per_cluster_pred;
std::map<gp_id_t, std::vector<int>> data_indices_per_cluster_pred;
std::vector<gp_id_t> unique_clusters_pred;
data_size_t num_clusters_pred;
SetUpGPIds(num_data_pred, cluster_ids_data_pred, num_data_per_cluster_pred,
data_indices_per_cluster_pred, unique_clusters_pred, num_clusters_pred);
//Check whether predictions are made for existing clusters or only for new independent clusters
bool pred_for_observed_data = false;
for (const auto& cluster_i : unique_clusters_pred) {
if (std::find(unique_clusters_.begin(), unique_clusters_.end(), cluster_i) != unique_clusters_.end()) {
pred_for_observed_data = true;
break;
}
}
//Factorize covariance matrix and calculate Psi^{-1}y_obs or calculate Laplace approximation (if required)
const double* fixed_effects_ptr = fixed_effects;
vec_t fixed_effects_vec;
if (pred_for_observed_data) {//TODO (low prio): this actually needs to be done only for the GP realizations for which predictions are made (currently it is done for all of them in unique_clusters_pred)
// Set response data and fixed effects
if (gauss_likelihood_) {
if (has_covariates_ || fixed_effects != nullptr) {
vec_t resid;
if (y_obs != nullptr) {
resid = Eigen::Map<const vec_t>(y_obs, num_data_);
}
else {
resid = y_vec_;
}
if (has_covariates_) {
resid -= X_ * coef;
}
//add external fixed effects to linear predictor
if (fixed_effects != nullptr) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_; ++i) {
resid[i] -= fixed_effects[i];
}
}
SetY(resid.data());
}//end if has_covariates_
else {//no covariates
if (y_obs != nullptr) {
SetY(y_obs);
}
}//end no covariates
}//end if gauss_likelihood_
else {//if not gauss_likelihood_
if (has_covariates_) {
fixed_effects_vec = X_ * coef;
//add external fixed effects to linear predictor
if (fixed_effects != nullptr) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_; ++i) {
fixed_effects_vec[i] += fixed_effects[i];
}
}
fixed_effects_ptr = fixed_effects_vec.data();
}
if (y_obs != nullptr) {
SetY(y_obs);
}
}//end if not gauss_likelihood_
SetCovParsComps(cov_pars);
if (!(vecchia_approx_ && gauss_likelihood_)) {// no need to call CalcCovFactor here for the Vecchia approximation for Gaussian data, this is done in the prediction steps below
if (calc_cov_factor) {
if (gauss_likelihood_) {
CalcCovFactor(false, true, 1., false);// Create covariance matrix and factorize it
}
else {//not gauss_likelihood_
//We reset the initial modes to 0 so that different calls to the prediction function do not lead to (very small) differences
// caused by the mode being calculated from different starting values.
// If one is willing to accept these (very) small differences, one could disable this resetting, with the advantage of faster predictions
// since the mode does not need to be found anew.
for (const auto& cluster_i : unique_clusters_) {
likelihood_[cluster_i]->InitializeModeAvec();
}
if (vecchia_approx_) {
CalcCovFactor(false, true, 1., false);
}
else {
CalcSigmaComps();
CalcCovMatrixNonGauss();
}
CalcModePostRandEff(fixed_effects_ptr);
}//end not gauss_likelihood_
}//end if calc_cov_factor
if (gauss_likelihood_) {
CalcYAux();//note: in some cases a call to CalcYAux() could be avoided (e.g. no covariates and not GPBoost algorithm)...
}
}//end not (vecchia_approx_ && gauss_likelihood_)
}//end if pred_for_observed_data (factorization of covariance matrix)
// Loop over different clusters to calculate predictions
for (const auto& cluster_i : unique_clusters_pred) {
//Case 1: no data observed for this Gaussian process with ID 'cluster_i'
if (std::find(unique_clusters_.begin(), unique_clusters_.end(), cluster_i) == unique_clusters_.end()) {
T_mat psi;
std::vector<std::shared_ptr<RECompBase<T_mat>>> re_comps_cluster_i;
int num_REs_pred = num_data_per_cluster_pred[cluster_i];
//Calculate covariance matrix if needed
if (predict_cov_mat || predict_var || predict_response) {
if (vecchia_approx_) {
//TODO: move this code out into another function for better readability
// Initialize RE components
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]);
std::vector<Triplet_t> entries_init_B_cluster_i;
std::vector<Triplet_t> entries_init_B_grad_cluster_i;
std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]);
CreateREComponentsVecchia(num_data_pred, data_indices_per_cluster_pred, cluster_i, num_data_per_cluster_pred,
gp_coords_data_pred, dim_gp_coords_, gp_rand_coef_data_pred, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, re_comps_cluster_i,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i,
entries_init_B_cluster_i, entries_init_B_grad_cluster_i,
z_outer_z_obs_neighbors_cluster_i, "none", num_neighbors_pred_);//TODO: maybe also use ordering for making predictions? (need to check that there are not errors)
for (int j = 0; j < num_comps_total_; ++j) {
const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]);
re_comps_cluster_i[j]->SetCovPars(pars);
}
// Calculate a Cholesky factor
sp_mat_t B_cluster_i;
sp_mat_t D_inv_cluster_i;
std::vector<sp_mat_t> B_grad_cluster_i;//not used, but needs to be passed to function
std::vector<sp_mat_t> D_grad_cluster_i;//not used, but needs to be passed to function
CalcCovFactorVecchia(num_data_per_cluster_pred[cluster_i], false, re_comps_cluster_i,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i,
entries_init_B_cluster_i, entries_init_B_grad_cluster_i,
z_outer_z_obs_neighbors_cluster_i,
B_cluster_i, D_inv_cluster_i, B_grad_cluster_i, D_grad_cluster_i);
//Calculate Psi
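//Note: the Vecchia approximation gives Psi^{-1} = B^T * D^{-1} * B, hence
//Psi = B^{-1} * D * B^{-T} = (B^{-1} * D^{0.5}) * (B^{-1} * D^{0.5})^T;
//the sparse triangular solve below computes B^{-1} * D^{0.5}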
sp_mat_t D_sqrt(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]);
D_sqrt.setIdentity();
D_sqrt.diagonal().array() = D_inv_cluster_i.diagonal().array().pow(-0.5);
sp_mat_t B_inv_D_sqrt;
eigen_sp_Lower_sp_RHS_cs_solve(B_cluster_i, D_sqrt, B_inv_D_sqrt, true);
psi = B_inv_D_sqrt * B_inv_D_sqrt.transpose();
}//end vecchia_approx_
else {//not vecchia_approx_
CreateREComponents(num_data_pred,
num_re_group_,
data_indices_per_cluster_pred,
cluster_i,
re_group_levels_pred,
num_data_per_cluster_pred,
num_re_group_rand_coef_,
re_group_rand_coef_data_pred,
ind_effect_group_rand_coef_,
num_gp_,
gp_coords_data_pred,
dim_gp_coords_,
gp_rand_coef_data_pred,
num_gp_rand_coef_,
cov_fct_,
cov_fct_shape_,
ind_intercept_gp_,
true,
re_comps_cluster_i);
if (only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_) {
num_REs_pred = re_comps_cluster_i[0]->GetNumUniqueREs();
}
else {
num_REs_pred = num_data_per_cluster_pred[cluster_i];
}
psi.resize(num_REs_pred, num_REs_pred);
if (gauss_likelihood_) {
psi.setIdentity();//nugget effect
}
else {
psi.setZero();
}
for (int j = 0; j < num_comps_total_; ++j) {
const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]);
re_comps_cluster_i[j]->SetCovPars(pars);
re_comps_cluster_i[j]->CalcSigma();
psi += (*(re_comps_cluster_i[j]->GetZSigmaZt().get()));
}
}//end not vecchia_approx_
if (gauss_likelihood_) {
psi *= cov_pars[0];//back-transform
}
}//end calculation of covariance matrix
// Add external fixed_effects
vec_t mean_pred_id = vec_t::Zero(num_data_per_cluster_pred[cluster_i]);
if (fixed_effects_pred != nullptr) {//add externally provided fixed effects
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
mean_pred_id[i] += fixed_effects_pred[data_indices_per_cluster_pred[cluster_i][i]];
}
}
if (has_covariates_) {//add linear regression predictor
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
mean_pred_id[i] += mu[data_indices_per_cluster_pred[cluster_i][i]];
}
}
bool predict_var_or_response = predict_var || (predict_response && !gauss_likelihood_);
vec_t var_pred_id;
if (predict_var_or_response) {
var_pred_id = psi.diagonal();
}
//map predictions from the random effects scale b to the "data scale" Zb
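//E.g., for a single grouped random effect, Z is the 0/1 incidence matrix with Z_{ij} = 1
//if and only if observation i belongs to group j, so the mean becomes Z * b and the
//covariance matrix becomes Z * Cov(b) * Z^T (see the Zpred construction below)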
if (only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_) {
if (predict_var_or_response) {
vec_t var_pred_id_on_RE_scale = var_pred_id;
var_pred_id = vec_t(num_data_per_cluster_pred[cluster_i]);
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
var_pred_id[i] = var_pred_id_on_RE_scale[(re_comps_cluster_i[0]->random_effects_indices_of_data_)[i]];
}
}
if (predict_cov_mat) {
T_mat cov_mat_pred_id_on_RE_scale = psi;
sp_mat_t Zpred(num_data_per_cluster_pred[cluster_i], num_REs_pred);
std::vector<Triplet_t> triplets(num_data_per_cluster_pred[cluster_i]);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
triplets[i] = Triplet_t(i, (re_comps_cluster_i[0]->random_effects_indices_of_data_)[i], 1.);
}
Zpred.setFromTriplets(triplets.begin(), triplets.end());
psi = Zpred * cov_mat_pred_id_on_RE_scale * Zpred.transpose();
}
}//end only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_
// Transform to response scale for non-Gaussian data if needed
if (!gauss_likelihood_ && predict_response) {
likelihood_[unique_clusters_[0]]->PredictResponse(mean_pred_id, var_pred_id, predict_var);
}
//write on output
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
out_predict[data_indices_per_cluster_pred[cluster_i][i]] = mean_pred_id[i];
}
//Write covariance / variance on output
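//Layout of out_predict: entries [0, num_data_pred) contain the predictive means;
//if predict_cov_mat, the covariance matrix is written column-major starting at offset
//num_data_pred, i.e. entry (j,i) is stored at position i * num_data_pred + j + num_data_pred;
//if predict_var, the variances occupy positions [num_data_pred, 2 * num_data_pred)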
if (!predict_response || gauss_likelihood_) {//this is not done if predict_response==true for non-Gaussian data
if (predict_cov_mat) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {//column index
for (int j = 0; j < num_data_per_cluster_pred[cluster_i]; ++j) {//row index
out_predict[data_indices_per_cluster_pred[cluster_i][i] * num_data_pred + data_indices_per_cluster_pred[cluster_i][j] + num_data_pred] = psi.coeff(j, i);
}
}
}//end predict_cov_mat
if (predict_var) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
out_predict[data_indices_per_cluster_pred[cluster_i][i] + num_data_pred] = var_pred_id[i];
}
}//end predict_var
}//end !predict_response || gauss_likelihood_
else { // predict_response && !gauss_likelihood_
if (predict_var) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
out_predict[data_indices_per_cluster_pred[cluster_i][i] + num_data_pred] = var_pred_id[i];
}
}//end predict_var
}//end write covariance / variance on output
}//end cluster_i with no observed data
else {
//Case 2: there exists observed data for this cluster_i (= the typical case)
den_mat_t gp_coords_mat_pred;
std::vector<data_size_t> random_effects_indices_of_data_pred;
int num_REs_pred = num_data_per_cluster_pred[cluster_i];
if (num_gp_ > 0) {
std::vector<double> gp_coords_pred;
for (int j = 0; j < dim_gp_coords_; ++j) {
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
gp_coords_pred.push_back(gp_coords_data_pred[j * num_data_pred + id]);
}
}
gp_coords_mat_pred = Eigen::Map<den_mat_t>(gp_coords_pred.data(), num_data_per_cluster_pred[cluster_i], dim_gp_coords_);
}
if (only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {
random_effects_indices_of_data_pred = std::vector<data_size_t>(num_data_per_cluster_pred[cluster_i]);
std::vector<string_t> re_group_levels_pred_unique;
std::map<re_group_t, int> map_group_label_index_pred;
int num_group_pred = 0;
int ii = 0;
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
if (map_group_label_index_pred.find(re_group_levels_pred[0][id]) == map_group_label_index_pred.end()) {
map_group_label_index_pred.insert({ re_group_levels_pred[0][id], num_group_pred });
re_group_levels_pred_unique.push_back(re_group_levels_pred[0][id]);
random_effects_indices_of_data_pred[ii] = num_group_pred;
num_group_pred += 1;
}
else {
random_effects_indices_of_data_pred[ii] = map_group_label_index_pred[re_group_levels_pred[0][id]];
}
ii += 1;
}
re_group_levels_pred[0] = re_group_levels_pred_unique;
num_REs_pred = (int)re_group_levels_pred[0].size();
}//end only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_
else if (only_one_GP_calculations_on_RE_scale_) {
random_effects_indices_of_data_pred = std::vector<data_size_t>(num_data_per_cluster_pred[cluster_i]);
std::vector<int> uniques;//unique points
std::vector<int> unique_idx;//used for constructing incidence matrix Z_ if there are duplicates
DetermineUniqueDuplicateCoords(gp_coords_mat_pred, num_data_per_cluster_pred[cluster_i], uniques, unique_idx);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
random_effects_indices_of_data_pred[i] = unique_idx[i];
}
den_mat_t gp_coords_mat_pred_unique = gp_coords_mat_pred(uniques, Eigen::all);
gp_coords_mat_pred = gp_coords_mat_pred_unique;
num_REs_pred = (int)gp_coords_mat_pred.rows();
}//end only_one_GP_calculations_on_RE_scale_
// Initialize predictive mean and covariance
vec_t mean_pred_id;
if (only_one_GP_calculations_on_RE_scale_ ||
only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {
mean_pred_id = vec_t(num_REs_pred);
}
else {
mean_pred_id = vec_t(num_data_per_cluster_pred[cluster_i]);
}
T_mat cov_mat_pred_id;
vec_t var_pred_id;
// Calculate predictions
//Special case: Vecchia approximation for Gaussian data
if (vecchia_approx_ && gauss_likelihood_) {//TODO: move this code to another function for better readability
std::shared_ptr<RECompGP<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGP<T_mat>>(re_comps_[cluster_i][ind_intercept_gp_]);
int num_data_tot = num_data_per_cluster_[cluster_i] + num_data_per_cluster_pred[cluster_i];
double num_mem_d = ((double)num_neighbors_pred_) * ((double)num_neighbors_pred_) * ((double)num_data_tot) + ((double)num_neighbors_pred_) * ((double)num_data_tot);
int mem_size = (int)(num_mem_d * 8. / 1000000.);
if (mem_size > 4000) {
Log::REDebug("The current implementation of the Vecchia approximation needs a lot of memory if the number of neighbors is large. In your case (nb. of neighbors = %d, nb. of observations = %d, nb. of predictions = %d), this needs at least approximately %d mb of memory. If this is a problem for you, contact the developer of this package or open a GitHub issue and ask to change this.", num_neighbors_pred_, num_data_per_cluster_[cluster_i], num_data_per_cluster_pred[cluster_i], mem_size);
}
//TODO: implement a more efficient version when only predictive variances are required and not full covariance matrices
bool predict_var_or_cov_mat = predict_var || predict_cov_mat;
if (vecchia_pred_type_ == "order_obs_first_cond_obs_only") {
CalcPredVecchiaObservedFirstOrder(true, cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred,
predict_var_or_cov_mat, mean_pred_id, cov_mat_pred_id);
}
else if (vecchia_pred_type_ == "order_obs_first_cond_all") {
CalcPredVecchiaObservedFirstOrder(false, cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred,
predict_var_or_cov_mat, mean_pred_id, cov_mat_pred_id);
}
else if (vecchia_pred_type_ == "order_pred_first") {
CalcPredVecchiaPredictedFirstOrder(cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred,
predict_var_or_cov_mat, mean_pred_id, cov_mat_pred_id);
}
else if (vecchia_pred_type_ == "latent_order_obs_first_cond_obs_only") {
CalcPredVecchiaLatentObservedFirstOrder(true, cluster_i, num_data_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, predict_var_or_cov_mat, mean_pred_id, cov_mat_pred_id);
}
else if (vecchia_pred_type_ == "latent_order_obs_first_cond_all") {
CalcPredVecchiaLatentObservedFirstOrder(false, cluster_i, num_data_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, predict_var_or_cov_mat, mean_pred_id, cov_mat_pred_id);
}
if (predict_var) {
var_pred_id = cov_mat_pred_id.diagonal();
if (!predict_cov_mat) {
cov_mat_pred_id.resize(0, 0);
}
}
}//end (vecchia_approx_ && gauss_likelihood_)
else {// not vecchia_approx_ or not gauss_likelihood_
//General case: either non-Gaussian data or Gaussian data without the Vecchia approximation
//NOTE: if vecchia_approx_==true and gauss_likelihood_==false, the cross-covariance matrix Sigma_{1,2} = cov(x_pred,x) is not approximated but the exact version is used
bool predict_var_or_response = predict_var || (predict_response && !gauss_likelihood_);//variance needs to be available for response prediction for non-Gaussian data
CalcPred(cluster_i,
num_data_pred,
num_data_per_cluster_pred,
data_indices_per_cluster_pred,
re_group_levels_pred,
re_group_rand_coef_data_pred,
gp_coords_mat_pred,
gp_rand_coef_data_pred,
predict_cov_mat,
predict_var_or_response,
mean_pred_id,
cov_mat_pred_id,
var_pred_id);
//map predictions from the random effects scale b to the "data scale" Zb
if (only_one_GP_calculations_on_RE_scale_ ||
only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {
vec_t mean_pred_id_on_RE_scale = mean_pred_id;
mean_pred_id = vec_t(num_data_per_cluster_pred[cluster_i]);
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
mean_pred_id[i] = mean_pred_id_on_RE_scale[random_effects_indices_of_data_pred[i]];
}
if (predict_var_or_response) {
vec_t var_pred_id_on_RE_scale = var_pred_id;
var_pred_id = vec_t(num_data_per_cluster_pred[cluster_i]);
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
var_pred_id[i] = var_pred_id_on_RE_scale[random_effects_indices_of_data_pred[i]];
}
}
if (predict_cov_mat) {
T_mat cov_mat_pred_id_on_RE_scale = cov_mat_pred_id;
sp_mat_t Zpred(num_data_per_cluster_pred[cluster_i], num_REs_pred);
std::vector<Triplet_t> triplets(num_data_per_cluster_pred[cluster_i]);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
triplets[i] = Triplet_t(i, random_effects_indices_of_data_pred[i], 1.);
}
Zpred.setFromTriplets(triplets.begin(), triplets.end());
cov_mat_pred_id = Zpred * cov_mat_pred_id_on_RE_scale * Zpred.transpose();
}
}//end only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_
}//end not vecchia_approx_ or not gauss_likelihood_
//add externally provided fixed effects
if (fixed_effects_pred != nullptr) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
mean_pred_id[i] += fixed_effects_pred[data_indices_per_cluster_pred[cluster_i][i]];
}
}
//add linear regression predictor
if (has_covariates_) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
mean_pred_id[i] += mu[data_indices_per_cluster_pred[cluster_i][i]];
}
}
if (!gauss_likelihood_ && predict_response) {
likelihood_[unique_clusters_[0]]->PredictResponse(mean_pred_id, var_pred_id, predict_var);
}
//write on output
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
out_predict[data_indices_per_cluster_pred[cluster_i][i]] = mean_pred_id[i];
}
//Write covariance / variance on output
if (predict_cov_mat) {
if (gauss_likelihood_) {
cov_mat_pred_id *= cov_pars[0];
}
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {//column index
for (int j = 0; j < num_data_per_cluster_pred[cluster_i]; ++j) {//row index
out_predict[data_indices_per_cluster_pred[cluster_i][i] * num_data_pred + data_indices_per_cluster_pred[cluster_i][j] + num_data_pred] = cov_mat_pred_id.coeff(j, i);
}
}
}//end predict_cov_mat
if (predict_var) {
if (gauss_likelihood_) {
var_pred_id *= cov_pars[0];
}
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
out_predict[data_indices_per_cluster_pred[cluster_i][i] + num_data_pred] = var_pred_id[i];
}
}//end predict_var
//end write covariance / variance on output
}//end cluster_i with data
}//end loop over cluster
//Set cross-covariances between different independent clusters to 0
if (predict_cov_mat && unique_clusters_pred.size() > 1 && (!predict_response || gauss_likelihood_)) {
for (const auto& cluster_i : unique_clusters_pred) {
for (const auto& cluster_j : unique_clusters_pred) {
if (cluster_i != cluster_j) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {//column index
for (int j = 0; j < num_data_per_cluster_pred[cluster_j]; ++j) {//row index
out_predict[data_indices_per_cluster_pred[cluster_i][i] * num_data_pred + data_indices_per_cluster_pred[cluster_j][j] + num_data_pred] = 0.;
}
}
}
}
}
}
}//end Predict
/*!
* \brief Find "reasonable" default values for the intial values of the covariance parameters (on transformed scale)
* Note: You should pre-allocate memory for optim_cov_pars (length = number of covariance parameters)
* \param y_data Response variable data
* \param[out] init_cov_pars Initial values for covariance parameters of RE components
*/
void FindInitCovPar(const double* y_data, double* init_cov_pars) {
double mean = 0;
double var = 0;
int ind_par;
if (gauss_likelihood_) {
//determine initial value for nugget effect
for (int i = 0; i < num_data_; ++i) {//TODO: run in parallel
mean += y_data[i];
}
mean /= num_data_;
for (int i = 0; i < num_data_; ++i) {
var += (y_data[i] - mean) * (y_data[i] - mean);
}
var /= (num_data_ - 1);
init_cov_pars[0] = var;
ind_par = 1;
}//end Gaussian data
else {//non-Gaussian data
ind_par = 0;
}
if (vecchia_approx_) {//Neither distances nor coordinates are saved for random coefficient GPs in the Vecchia approximation -> cannot find initial parameters -> just copy the ones from the intercept GP
// find initial values for intercept process
int num_par_j = ind_par_[1] - ind_par_[0];
vec_t pars = vec_t(num_par_j);
re_comps_[unique_clusters_[0]][0]->FindInitCovPar(pars);
for (int jj = 0; jj < num_par_j; ++jj) {
init_cov_pars[ind_par] = pars[jj];
ind_par++;
}
//set the same values to random coefficient processes
for (int j = 1; j < num_gp_total_; ++j) {
num_par_j = ind_par_[j + 1] - ind_par_[j];
for (int jj = 0; jj < num_par_j; ++jj) {
init_cov_pars[ind_par] = pars[jj];
ind_par++;
}
}
}
else {
for (int j = 0; j < num_comps_total_; ++j) {
int num_par_j = ind_par_[j + 1] - ind_par_[j];
vec_t pars = vec_t(num_par_j);
re_comps_[unique_clusters_[0]][j]->FindInitCovPar(pars);
for (int jj = 0; jj < num_par_j; ++jj) {
init_cov_pars[ind_par] = pars[jj];
ind_par++;
}
}
}
}//end FindInitCovPar
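// A minimal sketch (not part of the original code) of how the sequential mean / variance
// loops in FindInitCovPar above could be parallelized with OpenMP reductions, addressing
// the TODO; it assumes num_data_ > 1 and OpenMP being available:
//
// double mean = 0., var = 0.;
// #pragma omp parallel for schedule(static) reduction(+:mean)
// for (int i = 0; i < num_data_; ++i) {
//   mean += y_data[i];
// }
// mean /= num_data_;
// #pragma omp parallel for schedule(static) reduction(+:var)
// for (int i = 0; i < num_data_; ++i) {
//   var += (y_data[i] - mean) * (y_data[i] - mean);
// }
// var /= (num_data_ - 1);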
int num_cov_par() {
return(num_cov_par_);
}
/*!
* \brief Calculate the leaf values when performing a Newton update step after the tree structure has been found in tree-boosting
* Note: only used in GPBoost for combined Gaussian process tree-boosting (this is called from 'objective_function_->NewtonUpdateLeafValues'). It is assumed that 'CalcYAux' has been called before (from 'objective_function_->GetGradients').
* \param data_leaf_index Leaf index for every data point (array of size num_data)
* \param num_leaves Number of leaves
* \param[out] leaf_values Leaf values when performing a Newton update step (array of size num_leaves)
* \param marg_variance The marginal variance. Default = 1. Can be used to multiply the values by it since Newton updates do not depend on it, but 'CalcYAux' might have been called using marg_variance != 1.
*/
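//Note: the Newton step below solves the normal equations
// (H^T * Psi^{-1} * H) * leaf_values = H^T * Psi^{-1} * (y - F),
//where H is the incidence matrix relating tree leaves to observations; the right-hand
//side is assembled from y_aux_ = Psi^{-1} * (F - y) (hence the minus signs below) and
//the system is solved with a dense Cholesky (LLT) factorization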
void NewtonUpdateLeafValues(const int* data_leaf_index,
const int num_leaves, double* leaf_values, double marg_variance = 1.) {
if (!gauss_likelihood_) {
Log::REFatal("Newton updates for leaf values is only supported for Gaussian data");
}
CHECK(y_aux_has_been_calculated_);//y_aux_ has already been calculated when calculating the gradient for finding the tree structure from 'GetGradients' in 'regression_objective.hpp'
den_mat_t HTPsiInvH(num_leaves, num_leaves);
vec_t HTYAux(num_leaves);
HTPsiInvH.setZero();
HTYAux.setZero();
for (const auto& cluster_i : unique_clusters_) {
//Entries for matrix H_cluster_i = incidence matrix H that relates tree leaves to observations for cluster_i
std::vector<Triplet_t> entries_H_cluster_i(num_data_per_cluster_[cluster_i]);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_[cluster_i]; ++i) {
entries_H_cluster_i[i] = Triplet_t(i, data_leaf_index[data_indices_per_cluster_[cluster_i][i]], 1.);
}
den_mat_t HTPsiInvH_cluster_i;
if (vecchia_approx_) {
sp_mat_t H_cluster_i(num_data_per_cluster_[cluster_i], num_leaves);//row major format is needed for Vecchia approx.
H_cluster_i.setFromTriplets(entries_H_cluster_i.begin(), entries_H_cluster_i.end());
HTYAux -= H_cluster_i.transpose() * y_aux_[cluster_i];//minus sign since y_aux_ has been calculated on the gradient = F-y (and not y-F)
sp_mat_t BH = B_[cluster_i] * H_cluster_i;
HTPsiInvH_cluster_i = den_mat_t(BH.transpose() * D_inv_[cluster_i] * BH);
}
else {
sp_mat_t H_cluster_i(num_data_per_cluster_[cluster_i], num_leaves);
H_cluster_i.setFromTriplets(entries_H_cluster_i.begin(), entries_H_cluster_i.end());
HTYAux -= H_cluster_i.transpose() * y_aux_[cluster_i];//minus sign since y_aux_ has been calculated on the gradient = F-y (and not y-F)
if (only_grouped_REs_use_woodbury_identity_) {
sp_mat_t ZtH_cluster_i = Zt_[cluster_i] * H_cluster_i;
T_mat MInvSqrtZtH;
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
MInvSqrtZtH = chol_facts_[cluster_i].diagonal().array().inverse().matrix().asDiagonal() * ZtH_cluster_i;
}
else {
CalcPsiInvSqrtH(ZtH_cluster_i, MInvSqrtZtH, cluster_i, true);
}
HTPsiInvH_cluster_i = H_cluster_i.transpose() * H_cluster_i - MInvSqrtZtH.transpose() * MInvSqrtZtH;
}
else {
T_mat PsiInvSqrtH;
CalcPsiInvSqrtH(H_cluster_i, PsiInvSqrtH, cluster_i, true);
HTPsiInvH_cluster_i = PsiInvSqrtH.transpose() * PsiInvSqrtH;
}
}
HTPsiInvH += HTPsiInvH_cluster_i;
}
HTYAux *= marg_variance;
vec_t new_leaf_values = HTPsiInvH.llt().solve(HTYAux);
for (int i = 0; i < num_leaves; ++i) {
leaf_values[i] = new_leaf_values[i];
}
}//end NewtonUpdateLeafValues
private:
// RESPONSE DATA
/*! \brief Number of data points */
data_size_t num_data_;
/*! \brief If true, the response variables have a Gaussian likelihood, otherwise not */
bool gauss_likelihood_ = true;
/*! \brief Likelihood objects */
std::map<gp_id_t, std::unique_ptr<Likelihood<T_chol>>> likelihood_;
/*! \brief Value of negative log-likelihood or approximate marginal negative log-likelihood for non-Gaussian data */
double neg_log_likelihood_;
/*! \brief Value of negative log-likelihood or approximate marginal negative log-likelihood for non-Gaussian data of previous iteration in optimization used for convergence checking */
double neg_log_likelihood_lag1_;
/*! \brief Value of negative log-likelihood or approximate marginal negative log-likelihood for non-Gaussian data after the linear regression coefficients are updated (this equals neg_log_likelihood_lag1_ if there are no regression coefficients). This is used for step-size checking for the covariance parameters */
double neg_log_likelihood_after_lin_coef_update_;
/*! \brief Key: labels of independent realizations of REs/GPs, value: data y */
std::map<gp_id_t, vec_t> y_;
/*! \brief Copy of response data (used only for Gaussian data and if there are also linear covariates since then y_ is modified during the optimization algorithm and this contains the original data) */
vec_t y_vec_;
/*! \brief Key: labels of independent realizations of REs/GPs, value: data y of integer type (used only for non-Gaussian likelihood) */
std::map<gp_id_t, vec_int_t> y_int_;
// Note: the response variable data is saved in y_ / y_int_ (depending on the likelihood type) for Gaussian data with no covariates and for all non-Gaussian data.
// For Gaussian data with covariates, the response variable is saved in y_vec_ and y_ is replaced by y - X * beta during the optimization
/*! \brief Key: labels of independent realizations of REs/GPs, value: Psi^-1*y_ (used for various computations) */
std::map<gp_id_t, vec_t> y_aux_;
/*! \brief Key: labels of independent realizations of REs/GPs, value: L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z) (used for various computations when only_grouped_REs_use_woodbury_identity_==true) */
std::map<gp_id_t, vec_t> y_tilde_;
/*! \brief Key: labels of independent realizations of REs/GPs, value: Z * L ^ -T * L ^ -1 * Z ^ T * y, L = chol(Sigma^-1 + Z^T * Z) (used for various computations when only_grouped_REs_use_woodbury_identity_==true) */
std::map<gp_id_t, vec_t> y_tilde2_;
/*! \brief Indicates whether y_aux_ has been calculated */
bool y_aux_has_been_calculated_ = false;
/*! \brief If true, the response variable data has been set (otherwise y_ is empty) */
bool y_has_been_set_ = false;
// GROUPED RANDOM EFFECTS
/*! \brief Number of grouped (intercept) random effects */
data_size_t num_re_group_ = 0;
/*! \brief Number of grouped random coefficients */
data_size_t num_re_group_rand_coef_ = 0;
/*! \brief Indices that relate every random coefficients to a "base" intercept grouped random effect. Counting starts at 1 (and ends at the number of base intercept random effects). Length of vector = num_re_group_rand_coef_. */
std::vector<int> ind_effect_group_rand_coef_;
/*! \brief Total number of grouped random effects (random intercepts plus random coefficients (slopes)) */
data_size_t num_re_group_total_ = 0;
// GAUSSIAN PROCESS
/*! \brief 1 if there is a Gaussian process 0 otherwise */
data_size_t num_gp_ = 0;
/*! \brief Type of GP. 0 = classical (spatial) GP, 1 = spatio-temporal GP */ //TODO: remove?
int8_t GP_type_ = 0;
/*! \brief Number of random coefficient GPs */
data_size_t num_gp_rand_coef_ = 0;
/*! \brief Total number of GPs (random intercepts plus random coefficients) */
data_size_t num_gp_total_ = 0;
/*! \brief Index in the vector of random effect components (in the values of 're_comps_') of the intercept GP associated with the random coefficient GPs */
int ind_intercept_gp_;
/*! \brief Dimension of the coordinates (=number of features) for Gaussian process */
int dim_gp_coords_ = 2;//required to save since it is needed in the Predict() function when predictions are made for new independent realizations of GPs
/*! \brief Type of covariance(kernel) function for Gaussian processes */
string_t cov_fct_ = "exponential";//required to also save here since it is needed in the Predict() function when predictions are made for new independent realizations of GPs
/*! \brief Shape parameter of covariance function (=smoothness parameter for Matern covariance) */
double cov_fct_shape_ = 0.;
// RANDOM EFFECT / GP COMPONENTS
/*! \brief Keys: labels of independent realizations of REs/GPs, values: vectors with individual RE/GP components */
std::map<gp_id_t, std::vector<std::shared_ptr<RECompBase<T_mat>>>> re_comps_;
/*! \brief Indices of parameters of RE components in global parameter vector cov_pars. ind_par_[i] and ind_par_[i+1] -1 are the indices of the first and last parameter of component number i (counting starts at 1) */
std::vector<data_size_t> ind_par_;
/*! \brief Number of covariance parameters */
data_size_t num_cov_par_;
/*! \brief Total number of random effect components (grouped REs plus other GPs) */
data_size_t num_comps_total_ = 0;
// SPECIAL CASES OF RE MODELS FOR FASTER CALCULATIONS
/*! \brief If true, the Sherman-Morrison-Woodbury matrix inversion formula is used for calculating the inverse of the covariance matrix (only used if there are only grouped REs and no Gaussian processes) */
bool only_grouped_REs_use_woodbury_identity_ = false;
/*! \brief True if there is only one grouped random effect component, and (all) calculations are done on the b-scale instead of the Zb-scale (currently used only for non-Gaussian data) */
bool only_one_grouped_RE_calculations_on_RE_scale_ = false;
/*! \brief True if there is only one grouped random effect component for Gaussian data, and calculations for predictions (only) are done on the b-scale instead of the Zb-scale */
bool only_one_grouped_RE_calculations_on_RE_scale_for_prediction_ = false;
/*! \brief True if there is only one GP random effect component, and calculations are done on the b-scale instead of the Zb-scale (currently used only for non-Gaussian data) */
bool only_one_GP_calculations_on_RE_scale_ = false;
// COVARIANCE MATRIX AND CHOLESKY FACTORS OF IT
/*! \brief Key: labels of independent realizations of REs/GPs, values: Cholesky decomposition solver of covariance matrices Psi (for Gaussian data) */
std::map<gp_id_t, T_chol> chol_facts_solve_;
/*! \brief Key: labels of independent realizations of REs/GPs, values: Cholesky factors of Psi matrices */ //TODO: above needed or can pattern be saved somewhere else?
std::map<gp_id_t, T_mat> chol_facts_;
/*! \brief Key: labels of independent realizations of REs/GPs, values: Identity matrices used for calculation of inverse covariance matrix */ //TODO: remove and construct on demand?
std::map<gp_id_t, T_mat> Id_;
/*! \brief Key: labels of independent realizations of REs/GPs, values: Identity matrices used for calculation of inverse covariance matrix */
std::map<gp_id_t, cs> Id_cs_;
/*! \brief If true, a symbolic decomposition is first done when calculating the Cholesky factor of the covariance matrix (only for sparse matrices) */
bool do_symbolic_decomposition_ = true;
/*! \brief Collects inverse covariance matrices Psi^{-1} (usually not saved, but used e.g. in Fisher scoring without the Vecchia approximation) */
std::map<gp_id_t, T_mat> psi_inv_;
/*! \brief Inverse covariance matrices Sigma^-1 of random effects. This is only used if only_grouped_REs_use_woodbury_identity_==true (if there are only grouped REs) */
std::map<gp_id_t, sp_mat_t> SigmaI_;
/*! \brief Pointer to covariance matrix of the random effects (sum of all components). This is only used for non-Gaussian data and if only_grouped_REs_use_woodbury_identity_==false. In the Gaussian case this does not need to be saved */
std::map<gp_id_t, std::shared_ptr<T_mat>> ZSigmaZt_;
// COVARIATE DATA FOR LINEAR REGRESSION TERM
/*! \brief If true, the model includes covariates in a linear regression term */
bool has_covariates_ = false;
/*! \brief Number of covariates */
int num_coef_;
/*! \brief Covariate data */
den_mat_t X_;
// OPTIMIZER PROPERTIES
/*! \brief List of supported optimizers for covariance parameters */
const std::set<string_t> SUPPORTED_OPTIM_COV_PAR_{ "gradient_descent", "fisher_scoring" };
/*! \brief List of supported optimizers for regression coefficients */
const std::set<string_t> SUPPORTED_OPTIM_COEF_{ "gradient_descent", "wls" };
/*! \brief List of supported convergence criteria used for terminating the optimization algorithm */
const std::set<string_t> SUPPORTED_CONV_CRIT_{ "relative_change_in_parameters", "relative_change_in_log_likelihood" };
/*! \brief Maximal number of steps for which step halving for the learning rate is done */
int MAX_NUMBER_HALVING_STEPS_ = 30;
// WOODBURY IDENTITY FOR GROUPED RANDOM EFFECTS ONLY
/*! \brief Collects matrices Z^T (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects, otherwise these matrices are saved only in the independent RE components) */
std::map<gp_id_t, sp_mat_t> Zt_;
/*! \brief Collects matrices Z^TZ (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects, otherwise these matrices are saved only in the independent RE components) */
std::map<gp_id_t, sp_mat_t> ZtZ_;
/*! \brief Collects vectors Z^Ty (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects) */
std::map<gp_id_t, vec_t> Zty_;
/*! \brief Cumulative number of random effects for components (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects, otherwise these numbers are saved only in the independent RE components) */
std::map<gp_id_t, std::vector<data_size_t>> cum_num_rand_eff_;//The random effects of component j start at cum_num_rand_eff_[0][j]+1 and end at cum_num_rand_eff_[0][j+1]
/*! \brief Sum of squared entries of Z_j for every random effect component (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects) */
std::map<gp_id_t, std::vector<double>> Zj_square_sum_;
/*! \brief Collects matrices Z^T * Z_j for every random effect component (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects) */
std::map<gp_id_t, std::vector<sp_mat_t>> ZtZj_;
/*! \brief Collects matrices L^-1 * Z^T * Z_j for every random effect component (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects and when Fisher scoring is done) */
std::map<gp_id_t, std::vector<T_mat>> LInvZtZj_;
// VECCHIA APPROXIMATION for GP
/*! \brief If true, the Vecchia approximation is used for the Gaussian process */
bool vecchia_approx_ = false;
/*! \brief If true, a memory optimized version of the Vecchia approximation is used (at the expense of being slightly slower). THIS IS CURRENTLY NOT IMPLEMENTED */
bool vecchia_approx_optim_memory = false;
/*! \brief The number of neighbors used in the Vecchia approximation */
int num_neighbors_;
/*! \brief Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering */
string_t vecchia_ordering_ = "none";
/*! \brief The number of neighbors used in the Vecchia approximation for making predictions */
int num_neighbors_pred_;
/*! \brief Ordering used in the Vecchia approximation for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions */
string_t vecchia_pred_type_ = "order_obs_first_cond_obs_only";//This is saved here and not simply set in the prediction function since it needs to be used repeatedly in the GPBoost algorithm when making predictions in "regression_metric.hpp" and the way predictions are done for the Vecchia approximation should be decoupled from the boosting algorithm
/*! \brief List of supported prediction types for the Vecchia approximation */
const std::set<string_t> SUPPORTED_VECCHIA_PRED_TYPES_{ "order_obs_first_cond_obs_only",
"order_obs_first_cond_all", "order_pred_first",
"latent_order_obs_first_cond_obs_only", "latent_order_obs_first_cond_all" };
/*! \brief Collects indices of nearest neighbors (used for Vecchia approximation) */
std::map<gp_id_t, std::vector<std::vector<int>>> nearest_neighbors_;
/*! \brief Distances between locations and their nearest neighbors (this is used only if the Vecchia approximation is used, otherwise the distances are saved directly in the base GP component) */
std::map<gp_id_t, std::vector<den_mat_t>> dist_obs_neighbors_;
/*! \brief Distances between nearest neighbors for all locations (this is used only if the Vecchia approximation is used, otherwise the distances are saved directly in the base GP component) */
std::map<gp_id_t, std::vector<den_mat_t>> dist_between_neighbors_;//TODO: this contains duplicate information (i.e. distances might be saved redundantly several times). But there is a trade-off between storage and computational speed. I currently don't see a way of saving unique distances without copying them when using them.
/*! \brief Outer product of covariate vector at observations and neighbors with itself. First index = cluster, second index = data point i, third index = GP number j (this is used only if the Vecchia approximation is used; otherwise this is saved directly in the GP component using Z_) */
std::map<gp_id_t, std::vector<std::vector<den_mat_t>>> z_outer_z_obs_neighbors_;
/*! \brief Collects matrices B = I - A (=Cholesky factor of inverse covariance) for Vecchia approximation */
std::map<gp_id_t, sp_mat_t> B_;
/*! \brief Collects diagonal matrices D^-1 for Vecchia approximation */
std::map<gp_id_t, sp_mat_t> D_inv_;
/*! \brief Collects derivatives of matrices B ( = derivative of matrix -A) for Vecchia approximation */
std::map<gp_id_t, std::vector<sp_mat_t>> B_grad_;
/*! \brief Collects derivatives of matrices D for Vecchia approximation */
std::map<gp_id_t, std::vector<sp_mat_t>> D_grad_;
/*! \brief Triplets for initializing the matrices B */
std::map<gp_id_t, std::vector<Triplet_t>> entries_init_B_;
/*! \brief Triplets for initializing the matrices B_grad */
std::map<gp_id_t, std::vector<Triplet_t>> entries_init_B_grad_;
// CLUSTERs of INDEPENDENT REALIZATIONS
/*! \brief Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points */
std::map<gp_id_t, std::vector<int>> data_indices_per_cluster_;
/*! \brief Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization */
std::map<gp_id_t, int> num_data_per_cluster_;
/*! \brief Number of independent realizations of the REs/GPs */
data_size_t num_clusters_;
/*! \brief Unique labels of independent realizations */
std::vector<gp_id_t> unique_clusters_;
/*! \brief Variance of idiosyncratic error term (nugget effect) */
double sigma2_;
// PREDICTION
/*! \brief Cluster IDs for prediction */
std::vector<gp_id_t> cluster_ids_data_pred_;
/*! \brief Levels of grouped RE for prediction */
std::vector<std::vector<string_t>> re_group_levels_pred_;
/*! \brief Covariate data for random coefficients of grouped REs for prediction */
std::vector<double> re_group_rand_coef_data_pred_;
/*! \brief Coordinates for GP for prediction */
std::vector<double> gp_coords_data_pred_;
/*! \brief Covariate data for random coefficient GPs for prediction */
std::vector<double> gp_rand_coef_data_pred_;
/*! \brief Covariate data for the linear regression term for prediction */
std::vector<double> covariate_data_pred_;
/*! \brief Number of prediction points */
data_size_t num_data_pred_;
/*! \brief Nesterov schedule */
double NesterovSchedule(int iter, int momentum_schedule_version = 0,
double nesterov_acc_rate = 0.5, int momentum_offset = 2) {
if (iter < momentum_offset) {
return(0.);
}
else {
if (momentum_schedule_version == 0) {
return(nesterov_acc_rate);
}
else if (momentum_schedule_version == 1) {
return(1. - (3. / (6. + iter)));
}
else {
return(0.);
}
}
}
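//Illustration of the schedule above (not part of the original code): with the default
//momentum_offset = 2, version 1 returns 0 for iter < 2 and 1 - 3 / (6 + iter) afterwards,
//e.g. 0.625 for iter = 2 and 0.8125 for iter = 10, approaching 1 as iter grows;
//version 0 returns the constant rate nesterov_acc_rate after the offset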
/*! \brief mutex for threading safe call */
std::mutex mutex_;
/*! \brief Constructs identity matrices if sparse matrices are used (used for calculating inverse covariance matrix) */
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void ConstructI(gp_id_t cluster_i) {
int dim_I = only_grouped_REs_use_woodbury_identity_ ? cum_num_rand_eff_[cluster_i][num_comps_total_] : num_data_per_cluster_[cluster_i];
T3 I(dim_I, dim_I);//identity matrix for calculating precision matrix
I.setIdentity();
Id_.insert({ cluster_i, I });
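//Note on the CSparse interop below: the 'cs' struct aliases Eigen's compressed sparse
//column (CSC) storage without copying: p = column pointers (outerIndexPtr), i = row
//indices (innerIndexPtr), x = values, and nz = -1 marks compressed-column (as opposed
//to triplet) format in CSparse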
cs Id_cs = cs();//same for cs type //TODO: construct this independently of Id_, but then care needs to be taken for deleting the pointer objects.
Id_cs.nzmax = dim_I;
Id_cs.m = dim_I;
Id_cs.n = dim_I;
Id_[cluster_i].makeCompressed();
Id_cs.p = reinterpret_cast<csi*>(Id_[cluster_i].outerIndexPtr());
Id_cs.i = reinterpret_cast<csi*>(Id_[cluster_i].innerIndexPtr());
Id_cs.x = Id_[cluster_i].valuePtr();
Id_cs.nz = -1;
Id_cs_.insert({ cluster_i, Id_cs });
}
/*! \brief Constructs identity matrices if dense matrices are used (used for calculating inverse covariance matrix) */
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void ConstructI(gp_id_t cluster_i) {
int dim_I = only_grouped_REs_use_woodbury_identity_ ? cum_num_rand_eff_[cluster_i][num_comps_total_] : num_data_per_cluster_[cluster_i];
T3 I(dim_I, dim_I);//identity matrix for calculating precision matrix
I.setIdentity();
Id_.insert({ cluster_i, I });
}
/*!
* \brief Set response variable data y_ (and calculate Z^T * y if only_grouped_REs_use_woodbury_identity_ == true)
* \param y_data Response variable data
*/
void SetY(const double* y_data) {
if (gauss_likelihood_) {
if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {
y_[unique_clusters_[0]] = Eigen::Map<const vec_t>(y_data, num_data_);//TODO: Is there a more efficient way that avoids copying?
}
else {
for (const auto& cluster_i : unique_clusters_) {
y_[cluster_i] = vec_t(num_data_per_cluster_[cluster_i]);//TODO: Is there a more efficient way that avoids copying?
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
y_[cluster_i][j] = y_data[data_indices_per_cluster_[cluster_i][j]];
}
}
}
if (only_grouped_REs_use_woodbury_identity_) {
CalcZtY();
}
}//end gauss_likelihood_
else {//not gauss_likelihood_
(*likelihood_[unique_clusters_[0]]).template CheckY<double>(y_data, num_data_);
if (likelihood_[unique_clusters_[0]]->label_type() == "int") {
for (const auto& cluster_i : unique_clusters_) {
y_int_[cluster_i] = vec_int_t(num_data_per_cluster_[cluster_i]);
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
y_int_[cluster_i][j] = static_cast<int>(y_data[data_indices_per_cluster_[cluster_i][j]]);
}
(*likelihood_[cluster_i]).template CalculateNormalizingConstant<int>(y_int_[cluster_i].data(), num_data_per_cluster_[cluster_i]);
}
}
else if (likelihood_[unique_clusters_[0]]->label_type() == "double") {
for (const auto& cluster_i : unique_clusters_) {
y_[cluster_i] = vec_t(num_data_per_cluster_[cluster_i]);
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
y_[cluster_i][j] = y_data[data_indices_per_cluster_[cluster_i][j]];
}
(*likelihood_[cluster_i]).template CalculateNormalizingConstant<double>(y_[cluster_i].data(), num_data_per_cluster_[cluster_i]);
}
}
}//end not gauss_likelihood_
y_has_been_set_ = true;
}
/*!
* \brief Set response variable data y_ if data is of type float (used for GPBoost algorithm since labels are float)
* \param y_data Response variable data
*/
void SetY(const float* y_data) {
if (gauss_likelihood_) {
Log::REFatal("SetY is not implemented for Gaussian data and lables of type float (since it is not needed)");
}//end gauss_likelihood_
else {//not gauss_likelihood_
(*likelihood_[unique_clusters_[0]]).template CheckY<float>(y_data, num_data_);
if (likelihood_[unique_clusters_[0]]->label_type() == "int") {
for (const auto& cluster_i : unique_clusters_) {
y_int_[cluster_i] = vec_int_t(num_data_per_cluster_[cluster_i]);
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
y_int_[cluster_i][j] = static_cast<int>(y_data[data_indices_per_cluster_[cluster_i][j]]);
}
(*likelihood_[cluster_i]).template CalculateNormalizingConstant<int>(y_int_[cluster_i].data(), num_data_per_cluster_[cluster_i]);
}
}
else if (likelihood_[unique_clusters_[0]]->label_type() == "double") {
for (const auto& cluster_i : unique_clusters_) {
y_[cluster_i] = vec_t(num_data_per_cluster_[cluster_i]);
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
y_[cluster_i][j] = static_cast<double>(y_data[data_indices_per_cluster_[cluster_i][j]]);
}
(*likelihood_[cluster_i]).template CalculateNormalizingConstant<double>(y_[cluster_i].data(), num_data_per_cluster_[cluster_i]);
}
}
}
y_has_been_set_ = true;
}
/*!
* \brief Return (last used) response variable data
* \param[out] y Response variable data (memory needs to be preallocated)
*/
void GetY(double* y) {
if (!y_has_been_set_) {
Log::REFatal("Respone variable data has not been set");
}
if (has_covariates_ && gauss_likelihood_) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_; ++i) {
y[i] = y_vec_[i];
}
}
else if (likelihood_[unique_clusters_[0]]->label_type() == "double") {
for (const auto& cluster_i : unique_clusters_) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_[cluster_i]; ++i) {
y[data_indices_per_cluster_[cluster_i][i]] = y_[cluster_i][i];
}
}
}
else if (likelihood_[unique_clusters_[0]]->label_type() == "int") {
for (const auto& cluster_i : unique_clusters_) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_[cluster_i]; ++i) {
y[data_indices_per_cluster_[cluster_i][i]] = y_int_[cluster_i][i];
}
}
}
}
/*!
* \brief Return covariate data
* \param[out] covariate_data covariate data
*/
void GetCovariateData(double* covariate_data) {
if (!has_covariates_) {
Log::REFatal("Model does not have covariates for a linear predictor");
}
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_ * num_coef_; ++i) {
covariate_data[i] = X_.data()[i];
}
}
/*!
* \brief Calculate Z^T*y (use only when only_grouped_REs_use_woodbury_identity_ == true)
*/
void CalcZtY() {
for (const auto& cluster_i : unique_clusters_) {
Zty_[cluster_i] = Zt_[cluster_i] * y_[cluster_i];
}
}
/*!
* \brief Get y_aux = Psi^-1*y
* \param[out] y_aux Psi^-1*y (=y_aux_). Array needs to be pre-allocated of length num_data_
*/
void GetYAux(double* y_aux) {
CHECK(y_aux_has_been_calculated_);
if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_data_; ++j) {
y_aux[j] = y_aux_[unique_clusters_[0]][j];
}
}
else {
for (const auto& cluster_i : unique_clusters_) {
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
y_aux[data_indices_per_cluster_[cluster_i][j]] = y_aux_[cluster_i][j];
}
}
}
}
/*!
* \brief Get y_aux = Psi^-1*y
* \param[out] y_aux Psi^-1*y (=y_aux_). This vector needs to be pre-allocated of length num_data_
*/
void GetYAux(vec_t& y_aux) {
CHECK(y_aux_has_been_calculated_);
if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {
y_aux = y_aux_[unique_clusters_[0]];
}
else {
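//Note: indexing a vec_t with a std::vector of indices as done below relies on Eigen's
//slicing with arbitrary index lists (available since Eigen 3.4)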
for (const auto& cluster_i : unique_clusters_) {
y_aux(data_indices_per_cluster_[cluster_i]) = y_aux_[cluster_i];
}
}
}
/*!
* \brief Calculate the gradient of the Laplace-approximated negative log-likelihood with respect to the fixed effects F (only used for non-Gaussian data)
* \param[out] grad_F Gradient of the Laplace-approximated negative log-likelihood with respect to the fixed effects F. This vector needs to be pre-allocated of length num_data_
* \param fixed_effects Fixed effects component of location parameter
*/
void CalcGradFLaplace(double* grad_F, const double* fixed_effects = nullptr) {
const double* fixed_effects_cluster_i_ptr = nullptr;
vec_t fixed_effects_cluster_i;
for (const auto& cluster_i : unique_clusters_) {
vec_t grad_F_cluster_i(num_data_per_cluster_[cluster_i]);
//map fixed effects to clusters (if needed)
if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {//only one cluster / independent realization and order of data does not matter
fixed_effects_cluster_i_ptr = fixed_effects;
}
else if (fixed_effects != nullptr) {//more than one cluster and order of samples matters
fixed_effects_cluster_i = vec_t(num_data_per_cluster_[cluster_i]);//TODO: Is there a more efficient way that avoids copying?
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
fixed_effects_cluster_i[j] = fixed_effects[data_indices_per_cluster_[cluster_i][j]];
}
fixed_effects_cluster_i_ptr = fixed_effects_cluster_i.data();
}
if (vecchia_approx_) {//vecchia_approx_
likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxVecchia(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
B_[cluster_i],
D_inv_[cluster_i],
B_grad_[cluster_i],
D_grad_[cluster_i],
false,
true,
nullptr,
grad_F_cluster_i,
false);
}//end vecchia_approx_
else {//not vecchia_approx_
if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
(*likelihood_[cluster_i]).template CalcGradNegMargLikelihoodLAApproxGroupedRE<T_mat>(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
SigmaI_[cluster_i],
Zt_[cluster_i],
cum_num_rand_eff_[cluster_i],
false,
true,
nullptr,
grad_F_cluster_i,
false);
}
else if (only_one_grouped_RE_calculations_on_RE_scale_) {
likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
re_comps_[cluster_i][0]->cov_pars_[0],
re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
false,
true,
nullptr,
grad_F_cluster_i,
false);
}
else if (only_one_GP_calculations_on_RE_scale_) {
(*likelihood_[cluster_i]).template CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale<T_mat>(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
ZSigmaZt_[cluster_i], //Note: ZSigmaZt_ contains only Sigma if only_one_GP_calculations_on_RE_scale_==true
re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
re_comps_[cluster_i],
false,
true,
nullptr,
grad_F_cluster_i,
false);
}
else {
(*likelihood_[cluster_i]).template CalcGradNegMargLikelihoodLAApproxStable<T_mat>(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
ZSigmaZt_[cluster_i],
re_comps_[cluster_i],
false,
true,
nullptr,
grad_F_cluster_i,
false);
}
}//end not vecchia_approx_
//write on output
if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {//only one cluster / independent realization and order of data does not matter
#pragma omp parallel for schedule(static)//write on output
for (int j = 0; j < num_data_; ++j) {
grad_F[j] = grad_F_cluster_i[j];
}
}
else {//more than one cluster and order of samples matters
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
grad_F[data_indices_per_cluster_[cluster_i][j]] = grad_F_cluster_i[j];
}
} // end more than one cluster
}//end loop over cluster
}//end CalcGradFLaplace
/*!
* \brief Do Cholesky decomposition and save in chol_facts_ (actual matrix) and chol_facts_solve_ (Eigen solver) if sparse matrices are used
* \param psi Covariance matrix for which the Cholesky decomposition should be done
* \param cluster_i Cluster index for which the Cholesky factor is calculated
* \param analyze_pattern If true, the pattern is analyzed as well (only for sparse matrices)
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcChol(T3& psi, gp_id_t cluster_i, bool analyze_pattern) {
if (analyze_pattern) {
chol_facts_solve_[cluster_i].analyzePattern(psi);
}
chol_facts_solve_[cluster_i].factorize(psi);
chol_facts_[cluster_i] = chol_facts_solve_[cluster_i].matrixL();
chol_facts_[cluster_i].makeCompressed();
}
/*!
* \brief Do Cholesky decomposition and save in chol_facts_ (actual matrix) and chol_facts_solve_ (Eigen solver) if dense matrices are used
* \param psi Covariance matrix for which the Cholesky decomposition should be done
* \param cluster_i Cluster index for which the Cholesky factor is calculated
* \param analyze_pattern If true, the pattern is analyzed as well (only for sparse matrices)
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcChol(T3& psi, gp_id_t cluster_i, bool analyze_pattern) {
if (analyze_pattern) {
Log::REWarning("Pattern of Cholesky factor is not analyzed when dense matrices are used.");
}
chol_facts_solve_[cluster_i].compute(psi);
chol_facts_[cluster_i] = chol_facts_solve_[cluster_i].matrixL();
}
/*!
* \brief Calculate Psi^(-1) if sparse matrices are used
* \param[out] psi_inv Inverse covariance matrix
* \param cluster_i Cluster index for which Psi^(-1) is calculated
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInv(T3& psi_inv, gp_id_t cluster_i) {
if (only_grouped_REs_use_woodbury_identity_) {
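//Note: this uses the Sherman-Morrison-Woodbury identity: with Psi = I + Z * Sigma * Z^T
//and M = Sigma^{-1} + Z^T * Z, one has Psi^{-1} = I - Z * M^{-1} * Z^T. With L = chol(M),
//MInvSqrtZt = L^{-1} * Z^T below, so that Psi^{-1} = I - MInvSqrtZt^T * MInvSqrtZt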
sp_mat_t MInvSqrtZt;
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
MInvSqrtZt = chol_facts_[cluster_i].diagonal().array().inverse().matrix().asDiagonal() * Zt_[cluster_i];
}
else {
sp_mat_t L_inv;
eigen_sp_Lower_sp_RHS_cs_solve(chol_facts_[cluster_i], Id_[cluster_i], L_inv, true);
MInvSqrtZt = L_inv * Zt_[cluster_i];
////Alternative option (crashes when eigen_sp_Lower_sp_RHS_cs_solve uses sp_Lower_sp_RHS_cs_solve / cs_spsolve due to Eigen bug)
//eigen_sp_Lower_sp_RHS_cs_solve(chol_facts_[cluster_i], Zt_[cluster_i], MInvSqrtZt, true);
}
psi_inv = -MInvSqrtZt.transpose() * MInvSqrtZt;//this is slow since n can be large (O(n^2*m))
psi_inv.diagonal().array() += 1.0;
}
else {
//Using CSparse function 'cs_spsolve'
cs L_cs = cs();//Prepare LHS
L_cs.nzmax = (int)chol_facts_[cluster_i].nonZeros();
L_cs.m = num_data_per_cluster_[cluster_i];
L_cs.n = num_data_per_cluster_[cluster_i];
L_cs.p = reinterpret_cast<csi*>(chol_facts_[cluster_i].outerIndexPtr());
L_cs.i = reinterpret_cast<csi*>(chol_facts_[cluster_i].innerIndexPtr());
L_cs.x = chol_facts_[cluster_i].valuePtr();
L_cs.nz = -1;
//Invert Cholesky factor
sp_mat_t L_inv;
sp_Lower_sp_RHS_cs_solve(&L_cs, &Id_cs_[cluster_i], L_inv, true);
psi_inv = L_inv.transpose() * L_inv;
////Version 2: doing sparse solving "by hand" but ignoring sparse RHS
//const double* val = chol_facts_[cluster_i].valuePtr();
//const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
//const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
//den_mat_t L_inv_dens = den_mat_t(Id_[cluster_i]);
//for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
// sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], L_inv_dens.data() + j * num_data_per_cluster_[cluster_i]);
//}
//const sp_mat_t L_inv = L_inv_dens.sparseView();
//psi_inv = L_inv.transpose() * L_inv;
////Version 1: let Eigen do the solving
//psi_inv = chol_facts_solve_[cluster_i].solve(Id_[cluster_i]);
}
}
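//Note on the Woodbury branch above: with M = Sigma^-1 + Z^T * Z and L = chol(M) (see 'CalcChol'),
//the matrix inversion lemma gives
//	Psi^-1 = (I + Z * Sigma * Z^T)^-1 = I - Z * M^-1 * Z^T = I - (L^-1 * Z^T)^T * (L^-1 * Z^T),
//which is why psi_inv is assembled as -MInvSqrtZt^T * MInvSqrtZt with 1's added on the diagonal.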
/*!
* \brief Calculate Psi^(-1) if dense matrices are used
* \param[out] psi_inv Inverse covariance matrix
* \param cluster_i Cluster index for which Psi^(-1) is calculated
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInv(T3& psi_inv, gp_id_t cluster_i) {
if (only_grouped_REs_use_woodbury_identity_) {//typically not called since only_grouped_REs_use_woodbury_identity_ is only true when there are only grouped REs, i.e., when sparse matrices are used
T3 MInvSqrtZt;
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
MInvSqrtZt = chol_facts_[cluster_i].diagonal().array().inverse().matrix().asDiagonal() * Zt_[cluster_i];
}
else {
MInvSqrtZt = Zt_[cluster_i];
#pragma omp parallel for schedule(static)//TODO: maybe sometimes faster without parallelization?
for (int j = 0; j < (int)MInvSqrtZt.cols(); ++j) {
L_solve(chol_facts_[cluster_i].data(), (int)chol_facts_[cluster_i].cols(), MInvSqrtZt.data() + j * (int)MInvSqrtZt.rows());//column j of the column-major matrix starts at data() + j * rows()
}
}
psi_inv = -MInvSqrtZt.transpose() * MInvSqrtZt;
psi_inv.diagonal().array() += 1.0;
}
else {
////Version 1
//psi_inv = chol_facts_solve_[cluster_i].solve(Id_[cluster_i]);
//Version 2: solving by hand
T3 L_inv = Id_[cluster_i];
#pragma omp parallel for schedule(static)//TODO: maybe sometimes faster without parallelization?
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], L_inv.data() + j * num_data_per_cluster_[cluster_i]);
}
//chol_facts_[cluster_i].triangularView<Eigen::Lower>().solveInPlace(L_inv); //slower
psi_inv = L_inv.transpose() * L_inv;
// Using dpotri from LAPACK does not work since LAPACK is not installed
//int info = 0;
//int n = num_data_per_cluster_[cluster_i];
//int lda = num_data_per_cluster_[cluster_i];
//char* uplo = "L";
//den_mat_t M = chol_facts_[cluster_i];
//BLASFUNC(dpotri)(uplo, &n, M.data(), &lda, &info);
}
}
/*!
* \brief Calculate Psi^(-0.5)H if sparse matrices are used. Used in 'NewtonUpdateLeafValues' and if only_grouped_REs_use_woodbury_identity_ == true
* \param H Right-hand side matrix H
* \param[out] PsiInvSqrtH Psi^(-0.5)H = solve(chol(Psi),H)
* \param cluster_i Cluster index for which Psi^(-0.5)H is calculated
* \param lower true if chol_facts_[cluster_i] is a lower triangular matrix
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInvSqrtH(sp_mat_t& H, T3& PsiInvSqrtH, gp_id_t cluster_i, bool lower = true) {
eigen_sp_Lower_sp_RHS_solve(chol_facts_[cluster_i], H, PsiInvSqrtH, lower);
//TODO: use eigen_sp_Lower_sp_RHS_cs_solve -> faster? (currently this crashes due to Eigen bug, see the definition of sp_Lower_sp_RHS_cs_solve for more details)
}
/*!
* \brief Calculate Psi^(-0.5)H if dense matrices are used. Used in 'NewtonUpdateLeafValues' and if only_grouped_REs_use_woodbury_identity_ == true
* \param H Right-hand side matrix H
* \param[out] PsiInvSqrtH Psi^(-0.5)H = solve(chol(Psi),H)
* \param cluster_i Cluster index for which Psi^(-0.5)H is calculated
* \param lower true if chol_facts_[cluster_i] is a lower triangular matrix
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInvSqrtH(sp_mat_t& H, T3& PsiInvSqrtH, gp_id_t cluster_i, bool lower = true) {
PsiInvSqrtH = den_mat_t(H);
#pragma omp parallel for schedule(static)
for (int j = 0; j < H.cols(); ++j) {
if (lower) {
L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], PsiInvSqrtH.data() + j * num_data_per_cluster_[cluster_i]);
}
else {
L_t_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], PsiInvSqrtH.data() + j * num_data_per_cluster_[cluster_i]);
}
}
}
///*!
//* \brief Calculate X^TPsi^(-1)X
//* \param X Covariate data matrix X
//* \param[out] XT_psi_inv_X X^TPsi^(-1)X
//*/
// template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
// void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
// den_mat_t BX;
// if (num_clusters_ == 1) {
// gp_id_t cluster0 = unique_clusters_[0];
// if (vecchia_approx_) {
// BX = B_[cluster0] * X;
// XT_psi_inv_X = BX.transpose() * D_inv_[cluster0] * BX;
// }
// else {
// BX = X;
// #pragma omp parallel for schedule(static)
// for (int j = 0; j < num_data_per_cluster_[cluster0]; ++j) {
// L_solve(chol_facts_[cluster0].data(), num_data_per_cluster_[cluster0], BX.data() + j * num_data_per_cluster_[cluster0]);
// }
// XT_psi_inv_X = BX.transpose() * BX;
// }
// }
// else {
// XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
// XT_psi_inv_X.setZero();
// for (const auto& cluster_i : unique_clusters_) {
// if (vecchia_approx_) {
// BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
// XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
// }
// else {
// BX = X(data_indices_per_cluster_[cluster_i], Eigen::all);
// #pragma omp parallel for schedule(static)
// for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
// L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], BX.data() + j * num_data_per_cluster_[cluster_i]);
// }
// XT_psi_inv_X += (BX.transpose() * BX);
// }
// }
// }
// }
// //same for sparse matrices
// template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
// void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
// den_mat_t BX;
// if (num_clusters_ == 1) {
// gp_id_t cluster0 = unique_clusters_[0];
// if (vecchia_approx_) {
// BX = B_[cluster0] * X;
// XT_psi_inv_X = BX.transpose() * D_inv_[cluster0] * BX;
// }
// else {
// BX = X;
// #pragma omp parallel for schedule(static)
// for (int j = 0; j < num_data_per_cluster_[cluster0]; ++j) {
// sp_L_solve(chol_facts_[cluster0].valuePtr(), chol_facts_[cluster0].innerIndexPtr(), chol_facts_[cluster0].outerIndexPtr(),
// num_data_per_cluster_[cluster0], BX.data() + j * num_data_per_cluster_[cluster0]);
// }
// XT_psi_inv_X = BX.transpose() * BX;
// }
// }
// else {
// XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
// XT_psi_inv_X.setZero();
// for (const auto& cluster_i : unique_clusters_) {
// if (vecchia_approx_) {
// BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
// XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
// }
// else {
// BX = X(data_indices_per_cluster_[cluster_i], Eigen::all);
// #pragma omp parallel for schedule(static)
// for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
// sp_L_solve(chol_facts_[cluster_i].valuePtr(), chol_facts_[cluster_i].innerIndexPtr(), chol_facts_[cluster_i].outerIndexPtr(),
// num_data_per_cluster_[cluster_i], BX.data() + j * num_data_per_cluster_[cluster_i]);
// }
// XT_psi_inv_X += (BX.transpose() * BX);
// }
// }
// }
// }
/*!
* \brief Calculate X^TPsi^(-1)X
* \param X Covariate data matrix X
* \param[out] XT_psi_inv_X X^TPsi^(-1)X
*/
void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {//only one cluster / independent GP realization
if (vecchia_approx_) {
den_mat_t BX = B_[unique_clusters_[0]] * X;
XT_psi_inv_X = BX.transpose() * D_inv_[unique_clusters_[0]] * BX;
}
else {
if (only_grouped_REs_use_woodbury_identity_) {
den_mat_t ZtX = Zt_[unique_clusters_[0]] * X;
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
den_mat_t MInvSqrtZtX = chol_facts_[unique_clusters_[0]].diagonal().array().inverse().matrix().asDiagonal() * ZtX;
XT_psi_inv_X = X.transpose() * X - MInvSqrtZtX.transpose() * MInvSqrtZtX;
}
else {
//TODO: use only one forward solve (sp_L_solve for sparse and L_solve for dense matrices) instead of using Eigen's solver which does two solves. But this requires a template function since the Cholesky factor is T_mat
XT_psi_inv_X = X.transpose() * X - ZtX.transpose() * chol_facts_solve_[unique_clusters_[0]].solve(ZtX);
}
}
else {
XT_psi_inv_X = X.transpose() * chol_facts_solve_[unique_clusters_[0]].solve(X);
}
}
}//end only one cluster / independent GP realization
else {//more than one cluster and order of samples matters
XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
XT_psi_inv_X.setZero();
den_mat_t BX;
for (const auto& cluster_i : unique_clusters_) {
if (vecchia_approx_) {
BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
}
else {
if (only_grouped_REs_use_woodbury_identity_) {
den_mat_t ZtX = Zt_[cluster_i] * (den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all);
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
den_mat_t MInvSqrtZtX = chol_facts_[cluster_i].diagonal().array().inverse().matrix().asDiagonal() * ZtX;
XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * (den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all) -
MInvSqrtZtX.transpose() * MInvSqrtZtX;
}
else {
XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * (den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all) -
ZtX.transpose() * chol_facts_solve_[cluster_i].solve(ZtX);
}
}
else {
XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * chol_facts_solve_[cluster_i].solve((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all));
}
}
}
}//end more than one cluster
}
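//Note on the Woodbury case above: with M = Sigma^-1 + Z^T * Z, Psi^-1 = I - Z * M^-1 * Z^T implies
//	X^T * Psi^-1 * X = X^T * X - (Z^T * X)^T * M^-1 * (Z^T * X),
//so only a system of the (typically much smaller) dimension of the random effects has to be solved
//instead of one of dimension n.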
/*!
* \brief Initialize data structures for handling independent realizations of the Gaussian processes. Results are written to the output arguments.
* \param num_data Number of data points
* \param cluster_ids_data IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization)
* \param[out] num_data_per_cluster Keys: labels of independent clusters, values: number of data points per independent realization
* \param[out] data_indices_per_cluster Keys: labels of independent clusters, values: vectors with indices of the data points that belong to every cluster
* \param[out] unique_clusters Unique labels of independent realizations
* \param[out] num_clusters Number of independent clusters
*/
void SetUpGPIds(data_size_t num_data, const gp_id_t* cluster_ids_data,
std::map<gp_id_t, int>& num_data_per_cluster, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster,
std::vector<gp_id_t>& unique_clusters, data_size_t& num_clusters) {
if (cluster_ids_data != nullptr) {
for (int i = 0; i < num_data; ++i) {
if (num_data_per_cluster.find(cluster_ids_data[i]) == num_data_per_cluster.end()) {//first occurrence of cluster_ids_data[i]
unique_clusters.push_back(cluster_ids_data[i]);
num_data_per_cluster.insert({ cluster_ids_data[i], 1 });
std::vector<int> id;
id.push_back(i);
data_indices_per_cluster.insert({ cluster_ids_data[i], id });
}
else {
num_data_per_cluster[cluster_ids_data[i]] += 1;
data_indices_per_cluster[cluster_ids_data[i]].push_back(i);
}
}
num_clusters = (data_size_t)unique_clusters.size();
}
else {
unique_clusters.push_back(0);
num_data_per_cluster.insert({ 0, num_data });
num_clusters = 1;
std::vector<int> gp_id_vec(num_data);
for (int i = 0; i < num_data; ++i) {
gp_id_vec[i] = i;
}
data_indices_per_cluster.insert({ 0, gp_id_vec });
}
}
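///Illustration (not part of the class): a minimal, hypothetical call to 'SetUpGPIds'.
///For cluster IDs {0, 1, 0, 1, 1} and num_data = 5, one obtains
//	num_data_per_cluster = {0: 2, 1: 3}
//	data_indices_per_cluster = {0: [0, 2], 1: [1, 3, 4]}
//	unique_clusters = [0, 1], num_clusters = 2
///If 'cluster_ids_data' is a null pointer, all data points are assigned to a single cluster with label 0.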
/*!
* \brief Convert characters in 'const char* re_group_data' to matrix (num_re_group x num_data) with strings of group labels
* \param num_data Number of data points
* \param num_re_group Number of grouped random effects
* \param re_group_data Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param[out] re_group_levels Matrix of dimension num_re_group x num_data with strings of group labels for levels of grouped random effects
*/
void ConvertCharToStringGroupLevels(data_size_t num_data, data_size_t num_re_group,
const char* re_group_data, std::vector<std::vector<string_t>>& re_group_levels) {
int char_start = 0;
for (int ire = 0; ire < num_re_group; ++ire) {//TODO: catch / report potential error if format of re_group_data is not correct
for (int id = 0; id < num_data; ++id) {
int number_chars = 0;
while (re_group_data[char_start + number_chars] != '\0') {
number_chars++;
}
re_group_levels[ire][id] = std::string(re_group_data + char_start);
char_start += number_chars + 1;
}
}
}
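///Illustration (hypothetical input): for num_data = 2 and num_re_group = 2, the buffer
//	const char* re_group_data = "a\0b\0x\0y";//column-major: levels of effect 1 first, then effect 2
///yields re_group_levels = {{"a", "b"}, {"x", "y"}}. Note that 're_group_levels' must already be
///sized to num_re_group x num_data by the caller since this function only writes into it.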
/*!
* \brief Initialize likelihoods
* \param likelihood Likelihood name
*/
void InitializeLikelihoods(const string_t& likelihood) {
for (const auto& cluster_i : unique_clusters_) {
if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
likelihood_[cluster_i] = std::unique_ptr<Likelihood<T_chol>>(new Likelihood<T_chol>(likelihood,
num_data_per_cluster_[cluster_i],
cum_num_rand_eff_[cluster_i][num_comps_total_]));
}
else if (only_one_grouped_RE_calculations_on_RE_scale_) {
likelihood_[cluster_i] = std::unique_ptr<Likelihood<T_chol>>(new Likelihood<T_chol>(likelihood,
num_data_per_cluster_[cluster_i],
re_comps_[cluster_i][0]->GetNumUniqueREs()));
}
else if (only_one_GP_calculations_on_RE_scale_) {
likelihood_[cluster_i] = std::unique_ptr<Likelihood<T_chol>>(new Likelihood<T_chol>(likelihood,
num_data_per_cluster_[cluster_i],
re_comps_[cluster_i][0]->GetNumUniqueREs()));
}
else {
likelihood_[cluster_i] = std::unique_ptr<Likelihood<T_chol>>(new Likelihood<T_chol>(likelihood,
num_data_per_cluster_[cluster_i],
num_data_per_cluster_[cluster_i]));
}
if (!gauss_likelihood_) {
likelihood_[cluster_i]->InitializeModeAvec();
}
}
}
/*!
* \brief Function that determines
* (i) the indices (in ind_par_) of the covariance parameters of every random effect component in the vector of all covariance parameters
* (ii) the total number of covariance parameters
*/
void DetermineCovarianceParameterIndicesNumCovPars() {
// Determine num_cov_par_
num_cov_par_ = num_re_group_total_ + (2 * num_gp_total_);
if (gauss_likelihood_) {
num_cov_par_++;//nugget effect
}
// Determine ind_par_
ind_par_ = std::vector<data_size_t>();
//First re_comp has either index 0 or 1 (the latter if there is a nugget effect for Gaussian data)
if (gauss_likelihood_) {
ind_par_.push_back(1);
}
else {
ind_par_.push_back(0);
}
//Add indices of parameters of individual components in joint parameter vector
for (int j = 0; j < num_re_group_total_; ++j) {
ind_par_.push_back(ind_par_.back() + 1);//end points of parameter indices of components
}
for (int j = 0; j < num_gp_total_; ++j) {
ind_par_.push_back(ind_par_.back() + 2);//end points of parameter indices of components
}
}
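//Worked example: for a Gaussian likelihood with two grouped REs and one GP,
//num_cov_par_ = 2 + 2 * 1 + 1 = 5 and ind_par_ = [1, 2, 3, 5]: parameter 0 is the nugget effect,
//parameters [1,2) and [2,3) are the two group variances, and parameters [3,5) are the variance
//and range of the GP.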
/*!
* \brief Function that determines whether to use special options for estimation and prediction for certain special cases of random effects models
*/
void DetermineSpecialCasesModelsEstimationPrediction() {
// Decide whether to use the Woodbury identity (i.e. do matrix inversion on the b scale and not the Zb scale) for grouped random effects models only
if (num_re_group_ > 0 && num_gp_total_ == 0) {
do_symbolic_decomposition_ = true;//Symbolic decomposition is only done if sparse matrices are used
only_grouped_REs_use_woodbury_identity_ = true;//Faster to use Woodbury identity since the dimension of the random effects is typically much smaller than the number of data points
//Note: the use of the Woodbury identity is currently only implemented for grouped random effects (which is also the only use of it).
// If this should be applied to GPs in the future, adaptions need to be made e.g. in the calculations of the gradient (see y_tilde2_)
}
else {
do_symbolic_decomposition_ = false;
only_grouped_REs_use_woodbury_identity_ = false;
}
// Following are options that depend on the type of likelihood used
//Define options for faster calculations for special cases of RE models
only_one_GP_calculations_on_RE_scale_ = num_gp_total_ == 1 && num_comps_total_ == 1 && !gauss_likelihood_ && !vecchia_approx_;//If there is only one GP, we do calculations on the b-scale instead of Zb-scale (currently only for non-Gaussian data)
only_one_grouped_RE_calculations_on_RE_scale_ = num_re_group_total_ == 1 && num_comps_total_ == 1 && !gauss_likelihood_;//If there is only one grouped RE, we do (all) calculations on the b-scale instead of the Zb-scale (currently only for non-Gaussian data)
only_one_grouped_RE_calculations_on_RE_scale_for_prediction_ = num_re_group_total_ == 1 && num_comps_total_ == 1 && gauss_likelihood_;//If there is only one grouped RE, we do calculations for prediction on the b-scale instead of the Zb-scale (only effective for Gaussian data)
}
/*!
* \brief Initialize required matrices used when only_grouped_REs_use_woodbury_identity_==true
*/
void InitializeMatricesForOnlyGroupedREsUseWoodburyIdentity() {
CHECK(num_comps_total_ == num_re_group_total_);
Zt_ = std::map<gp_id_t, sp_mat_t>();
ZtZ_ = std::map<gp_id_t, sp_mat_t>();
cum_num_rand_eff_ = std::map<gp_id_t, std::vector<data_size_t>>();
Zj_square_sum_ = std::map<gp_id_t, std::vector<double>>();
ZtZj_ = std::map<gp_id_t, std::vector<sp_mat_t>>();
for (const auto& cluster_i : unique_clusters_) {
std::vector<data_size_t> cum_num_rand_eff_cluster_i(num_comps_total_ + 1);
cum_num_rand_eff_cluster_i[0] = 0;
//Determine number of rows and non-zero entries of Z
int non_zeros = 0;
int ncols = 0;
for (int j = 0; j < num_comps_total_; ++j) {
sp_mat_t* Z_j = re_comps_[cluster_i][j]->GetZ();
ncols += (int)Z_j->cols();
non_zeros += (int)Z_j->nonZeros();
cum_num_rand_eff_cluster_i[j + 1] = ncols;
}
//Create matrix Z and calculate sum(Z_j^2) = trace(Z_j^T * Z_j)
std::vector<Triplet_t> triplets;
triplets.reserve(non_zeros);
std::vector<double> Zj_square_sum_cluster_i(num_comps_total_);
int ncol_prev = 0;
for (int j = 0; j < num_comps_total_; ++j) {
sp_mat_t* Z_j = re_comps_[cluster_i][j]->GetZ();
for (int k = 0; k < Z_j->outerSize(); ++k) {
for (sp_mat_t::InnerIterator it(*Z_j, k); it; ++it) {
triplets.emplace_back(it.row(), ncol_prev + it.col(), it.value());
}
}
ncol_prev += (int)Z_j->cols();
Zj_square_sum_cluster_i[j] = Z_j->squaredNorm();
}
sp_mat_t Z_cluster_i(num_data_per_cluster_[cluster_i], ncols);
Z_cluster_i.setFromTriplets(triplets.begin(), triplets.end());
sp_mat_t Zt_cluster_i = Z_cluster_i.transpose();
sp_mat_t ZtZ_cluster_i = Zt_cluster_i * Z_cluster_i;
//Calculate Z^T * Z_j
std::vector<sp_mat_t> ZtZj_cluster_i(num_comps_total_);
for (int j = 0; j < num_comps_total_; ++j) {
sp_mat_t* Z_j = re_comps_[cluster_i][j]->GetZ();
ZtZj_cluster_i[j] = Zt_cluster_i * (*Z_j);
}
//Save all quantities
Zt_.insert({ cluster_i, Zt_cluster_i });
ZtZ_.insert({ cluster_i, ZtZ_cluster_i });
cum_num_rand_eff_.insert({ cluster_i, cum_num_rand_eff_cluster_i });
Zj_square_sum_.insert({ cluster_i, Zj_square_sum_cluster_i });
ZtZj_.insert({ cluster_i, ZtZj_cluster_i });
}
}
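//Note: the incidence matrices of the individual components are concatenated column-wise,
//Z = (Z_1, Z_2, ..., Z_K), so that Z * b stacks all random effects b = (b_1^T, ..., b_K^T)^T;
//'cum_num_rand_eff_' stores the corresponding column offsets of the Z_j in Z.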
/*!
* \brief Initialize identity matrices required for Gaussian data
*/
void InitializeIdentityMatricesForGaussianData() {
if (gauss_likelihood_) {
for (const auto& cluster_i : unique_clusters_) {
ConstructI<T_mat>(cluster_i);//Identity matrices needed for computing inverses of covariance matrices used in gradient descent for Gaussian data
}
}
}
/*!
* \brief Function that checks the compatibility of the chosen special options for estimation and prediction for certain special cases of random effects models
*/
void CheckCompatibilitySpecialOptions() {
//Some checks
if (only_one_GP_calculations_on_RE_scale_ && only_grouped_REs_use_woodbury_identity_) {
Log::REFatal("Cannot set both 'only_one_GP_calculations_on_RE_scale_' and 'only_grouped_REs_use_woodbury_identity_' to 'true'");
}
if (only_one_GP_calculations_on_RE_scale_ && only_one_grouped_RE_calculations_on_RE_scale_) {
Log::REFatal("Cannot set both 'only_one_GP_calculations_on_RE_scale_' and 'only_one_grouped_RE_calculations_on_RE_scale_' to 'true'");
}
if (vecchia_approx_) {//vecchia_approx_
if (num_re_group_total_ > 0) {
Log::REFatal("Vecchia approximation can currently not be used when there are grouped random effects");
}
}
if (only_one_GP_calculations_on_RE_scale_) {//only_one_GP_calculations_on_RE_scale_
if (gauss_likelihood_) {
Log::REFatal("Option 'only_one_GP_calculations_on_RE_scale_' is currently not implemented for Gaussian data");
}
if (vecchia_approx_) {
Log::REFatal("Option 'only_one_GP_calculations_on_RE_scale_' is currently not implemented for Vecchia approximation data");
}
CHECK(num_gp_total_ == 1);
CHECK(num_comps_total_ == 1);
CHECK(num_re_group_total_ == 0);
}
if (only_one_grouped_RE_calculations_on_RE_scale_) {//only_one_grouped_RE_calculations_on_RE_scale_
if (gauss_likelihood_) {
Log::REFatal("Option 'only_one_grouped_RE_calculations_on_RE_scale_' is currently not implemented for Gaussian data");
}
CHECK(!vecchia_approx_);
CHECK(num_gp_total_ == 0);
CHECK(num_comps_total_ == 1);
CHECK(num_re_group_total_ == 1);
}
if (only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {//only_one_grouped_RE_calculations_on_RE_scale_for_prediction_
CHECK(!vecchia_approx_);
CHECK(num_gp_total_ == 0);
CHECK(num_comps_total_ == 1);
CHECK(num_re_group_total_ == 1);
if (!gauss_likelihood_) {
Log::REFatal("Option 'only_one_grouped_RE_calculations_on_RE_scale_for_prediction_' is currently only effective for Gaussian data");
}
}
if (only_grouped_REs_use_woodbury_identity_) {//only_grouped_REs_use_woodbury_identity_
if (gauss_likelihood_ && only_one_grouped_RE_calculations_on_RE_scale_) {
Log::REFatal("Cannot enable 'only_one_grouped_RE_calculations_on_RE_scale_' if 'only_grouped_REs_use_woodbury_identity_' is enabled for Gaussian data");
}
CHECK(num_gp_total_ == 0);
CHECK(num_comps_total_ == num_re_group_total_);
}
}
/*!
* \brief Initialize individual component models and collect them in a container
* \param num_data Number of data points
* \param num_re_group Number of grouped random effects
* \param data_indices_per_cluster Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points
* \param cluster_i Index / label of the realization of the Gaussian process for which the components should be constructed
* \param re_group_levels Group levels for every grouped random effect
* \param num_data_per_cluster Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization
* \param num_re_group_rand_coef Number of grouped random coefficients
* \param re_group_rand_coef_data Covariate data for grouped random coefficients
* \param ind_effect_group_rand_coef Indices that relate every random coefficient to a "base" intercept grouped random effect. Counting starts at 1.
* \param num_gp Number of Gaussian processes (intercept only, random coefficients not counting)
* \param gp_coords_data Coordinates (features) for Gaussian process
* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
* \param num_gp_rand_coef Number of Gaussian process random coefficients
* \param cov_fct Type of covariance (kernel) function for Gaussian processes
* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance)
* \param ind_intercept_gp Index in the vector of random effect components (in the values of 're_comps_') of the intercept GP associated with the random coefficient GPs
* \param calculateZZt If true, the matrix Z*Z^T is calculated for grouped random effects and saved (usually not needed if Woodbury identity is used)
* \param[out] re_comps_cluster_i Container that collects the individual component models
*/
void CreateREComponents(data_size_t num_data,
data_size_t num_re_group,
std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster,
gp_id_t cluster_i,
std::vector<std::vector<string_t>>& re_group_levels,
std::map<gp_id_t, int>& num_data_per_cluster,
data_size_t num_re_group_rand_coef,
const double* re_group_rand_coef_data,
std::vector<int>& ind_effect_group_rand_coef,
data_size_t num_gp,
const double* gp_coords_data,
int dim_gp_coords,
const double* gp_rand_coef_data,
data_size_t num_gp_rand_coef,
const string_t cov_fct,
double cov_fct_shape,
int ind_intercept_gp,
bool calculateZZt,
std::vector<std::shared_ptr<RECompBase<T_mat>>>& re_comps_cluster_i) {
//Grouped REs
if (num_re_group > 0) {
for (int j = 0; j < num_re_group; ++j) {
std::vector<re_group_t> group_data;
for (const auto& id : data_indices_per_cluster[cluster_i]) {
group_data.push_back(re_group_levels[j][id]);//group_data_.push_back(std::string(re_group_data[j * num_data_ + id]));
}
re_comps_cluster_i.push_back(std::shared_ptr<RECompGroup<T_mat>>(new RECompGroup<T_mat>(
group_data,
calculateZZt,
!only_one_grouped_RE_calculations_on_RE_scale_)));
}
//Random slopes
if (num_re_group_rand_coef > 0) {
for (int j = 0; j < num_re_group_rand_coef; ++j) {
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster[cluster_i]) {
rand_coef_data.push_back(re_group_rand_coef_data[j * num_data + id]);
}
std::shared_ptr<RECompGroup<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGroup<T_mat>>(re_comps_cluster_i[ind_effect_group_rand_coef[j] - 1]);//Subtract 1 since ind_effect_group_rand_coef[j] starts counting at 1 not 0
re_comps_cluster_i.push_back(std::shared_ptr<RECompGroup<T_mat>>(new RECompGroup<T_mat>(
re_comp->random_effects_indices_of_data_.data(),
re_comp->num_data_,
re_comp->map_group_label_index_,
re_comp->num_group_,
rand_coef_data,
calculateZZt)));
}
}
}
//GPs
if (num_gp > 0) {
std::vector<double> gp_coords;
for (int j = 0; j < dim_gp_coords; ++j) {
for (const auto& id : data_indices_per_cluster[cluster_i]) {
gp_coords.push_back(gp_coords_data[j * num_data + id]);
}
}
den_mat_t gp_coords_mat = Eigen::Map<den_mat_t>(gp_coords.data(), num_data_per_cluster[cluster_i], dim_gp_coords);
re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T_mat>>(new RECompGP<T_mat>(
gp_coords_mat,
cov_fct,
cov_fct_shape,
true,
only_one_GP_calculations_on_RE_scale_)));
//Random slopes
if (num_gp_rand_coef > 0) {
for (int j = 0; j < num_gp_rand_coef; ++j) {
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster[cluster_i]) {
rand_coef_data.push_back(gp_rand_coef_data[j * num_data + id]);
}
std::shared_ptr<RECompGP<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGP<T_mat>>(re_comps_cluster_i[ind_intercept_gp]);
re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T_mat>>(new RECompGP<T_mat>(
re_comp->dist_,
re_comp->has_Z_,
&re_comp->Z_,
rand_coef_data,
cov_fct,
cov_fct_shape)));
}
}
}
}
/*!
* \brief Initialize individual component models and collect them in a container when the Vecchia approximation is used
* \param num_data Number of data points
* \param data_indices_per_cluster Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points
* \param cluster_i Index / label of the realization of the Gaussian process for which the components should be constructed
* \param num_data_per_cluster Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization
* \param gp_coords_data Coordinates (features) for Gaussian process
* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
* \param num_gp_rand_coef Number of Gaussian process random coefficients
* \param cov_fct Type of covariance (kernel) function for Gaussian processes
* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance)
* \param[out] re_comps_cluster_i Container that collects the individual component models
* \param[out] nearest_neighbors_cluster_i Collects indices of nearest neighbors
* \param[out] dist_obs_neighbors_cluster_i Distances between locations and their nearest neighbors
* \param[out] dist_between_neighbors_cluster_i Distances between nearest neighbors for all locations
* \param[out] entries_init_B_cluster_i Triplets for initializing the matrices B
* \param[out] entries_init_B_grad_cluster_i Triplets for initializing the matrices B_grad
* \param[out] z_outer_z_obs_neighbors_cluster_i Outer product of covariate vector at observations and neighbors with itself for random coefficients. First index = data point i, second index = GP number j
* \param vecchia_ordering Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering
* \param num_neighbors The number of neighbors used in the Vecchia approximation
*/
void CreateREComponentsVecchia(data_size_t num_data,
std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster,
gp_id_t cluster_i,
std::map<gp_id_t, int>& num_data_per_cluster,
const double* gp_coords_data,
int dim_gp_coords,
const double* gp_rand_coef_data,
data_size_t num_gp_rand_coef,
const string_t cov_fct,
double cov_fct_shape,
std::vector<std::shared_ptr<RECompBase<T_mat>>>& re_comps_cluster_i,
std::vector<std::vector<int>>& nearest_neighbors_cluster_i,
std::vector<den_mat_t>& dist_obs_neighbors_cluster_i,
std::vector<den_mat_t>& dist_between_neighbors_cluster_i,
std::vector<Triplet_t >& entries_init_B_cluster_i,
std::vector<Triplet_t >& entries_init_B_grad_cluster_i,
std::vector<std::vector<den_mat_t>>& z_outer_z_obs_neighbors_cluster_i,
string_t vecchia_ordering = "none",
int num_neighbors = 30) {
if (vecchia_ordering == "random") {
unsigned seed = 0;
std::shuffle(data_indices_per_cluster[cluster_i].begin(), data_indices_per_cluster[cluster_i].end(), std::default_random_engine(seed));
}
std::vector<double> gp_coords;
for (int j = 0; j < dim_gp_coords; ++j) {
for (const auto& id : data_indices_per_cluster[cluster_i]) {
gp_coords.push_back(gp_coords_data[j * num_data + id]);
}
}
den_mat_t gp_coords_mat = Eigen::Map<den_mat_t>(gp_coords.data(), num_data_per_cluster[cluster_i], dim_gp_coords);
re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T_mat>>(new RECompGP<T_mat>(gp_coords_mat, cov_fct, cov_fct_shape, false, false)));
find_nearest_neighbors_Veccia_fast(gp_coords_mat, num_data_per_cluster[cluster_i], num_neighbors,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1);
for (int i = 0; i < num_data_per_cluster[cluster_i]; ++i) {
for (int j = 0; j < (int)nearest_neighbors_cluster_i[i].size(); ++j) {
entries_init_B_cluster_i.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][j], 0.));
entries_init_B_grad_cluster_i.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][j], 0.));
}
entries_init_B_cluster_i.push_back(Triplet_t(i, i, 1.));//Put 1's on the diagonal since B = I - A
}
//Random coefficients
if (num_gp_rand_coef > 0) {
for (int j = 0; j < num_gp_rand_coef; ++j) {
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster[cluster_i]) {
rand_coef_data.push_back(gp_rand_coef_data[j * num_data + id]);
}
re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T_mat>>(new RECompGP<T_mat>(rand_coef_data, cov_fct, cov_fct_shape)));
//save random coefficient data in the form of outer product matrices
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster[cluster_i]; ++i) {
if (j == 0) {
z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef);
}
int dim_z = (i == 0) ? 1 : ((int)nearest_neighbors_cluster_i[i].size() + 1);
vec_t coef_vec(dim_z);
coef_vec(0) = rand_coef_data[i];
if (i > 0) {
for (int ii = 1; ii < dim_z; ++ii) {
coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]];
}
}
z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose();
}
}
}
}
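//Note: the Vecchia approximation yields a sparse factorization of the precision matrix,
//Psi^-1 = B^T * D^-1 * B, where B = I - A is lower triangular with non-zero off-diagonal entries
//only at the nearest-neighbor positions collected above and D is diagonal; this is the form used,
//e.g., in 'CalcXTPsiInvX' where X^T * Psi^-1 * X is computed as (B*X)^T * D^-1 * (B*X).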
/*!
* \brief Set the covariance parameters of the components
* \param cov_pars Covariance parameters
*/
void SetCovParsComps(const vec_t& cov_pars) {
CHECK(cov_pars.size() == num_cov_par_);
if (gauss_likelihood_) {
sigma2_ = cov_pars[0];
}
for (const auto& cluster_i : unique_clusters_) {
for (int j = 0; j < num_comps_total_; ++j) {
const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]);
re_comps_[cluster_i][j]->SetCovPars(pars);
}
}
}
/*!
* \brief Transform the covariance parameters to the scale on which the MLE is found
* \param cov_pars Covariance parameters
* \param[out] cov_pars_trans Transformed covariance parameters
*/
void TransformCovPars(const vec_t& cov_pars, vec_t& cov_pars_trans) {
CHECK(cov_pars.size() == num_cov_par_);
cov_pars_trans = vec_t(num_cov_par_);
if (gauss_likelihood_) {
cov_pars_trans[0] = cov_pars[0];
}
for (int j = 0; j < num_comps_total_; ++j) {
const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]);
vec_t pars_trans = pars;
if (gauss_likelihood_) {
re_comps_[unique_clusters_[0]][j]->TransformCovPars(cov_pars[0], pars, pars_trans);
}
else {
re_comps_[unique_clusters_[0]][j]->TransformCovPars(1., pars, pars_trans);
}
cov_pars_trans.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]) = pars_trans;
}
}
/*!
* \brief Back-transform the covariance parameters to the original scale
* \param cov_pars Covariance parameters
* \param[out] cov_pars_orig Back-transformed, original covariance parameters
*/
void TransformBackCovPars(const vec_t& cov_pars, vec_t& cov_pars_orig) {
CHECK(cov_pars.size() == num_cov_par_);
cov_pars_orig = vec_t(num_cov_par_);
if (gauss_likelihood_) {
cov_pars_orig[0] = cov_pars[0];
}
for (int j = 0; j < num_comps_total_; ++j) {
const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]);
vec_t pars_orig = pars;
if (gauss_likelihood_) {
re_comps_[unique_clusters_[0]][j]->TransformBackCovPars(cov_pars[0], pars, pars_orig);
}
else {
re_comps_[unique_clusters_[0]][j]->TransformBackCovPars(1, pars, pars_orig);
}
cov_pars_orig.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]) = pars_orig;
}
}
/*!
* \brief Calculate covariance matrices of the components
*/
void CalcSigmaComps() {
for (const auto& cluster_i : unique_clusters_) {
for (int j = 0; j < num_comps_total_; ++j) {
re_comps_[cluster_i][j]->CalcSigma();
}
}
}
/*!
* \brief Construct inverse covariance matrix Sigma^-1 if there are only grouped random effects (this is then a diagonal matrix)
* \param[out] SigmaI Inverse covariance matrix of random effects (a diagonal matrix)
* \param cluster_i Cluster index for which SigmaI is constructed
*/
void CalcSigmaIGroupedREsOnly(sp_mat_t& SigmaI, gp_id_t cluster_i) {
CHECK(!only_one_grouped_RE_calculations_on_RE_scale_);
std::vector<Triplet_t> triplets;
triplets.reserve(cum_num_rand_eff_[cluster_i][num_comps_total_]);
for (int j = 0; j < num_comps_total_; ++j) {
double sigmaI = re_comps_[cluster_i][j]->cov_pars_[0];
sigmaI = 1.0 / sigmaI;
for (int i = cum_num_rand_eff_[cluster_i][j]; i < cum_num_rand_eff_[cluster_i][j + 1]; ++i) {
triplets.emplace_back(i, i, sigmaI);
}
}
SigmaI = sp_mat_t(cum_num_rand_eff_[cluster_i][num_comps_total_], cum_num_rand_eff_[cluster_i][num_comps_total_]);
SigmaI.setFromTriplets(triplets.begin(), triplets.end());
}
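//Worked example: for two grouped REs with variances sigma_1^2 = 2 and sigma_2^2 = 0.5 and
//cum_num_rand_eff_ = [0, 3, 5], SigmaI is the 5 x 5 diagonal matrix diag(0.5, 0.5, 0.5, 2, 2).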
/*!
* \brief Factorize the covariance matrix (Gaussian data) or
* calculate the posterior mode of the random effects for use in the Laplace approximation (non-Gaussian data)
* And calculate the negative log-likelihood (Gaussian data) or the negative approx. marginal log-likelihood (non-Gaussian data)
* \param cov_pars Covariance parameters
* \param fixed_effects Fixed effects component of location parameter
*/
void CalcCovFactorOrModeAndNegLL(vec_t& cov_pars, const double* fixed_effects = nullptr) {
SetCovParsComps(cov_pars);
if (gauss_likelihood_) {
CalcCovFactor(vecchia_approx_, true, 1., false);//Create covariance matrix and factorize it (and also calculate derivatives if Vecchia approximation is used)
if (only_grouped_REs_use_woodbury_identity_) {
CalcYtilde<T_mat>(true);//y_tilde = L^-1 * Z^T * y and y_tilde2 = Z * L^-T * L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z)
}
else {
CalcYAux();//y_aux = Psi^-1 * y
}
EvalNegLogLikelihood(nullptr, cov_pars.data(), neg_log_likelihood_, true, true, true);
}//end gauss_likelihood_
else {//not gauss_likelihood_
if (vecchia_approx_) {
CalcCovFactor(true, true, 1., false);
}
else {
CalcSigmaComps();
CalcCovMatrixNonGauss();
}
neg_log_likelihood_ = -CalcModePostRandEff(fixed_effects);//calculate mode and approximate marginal likelihood
}//end not gauss_likelihood_
}//end CalcCovFactorOrModeAndNegLL
/*!
* \brief Update covariance parameters, apply step size safeguard, factorize covariance matrix, and calculate new value of objective function
* \param[out] cov_pars Covariance parameters
* \param nat_grad Gradient for gradient descent or = FI^-1 * gradient for Fisher scoring (="natural" gradient)
* \param[out] lr_cov Learning rate (can be overwritten in case it gets decreased)
* \param profile_out_marginal_variance If true, the first parameter (marginal variance, nugget effect) is ignored
* \param use_nesterov_acc If true, Nesterov acceleration is used
* \param it Iteration number
* \param optimizer_cov Optimizer used
* \param[out] cov_pars_after_grad_aux Auxiliary variable used only if use_nesterov_acc == true (see the code below for a description)
* \param[out] cov_pars_after_grad_aux_lag1 Auxiliary variable used only if use_nesterov_acc == true (see the code below for a description)
* \param acc_rate_cov Nesterov acceleration speed
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
* \param momentum_offset Number of iterations for which no momentum is applied in the beginning
* \param fixed_effects Fixed effects component of location parameter
*/
void UpdateCovPars(vec_t& cov_pars, const vec_t& nat_grad, double& lr_cov, bool profile_out_marginal_variance,
bool use_nesterov_acc, int it, const string_t& optimizer_cov, vec_t& cov_pars_after_grad_aux, vec_t& cov_pars_after_grad_aux_lag1,
double acc_rate_cov, int nesterov_schedule_version, int momentum_offset, const double* fixed_effects = nullptr) {
vec_t cov_pars_new(num_cov_par_);
if (profile_out_marginal_variance) {
cov_pars_new[0] = cov_pars[0];
}
double lr = lr_cov;
bool decrease_found = false;
bool halving_done = false;
for (int ih = 0; ih < MAX_NUMBER_HALVING_STEPS_; ++ih) {
if (profile_out_marginal_variance) {
cov_pars_new.segment(1, num_cov_par_ - 1) = (cov_pars.segment(1, num_cov_par_ - 1).array().log() - lr * nat_grad.array()).exp().matrix();//make update on log-scale
}
else {
cov_pars_new = (cov_pars.array().log() - lr * nat_grad.array()).exp().matrix();//make update on log-scale
}
// Apply Nesterov acceleration
if (use_nesterov_acc) {
cov_pars_after_grad_aux = cov_pars_new;
ApplyMomentumStep(it, cov_pars_after_grad_aux, cov_pars_after_grad_aux_lag1, cov_pars_new, acc_rate_cov,
nesterov_schedule_version, profile_out_marginal_variance, momentum_offset, true);
// Note: (i) cov_pars_after_grad_aux and cov_pars_after_grad_aux_lag1 correspond to the parameters obtained after calculating the gradient before applying acceleration
// (ii) cov_pars (below this) are the parameters obtained after applying acceleration (and cov_pars_lag1 is simply the value of the previous iteration)
// We first apply a gradient step and then an acceleration step (and not the other way around) since this is computationally more efficient
// (otherwise the covariance matrix would need to be factorized twice: once for the gradient step (accelerated parameters) and once for calculating the
// log-likelihood (non-accelerated parameters after the gradient update) when checking for convergence at the end of an iteration).
// However, performing the acceleration before or after the gradient update gives equivalent algorithms
}
CalcCovFactorOrModeAndNegLL(cov_pars_new, fixed_effects);
// Safeguard against too large steps by halving the learning rate when the objective increases
if (neg_log_likelihood_ <= neg_log_likelihood_after_lin_coef_update_) {
decrease_found = true;
break;
}
else {
halving_done = true;
lr *= 0.5;
acc_rate_cov *= 0.5;
if (!gauss_likelihood_) {
// Reset mode to previous value since also parameters are discarded
for (const auto& cluster_i : unique_clusters_) {
likelihood_[cluster_i]->ResetModeToPreviousValue();
}
}
}
}
if (halving_done) {
if (optimizer_cov == "fisher_scoring") {
Log::REDebug("GPModel covariance parameter estimation: No decrease in the objective function in iteration number %d. The learning rate has been decreased in this iteration.", it + 1);
}
else if (optimizer_cov == "gradient_descent") {
lr_cov = lr; //permanently decrease learning rate (for Fisher scoring, this is not done, i.e., step halving is done anew in every iteration of Fisher scoring)
Log::REDebug("GPModel covariance parameter estimation: The learning rate has been decreased permanently since with the previous learning rate, there was no decrease in the objective function in iteration number %d. New learning rate = %g", it + 1, lr_cov);
}
}
if (!decrease_found) {
Log::REDebug("GPModel covariance parameter estimation: No decrease in the objective function in iteration number %d after the maximal number of halving steps (%d).", it + 1, MAX_NUMBER_HALVING_STEPS_);
}
if (use_nesterov_acc) {
cov_pars_after_grad_aux_lag1 = cov_pars_after_grad_aux;
}
cov_pars = cov_pars_new;
}//end UpdateCovPars
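///Illustration (not part of the class): the log-scale update with step size halving used above,
///reduced to a single parameter; 'f' is a hypothetical objective function returning the negative log-likelihood:
//	double theta = 1.0, grad = 0.4, lr = 0.1;
//	for (int ih = 0; ih < 10; ++ih) {
//		double theta_new = std::exp(std::log(theta) - lr * grad);//update on the log-scale keeps theta > 0
//		if (f(theta_new) <= f(theta)) { theta = theta_new; break; }//accept if the objective does not increase
//		lr *= 0.5;//otherwise halve the learning rate and retry
//	}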
/*!
* \brief Update linear regression coefficients and apply step size safeguard
* \param[out] beta Linear regression coefficients
* \param grad Gradient
* \param[out] lr_coef Learning rate (can be overwritten in case it gets decreased)
* \param use_nesterov_acc If true, Nesterov acceleration is used
* \param it Iteration number
* \param[out] beta_after_grad_aux Auxiliary variable used only if use_nesterov_acc == true (see the code below for a description)
* \param[out] beta_after_grad_aux_lag1 Auxiliary variable used only if use_nesterov_acc == true (see the code below for a description)
* \param acc_rate_coef Nesterov acceleration speed
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
* \param momentum_offset Number of iterations for which no momentum is applied in the beginning
* \param fixed_effects External fixed effects
* \param[out] fixed_effects_vec Fixed effects component of the location parameter as the sum of the linear predictor and potentially additional external fixed effects
*/
void UpdateLinCoef(vec_t& beta, const vec_t& grad, double& lr_coef, const vec_t& cov_pars,
bool use_nesterov_acc, int it, vec_t& beta_after_grad_aux, vec_t& beta_after_grad_aux_lag1,
double acc_rate_coef, int nesterov_schedule_version, int momentum_offset, const double* fixed_effects, vec_t& fixed_effects_vec) {
vec_t beta_new;
double lr = lr_coef;
vec_t resid;
bool decrease_found = false;
bool halving_done = false;
for (int ih = 0; ih < MAX_NUMBER_HALVING_STEPS_; ++ih) {
beta_new = beta - lr * grad;
// Apply Nesterov acceleration
if (use_nesterov_acc) {
beta_after_grad_aux = beta_new;
ApplyMomentumStep(it, beta_after_grad_aux, beta_after_grad_aux_lag1, beta_new, acc_rate_coef,
nesterov_schedule_version, false, momentum_offset, false);
//Note: use same version of Nesterov acceleration as for covariance parameters (see 'UpdateCovPars')
}
if (gauss_likelihood_) {
// Set resid for updating covariance parameters
resid = y_vec_ - (X_ * beta_new);
SetY(resid.data());
// Calculate y_aux = Psi^-1 * y (if not only_grouped_REs_use_woodbury_identity_) or y_tilde and y_tilde2 (if only_grouped_REs_use_woodbury_identity_) for covariance parameter update (only for Gaussian data)
if (only_grouped_REs_use_woodbury_identity_) {
CalcYtilde<T_mat>(true);//y_tilde = L^-1 * Z^T * y and y_tilde2 = Z * L^-T * L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z)
}
else {
CalcYAux();//y_aux = Psi^-1 * y
}
EvalNegLogLikelihood(nullptr, cov_pars.data(), neg_log_likelihood_after_lin_coef_update_, true, true, true);
}//end if gauss_likelihood_
else {//non-Gaussian data
fixed_effects_vec = X_ * beta_new;
if (fixed_effects != nullptr) {//add external fixed effects to linear predictor
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_; ++i) {
fixed_effects_vec[i] += fixed_effects[i];
}
}
neg_log_likelihood_after_lin_coef_update_ = -CalcModePostRandEff(fixed_effects_vec.data());//calculate mode and approximate marginal likelihood
}
// Safeguard against too large steps by halving the learning rate when the objective increases
if (neg_log_likelihood_after_lin_coef_update_ <= neg_log_likelihood_lag1_) {
decrease_found = true;
break;
}
else {
// Safeguard against too large steps by halving the learning rate
halving_done = true;
lr *= 0.5;
acc_rate_coef *= 0.5;
if (!gauss_likelihood_) {
// Reset mode to previous value since also parameters are discarded
for (const auto& cluster_i : unique_clusters_) {
likelihood_[cluster_i]->ResetModeToPreviousValue();
}
}
}
}
if (halving_done) {
lr_coef = lr; //permanently decrease learning rate (for Fisher scoring, this is not done, i.e., step halving is done anew in every iteration of Fisher scoring)
Log::REDebug("GPModel linear regression coefficient estimation: The learning rate has been decreased permanently since with the previous learning rate, there was no decrease in the objective function in iteration number %d. New learning rate = %g", it + 1, lr_coef);
}
if (!decrease_found) {
Log::REDebug("GPModel linear regression coefficient estimation: No decrease in the objective function in iteration number %d after the maximal number of halving steps (%d).", it + 1, MAX_NUMBER_HALVING_STEPS_);
}
if (use_nesterov_acc) {
beta_after_grad_aux_lag1 = beta_after_grad_aux;
}
beta = beta_new;
}//end UpdateLinCoef
/*!
* \brief Calculate the covariance matrix ZSigmaZt of the random effects (sum of all components)
* \param[out] ZSigmaZt Covariance matrix ZSigmaZt
* \param cluster_i Cluster index for which the covariance matrix is calculated
*/
void CalcZSigmaZt(T_mat& ZSigmaZt, gp_id_t cluster_i) {
ZSigmaZt.resize(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
if (gauss_likelihood_) {
ZSigmaZt.setIdentity();
}
else {
ZSigmaZt.setZero();
}
for (int j = 0; j < num_comps_total_; ++j) {
ZSigmaZt += (*(re_comps_[cluster_i][j]->GetZSigmaZt()));
}
}//end CalcZSigmaZt
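//Note: for Gaussian data the identity initialization above adds the (normalized) nugget effect,
//i.e. ZSigmaZt = I + sum_j Z_j * Sigma_j * Z_j^T, whereas for non-Gaussian data there is no
//nugget effect and ZSigmaZt = sum_j Z_j * Sigma_j * Z_j^T.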
/*!
* \brief Calculate the covariance matrix ZSigmaZt if only_grouped_REs_use_woodbury_identity_==false or the inverse covariance matrix Sigma^-1 if there are only grouped REs i.e. if only_grouped_REs_use_woodbury_identity_==true.
* This function is only used for non-Gaussian data since in the Gaussian case these matrices need not be saved
*/
void CalcCovMatrixNonGauss() {
if (!only_one_grouped_RE_calculations_on_RE_scale_) {//Nothing to calculate if only_one_grouped_RE_calculations_on_RE_scale_
if (only_grouped_REs_use_woodbury_identity_) {
for (const auto& cluster_i : unique_clusters_) {
CalcSigmaIGroupedREsOnly(SigmaI_[cluster_i], cluster_i);
}
}
else {
for (const auto& cluster_i : unique_clusters_) {
if (num_comps_total_ == 1) {//no need to sum up different components
ZSigmaZt_[cluster_i] = re_comps_[cluster_i][0]->GetZSigmaZt();
}
else {
T_mat ZSigmaZt;
CalcZSigmaZt(ZSigmaZt, cluster_i);
ZSigmaZt_[cluster_i] = std::make_shared<T_mat>(ZSigmaZt);
}
}
}
}
}//end CalcCovMatrixNonGauss
/*!
* \brief Calculate the mode of the posterior of the latent random effects for use in the Laplace approximation. This function is only used for non-Gaussian data
* \param fixed_effects Fixed effects component of location parameter
* \return Approximate marginal log-likelihood evaluated at the mode
*/
double CalcModePostRandEff(const double* fixed_effects = nullptr) {
double mll = 0.;
double mll_cluster_i;
const double* fixed_effects_cluster_i_ptr = nullptr;
vec_t fixed_effects_cluster_i;
for (const auto& cluster_i : unique_clusters_) {
if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {//only one cluster / independent realization and order of data does not matter
fixed_effects_cluster_i_ptr = fixed_effects;
}
else if (fixed_effects != nullptr) {//more than one cluster and order of samples matters
fixed_effects_cluster_i = vec_t(num_data_per_cluster_[cluster_i]);//TODO: Is there a more efficient way that avoids copying?
//TODO: this is quite inefficient as the mapping of the fixed_effects to the different clusters is done repeatedly for the same data. Could be saved if performance is an issue here.
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
fixed_effects_cluster_i[j] = fixed_effects[data_indices_per_cluster_[cluster_i][j]];
}
fixed_effects_cluster_i_ptr = fixed_effects_cluster_i.data();
}
if (vecchia_approx_) {
likelihood_[cluster_i]->FindModePostRandEffCalcMLLVecchia(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
B_[cluster_i],
D_inv_[cluster_i],
mll_cluster_i);
}
else {
if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
likelihood_[cluster_i]->FindModePostRandEffCalcMLLGroupedRE(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
SigmaI_[cluster_i],
Zt_[cluster_i],
mll_cluster_i);
}
else if (only_one_grouped_RE_calculations_on_RE_scale_) {
likelihood_[cluster_i]->FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
re_comps_[cluster_i][0]->cov_pars_[0],
re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
mll_cluster_i);
}
else if (only_one_GP_calculations_on_RE_scale_) {
(*likelihood_[cluster_i]).template FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale<T_mat>(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
ZSigmaZt_[cluster_i], //Note: ZSigmaZt_ contains only Sigma if only_one_GP_calculations_on_RE_scale_==true
re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
mll_cluster_i);
//Note: ZSigmaZt_[cluster_i] contain Sigma=Cov(b) and not Z*Sigma*Zt since has_Z_==false for this random effects component
}
else {
(*likelihood_[cluster_i]).template FindModePostRandEffCalcMLLStable<T_mat>(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
ZSigmaZt_[cluster_i],
mll_cluster_i);
}
}
mll += mll_cluster_i;
}
return(mll);
}//CalcModePostRandEff
/*!
* \brief Calculate matrices A and D_inv as well as their derivatives for the Vecchia approximation for one cluster (independent realization of GP)
* \param num_data_cluster_i Number of data points
* \param calc_gradient If true, the gradient is also calculated (only for Vecchia approximation)
* \param re_comps_cluster_i Container that collects the individual component models
* \param nearest_neighbors_cluster_i Collects indices of nearest neighbors
* \param dist_obs_neighbors_cluster_i Distances between locations and their nearest neighbors
* \param dist_between_neighbors_cluster_i Distances between nearest neighbors for all locations
* \param entries_init_B_cluster_i Triplets for initializing the matrices B
* \param entries_init_B_grad_cluster_i Triplets for initializing the matrices B_grad
* \param z_outer_z_obs_neighbors_cluster_i Outer product of covariate vector at observations and neighbors with itself for random coefficients. First index = data point i, second index = GP number j
* \param[out] B_cluster_i Matrix A = I - B (= Cholesky factor of inverse covariance) for Vecchia approximation
* \param[out] D_inv_cluster_i Diagonal matrices D^-1 for Vecchia approximation
* \param[out] B_grad_cluster_i Derivatives of matrices A ( = derivative of matrix -B) for Vecchia approximation
* \param[out] D_grad_cluster_i Derivatives of matrices D for Vecchia approximation
* \param transf_scale If true, the derivatives are taken on the transformed scale otherwise on the original scale. Default = true
* \param nugget_var Nugget effect variance parameter sigma^2 (used only if transf_scale = false to transform back)
* \param calc_gradient_nugget If true, derivatives are also taken with respect to the nugget / noise variance
*/
void CalcCovFactorVecchia(int num_data_cluster_i, bool calc_gradient,//TODO: make arguments const
std::vector<std::shared_ptr<RECompBase<T_mat>>>& re_comps_cluster_i, std::vector<std::vector<int>>& nearest_neighbors_cluster_i,
std::vector<den_mat_t>& dist_obs_neighbors_cluster_i, std::vector<den_mat_t>& dist_between_neighbors_cluster_i,
std::vector<Triplet_t >& entries_init_B_cluster_i, std::vector<Triplet_t >& entries_init_B_grad_cluster_i,
std::vector<std::vector<den_mat_t>>& z_outer_z_obs_neighbors_cluster_i,
sp_mat_t& B_cluster_i, sp_mat_t& D_inv_cluster_i, std::vector<sp_mat_t>& B_grad_cluster_i, std::vector<sp_mat_t>& D_grad_cluster_i,
bool transf_scale = true, double nugget_var = 1., bool calc_gradient_nugget = false) {
int num_par_comp = re_comps_cluster_i[ind_intercept_gp_]->num_cov_par_;
int num_par_gp = num_par_comp * num_gp_total_ + calc_gradient_nugget;
//Initialize matrices B = I - A and D^-1 as well as their derivatives (in order that the code below can be run in parallel)
B_cluster_i = sp_mat_t(num_data_cluster_i, num_data_cluster_i);//B = I - A
B_cluster_i.setFromTriplets(entries_init_B_cluster_i.begin(), entries_init_B_cluster_i.end());//Note: 1's are put on the diagonal
D_inv_cluster_i = sp_mat_t(num_data_cluster_i, num_data_cluster_i);//D^-1. Note: we first calculate D, and then take the inverse below
D_inv_cluster_i.setIdentity();//Put 1's on the diagonal for nugget effect (entries are not overridden but added below)
if (!transf_scale) {
D_inv_cluster_i.diagonal().array() *= nugget_var;//nugget effect is not 1 if not on transformed scale
}
if (!gauss_likelihood_) {
D_inv_cluster_i.diagonal().array() *= 0.;
}
if (calc_gradient) {
B_grad_cluster_i = std::vector<sp_mat_t>(num_par_gp);//derivative of B = derivative of (-A)
D_grad_cluster_i = std::vector<sp_mat_t>(num_par_gp);//derivative of D
for (int ipar = 0; ipar < num_par_gp; ++ipar) {
B_grad_cluster_i[ipar] = sp_mat_t(num_data_cluster_i, num_data_cluster_i);
B_grad_cluster_i[ipar].setFromTriplets(entries_init_B_grad_cluster_i.begin(), entries_init_B_grad_cluster_i.end());
D_grad_cluster_i[ipar] = sp_mat_t(num_data_cluster_i, num_data_cluster_i);
D_grad_cluster_i[ipar].setIdentity();//allocate the diagonal entries
D_grad_cluster_i[ipar].diagonal().array() = 0.;//set the diagonal to 0. TODO: maybe change initialization of this matrix by also using triplets -> faster?
}
}//end initialization
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_cluster_i; ++i) {
int num_nn = (int)nearest_neighbors_cluster_i[i].size();
//calculate covariance matrices between observations and neighbors and among neighbors as well as their derivatives
den_mat_t cov_mat_obs_neighbors(1, num_nn);
den_mat_t cov_mat_between_neighbors(num_nn, num_nn);
std::vector<den_mat_t> cov_grad_mats_obs_neighbors(num_par_gp);//covariance matrix plus derivative wrt to every parameter
std::vector<den_mat_t> cov_grad_mats_between_neighbors(num_par_gp);
if (i > 0) {
for (int j = 0; j < num_gp_total_; ++j) {
int ind_first_par = j * num_par_comp;//index of first parameter (variance) of component j in gradient vectors
if (j == 0) {
re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors[ind_first_par], cov_grad_mats_obs_neighbors[ind_first_par + 1],
calc_gradient, transf_scale, nugget_var);//write on matrices directly for first GP component
re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors, cov_grad_mats_between_neighbors[ind_first_par], cov_grad_mats_between_neighbors[ind_first_par + 1],
calc_gradient, transf_scale, nugget_var);
}
else {//random coefficient GPs
den_mat_t cov_mat_obs_neighbors_j;
den_mat_t cov_mat_between_neighbors_j;
re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors[ind_first_par], cov_grad_mats_obs_neighbors[ind_first_par + 1],
calc_gradient, transf_scale, nugget_var);
re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors[ind_first_par], cov_grad_mats_between_neighbors[ind_first_par + 1],
calc_gradient, transf_scale, nugget_var);
//multiply by coefficient matrix
cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();//elementwise product (equivalent to cwiseProduct)
cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
cov_mat_between_neighbors += cov_mat_between_neighbors_j;
if (calc_gradient) {
cov_grad_mats_obs_neighbors[ind_first_par].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
cov_grad_mats_obs_neighbors[ind_first_par + 1].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
cov_grad_mats_between_neighbors[ind_first_par].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
cov_grad_mats_between_neighbors[ind_first_par + 1].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
}
}
}//end loop over components j
}//end if (i > 0)
//Calculate matrices B and D as well as their derivatives
//1. add first summand of matrix D (ZCZ^T_{ii}) and its derivatives
for (int j = 0; j < num_gp_total_; ++j) {
double d_comp_j = re_comps_cluster_i[ind_intercept_gp_ + j]->cov_pars_[0];
if (!transf_scale) {
d_comp_j *= nugget_var;
}
if (j > 0) {//random coefficient
d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
}
D_inv_cluster_i.coeffRef(i, i) += d_comp_j;
if (calc_gradient) {
if (transf_scale) {
D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = d_comp_j;//derivative of the covariance function wrt the marginal variance; the derivative wrt the range is zero on the diagonal
}
else {
if (j == 0) {
D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = 1.;//1's on the diagonal on the original scale
}
else {
D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
}
}
}
}
if (calc_gradient && calc_gradient_nugget) {
D_grad_cluster_i[num_par_gp - 1].coeffRef(i, i) = 1.;
}
//2. remaining terms
if (i > 0) {
if (gauss_likelihood_) {
if (transf_scale) {
cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
}
else {
cov_mat_between_neighbors.diagonal().array() += nugget_var;
}
}
//else {//Seems unnecessary
// cov_mat_between_neighbors.diagonal().array() += 1e-10;//Avoid numerical problems when there is no nugget effect
//}
den_mat_t A_i(1, num_nn);
den_mat_t cov_mat_between_neighbors_inv;
den_mat_t A_i_grad_sigma2;
if (calc_gradient) {
// Note: it is faster (approx. 1.5-2 times) to first calculate cov_mat_between_neighbors_inv and then multiply this with the matrices below
// instead of always using the Cholesky factor of cov_mat_between_neighbors to calculate cov_mat_between_neighbors_inv * (a matrix)
den_mat_t I(num_nn, num_nn);
I.setIdentity();
cov_mat_between_neighbors_inv = cov_mat_between_neighbors.llt().solve(I);
A_i = cov_mat_obs_neighbors * cov_mat_between_neighbors_inv;
if (calc_gradient_nugget) {
A_i_grad_sigma2 = -A_i * cov_mat_between_neighbors_inv;
}
}
else {
A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
}
for (int inn = 0; inn < num_nn; ++inn) {
B_cluster_i.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i(0, inn);
}
D_inv_cluster_i.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
if (calc_gradient) {
den_mat_t A_i_grad(1, num_nn);
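//The gradient below uses d(Sigma_nn^-1)/dtheta = -Sigma_nn^-1 * dSigma_nn/dtheta * Sigma_nn^-1, so that
//  dA_i/dtheta = dSigma_on/dtheta * Sigma_nn^-1 - Sigma_on * Sigma_nn^-1 * dSigma_nn/dtheta * Sigma_nn^-1
//(Sigma_on = covariance between observation and neighbors, Sigma_nn = covariance among neighbors)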
for (int j = 0; j < num_gp_total_; ++j) {
int ind_first_par = j * num_par_comp;
for (int ipar = 0; ipar < num_par_comp; ++ipar) {
A_i_grad = (cov_grad_mats_obs_neighbors[ind_first_par + ipar] * cov_mat_between_neighbors_inv) -
(cov_mat_obs_neighbors * cov_mat_between_neighbors_inv *
cov_grad_mats_between_neighbors[ind_first_par + ipar] * cov_mat_between_neighbors_inv);
for (int inn = 0; inn < num_nn; ++inn) {
B_grad_cluster_i[ind_first_par + ipar].coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i_grad(0, inn);
}
if (ipar == 0) {
D_grad_cluster_i[ind_first_par + ipar].coeffRef(i, i) -= ((A_i_grad * cov_mat_obs_neighbors.transpose())(0, 0) +
(A_i * cov_grad_mats_obs_neighbors[ind_first_par + ipar].transpose())(0, 0));//add to derivative of diagonal elements for marginal variance
}
else {
D_grad_cluster_i[ind_first_par + ipar].coeffRef(i, i) = -((A_i_grad * cov_mat_obs_neighbors.transpose())(0, 0) +
(A_i * cov_grad_mats_obs_neighbors[ind_first_par + ipar].transpose())(0, 0));//don't add to existing values since derivative of diagonal is zero for range
}
}
}
if (calc_gradient_nugget) {
for (int inn = 0; inn < num_nn; ++inn) {
B_grad_cluster_i[num_par_gp - 1].coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i_grad_sigma2(0, inn);
}
D_grad_cluster_i[num_par_gp - 1].coeffRef(i, i) -= (A_i_grad_sigma2 * cov_mat_obs_neighbors.transpose())(0, 0);
}
}//end calc_gradient
}//end if i > 0
D_inv_cluster_i.coeffRef(i, i) = 1. / D_inv_cluster_i.coeffRef(i, i);
}//end loop over data i
}//end CalcCovFactorVecchia
/*!
* \brief Create the covariance matrix Psi and factorize it (either calculate a Cholesky factor or the inverse covariance matrix)
* Use only for Gaussian data
* \param calc_gradient If true, the gradient is also calculated (only for Vecchia approximation)
* \param transf_scale If true, the derivatives are taken on the transformed scale, otherwise on the original scale. Default = true (only for Vecchia approximation)
* \param nugget_var Nugget effect variance parameter sigma^2 (used only if vecchia_approx_ == true and transf_scale == false to transform back; normally this equals one, since the variance parameter is modelled separately and factored out)
* \param calc_gradient_nugget If true, derivatives are also taken with respect to the nugget / noise variance (only for Vecchia approximation)
*/
void CalcCovFactor(bool calc_gradient = false, bool transf_scale = true, double nugget_var = 1., bool calc_gradient_nugget = false) {
if (vecchia_approx_) {
for (const auto& cluster_i : unique_clusters_) {
int num_data_cl_i = num_data_per_cluster_[cluster_i];
CalcCovFactorVecchia(num_data_cl_i, calc_gradient, re_comps_[cluster_i], nearest_neighbors_[cluster_i],
dist_obs_neighbors_[cluster_i], dist_between_neighbors_[cluster_i],
entries_init_B_[cluster_i], entries_init_B_grad_[cluster_i], z_outer_z_obs_neighbors_[cluster_i],
B_[cluster_i], D_inv_[cluster_i], B_grad_[cluster_i], D_grad_[cluster_i], transf_scale, nugget_var, calc_gradient_nugget);
}
}
else {
CalcSigmaComps();
for (const auto& cluster_i : unique_clusters_) {
if (only_grouped_REs_use_woodbury_identity_) {//Use Woodbury matrix inversion formula: used only if there are only grouped REs
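//Reminder of the identity used (error variance factored out, Psi = I + Z*Sigma*Z^T):
//  Psi^-1 = I - Z * (Sigma^-1 + Z^T*Z)^-1 * Z^T, so only the (typically much smaller)
//  matrix M = Sigma^-1 + Z^T*Z needs to be factorized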
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
CalcSigmaIGroupedREsOnly(SigmaI_[cluster_i], cluster_i);
chol_facts_[cluster_i] = (SigmaI_[cluster_i].diagonal().array() + ZtZ_[cluster_i].diagonal().array()).sqrt().matrix().asDiagonal();
}
else {
sp_mat_t SigmaI;
CalcSigmaIGroupedREsOnly(SigmaI, cluster_i);
T_mat SigmaIplusZtZ = SigmaI + ZtZ_[cluster_i];
CalcChol<T_mat>(SigmaIplusZtZ, cluster_i, do_symbolic_decomposition_);
}
}//end only_grouped_REs_use_woodbury_identity_
else {//not only_grouped_REs_use_woodbury_identity_
T_mat psi;
CalcZSigmaZt(psi, cluster_i);
CalcChol<T_mat>(psi, cluster_i, do_symbolic_decomposition_);
}//end not only_grouped_REs_use_woodbury_identity_
}
do_symbolic_decomposition_ = false;//Symbolic decomposition is done only once (if sparse matrices are used)
}
}
/*!
* \brief Calculate Psi^-1*y (and save in y_aux_)
* \param marg_variance The marginal variance. Default = 1.
*/
void CalcYAux(double marg_variance = 1.) {
for (const auto& cluster_i : unique_clusters_) {
if (y_.find(cluster_i) == y_.end()) {
Log::REFatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
}
if (vecchia_approx_) {
if (B_.find(cluster_i) == B_.end()) {
Log::REFatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
y_aux_[cluster_i] = B_[cluster_i].transpose() * D_inv_[cluster_i] * B_[cluster_i] * y_[cluster_i];
}//end vecchia_approx_
else {//not vecchia_approx_
if (chol_facts_.find(cluster_i) == chol_facts_.end()) {
Log::REFatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
if (only_grouped_REs_use_woodbury_identity_) {
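//Woodbury: Psi^-1 * y = y - Z * (Sigma^-1 + Z^T*Z)^-1 * Z^T*y = y - Z * MInvZty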
vec_t MInvZty;
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
MInvZty = (Zty_[cluster_i].array() / (chol_facts_[cluster_i].diagonal().array().square())).matrix();
}
else {
MInvZty = chol_facts_solve_[cluster_i].solve(Zty_[cluster_i]);
}
y_aux_[cluster_i] = y_[cluster_i] - Zt_[cluster_i].transpose() * MInvZty;
}
else {
//Version 1: let Eigen do the computation
y_aux_[cluster_i] = chol_facts_solve_[cluster_i].solve(y_[cluster_i]);
//// Version 2 'do-it-yourself' (for sparse matrices)
//y_aux_[cluster_i] = y_[cluster_i];
//const double* val = chol_facts_[cluster_i].valuePtr();
//const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
//const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
//sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], y_aux_[cluster_i].data());
//sp_L_t_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], y_aux_[cluster_i].data());
}
}//end non-Vecchia
if (marg_variance != 1.) {
y_aux_[cluster_i] /= marg_variance;
}
}
y_aux_has_been_calculated_ = true;
}
/*!
* \brief Calculate y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z) (and save in y_tilde_) if sparse matrices are used
* \param also_calculate_ytilde2 If true y_tilde2 = Z * L^-T * L^-1 * Z^T * y is also calculated
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcYtilde(bool also_calculate_ytilde2 = false) {
for (const auto& cluster_i : unique_clusters_) {
if (y_.find(cluster_i) == y_.end()) {
Log::REFatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
}
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
y_tilde_[cluster_i] = (Zty_[cluster_i].array() / chol_facts_[cluster_i].diagonal().array()).matrix();
if (also_calculate_ytilde2) {
y_tilde2_[cluster_i] = Zt_[cluster_i].transpose() * ((y_tilde_[cluster_i].array() / chol_facts_[cluster_i].diagonal().array()).matrix());
}
}
else {
y_tilde_[cluster_i] = Zty_[cluster_i];
const double* val = chol_facts_[cluster_i].valuePtr();
const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
sp_L_solve(val, row_idx, col_ptr, cum_num_rand_eff_[cluster_i][num_comps_total_], y_tilde_[cluster_i].data());
if (also_calculate_ytilde2) {
vec_t ytilde_aux = y_tilde_[cluster_i];
sp_L_t_solve(val, row_idx, col_ptr, cum_num_rand_eff_[cluster_i][num_comps_total_], ytilde_aux.data());
y_tilde2_[cluster_i] = Zt_[cluster_i].transpose() * ytilde_aux;
}
}
}
}
/*!
* \brief Calculate y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z) (and save in y_tilde_) if dense matrices are used
* \param also_calculate_ytilde2 If true y_tilde2 = Z * L^-T * L^-1 * Z^T * y is also calculated
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcYtilde(bool also_calculate_ytilde2 = false) {
for (const auto& cluster_i : unique_clusters_) {
if (y_.find(cluster_i) == y_.end()) {
Log::REFatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
}
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
y_tilde_[cluster_i] = (Zty_[cluster_i].array() / chol_facts_[cluster_i].diagonal().array()).matrix();
if (also_calculate_ytilde2) {
y_tilde2_[cluster_i] = Zt_[cluster_i].transpose() * ((y_tilde_[cluster_i].array() / chol_facts_[cluster_i].diagonal().array()).matrix());
}
}
else {
y_tilde_[cluster_i] = Zty_[cluster_i];
L_solve(chol_facts_[cluster_i].data(), cum_num_rand_eff_[cluster_i][num_comps_total_], y_tilde_[cluster_i].data());
if (also_calculate_ytilde2) {
vec_t ytilde_aux = y_tilde_[cluster_i];
L_t_solve(chol_facts_[cluster_i].data(), cum_num_rand_eff_[cluster_i][num_comps_total_], ytilde_aux.data());
y_tilde2_[cluster_i] = Zt_[cluster_i].transpose() * ytilde_aux;
}
}
}
}
/*!
* \brief Calculate y^T*Psi^-1*y if sparse matrices are used
* \param[out] yTPsiInvy y^T*Psi^-1*y
* \param all_clusters If true, then y^T*Psi^-1*y is calculated for all clusters / data and cluster_ind is ignored
* \param cluster_ind Cluster index
* \param CalcYAux_already_done If true, it is assumed that y_aux_ = Psi^-1 * y_ has already been calculated (only relevant for not only_grouped_REs_use_woodbury_identity_)
* \param CalcYtilde_already_done If true, it is assumed that y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z), has already been calculated (only relevant for only_grouped_REs_use_woodbury_identity_)
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcYTPsiIInvY(double& yTPsiInvy, bool all_clusters = true,
gp_id_t cluster_ind = 1, bool CalcYAux_already_done = false, bool CalcYtilde_already_done = false) {
yTPsiInvy = 0;
std::vector<gp_id_t> clusters_iterate;
if (all_clusters) {
clusters_iterate = unique_clusters_;
}
else {
clusters_iterate = std::vector<gp_id_t>(1);
clusters_iterate[0] = cluster_ind;
}
for (const auto& cluster_i : clusters_iterate) {
if (y_.find(cluster_i) == y_.end()) {
Log::REFatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
}
if (vecchia_approx_) {
if (CalcYAux_already_done) {
yTPsiInvy += (y_[cluster_i].transpose() * y_aux_[cluster_i])(0, 0);
}
else {
if (B_.find(cluster_i) == B_.end()) {
Log::REFatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
vec_t y_aux_sqrt = B_[cluster_i] * y_[cluster_i];
yTPsiInvy += (y_aux_sqrt.transpose() * D_inv_[cluster_i] * y_aux_sqrt)(0, 0);
}
}//end vecchia_approx_
else {//not vecchia_approx_
if (chol_facts_.find(cluster_i) == chol_facts_.end()) {
Log::REFatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
if (only_grouped_REs_use_woodbury_identity_) {
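//Uses y^T*Psi^-1*y = y^T*y - (Z^T*y)^T * M^-1 * (Z^T*y) = y^T*y - ||L^-1*Z^T*y||^2 = y^T*y - y_tilde^T*y_tilde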
if (!CalcYtilde_already_done) {
CalcYtilde<T_mat>(false);//y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z)
}
else if ((int)y_tilde_[cluster_i].size() != cum_num_rand_eff_[cluster_i][num_comps_total_]) {
Log::REFatal("y_tilde = L^-1 * Z^T * y has not the correct number of data points. Call 'CalcYtilde' first.");
}
yTPsiInvy += (y_[cluster_i].transpose() * y_[cluster_i])(0, 0) - (y_tilde_[cluster_i].transpose() * y_tilde_[cluster_i])(0, 0);
}//end only_grouped_REs_use_woodbury_identity_
else {//not only_grouped_REs_use_woodbury_identity_
if (CalcYAux_already_done) {
yTPsiInvy += (y_[cluster_i].transpose() * y_aux_[cluster_i])(0, 0);
}
else {
vec_t y_aux_sqrt = y_[cluster_i];
const double* val = chol_facts_[cluster_i].valuePtr();
const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], y_aux_sqrt.data());
yTPsiInvy += (y_aux_sqrt.transpose() * y_aux_sqrt)(0, 0);
}
}//end not only_grouped_REs_use_woodbury_identity_
}//end not vecchia_approx_
}
}
/*!
* \brief Calculate y^T*Psi^-1*y if dense matrices are used
* \param[out] yTPsiInvy y^T*Psi^-1*y
* \param all_clusters If true, then y^T*Psi^-1*y is calculated for all clusters / data and cluster_ind is ignored
* \param cluster_ind Cluster index
* \param CalcYAux_already_done If true, it is assumed that y_aux_ = Psi^-1 * y_ has already been calculated (only relevant for not only_grouped_REs_use_woodbury_identity_)
* \param CalcYtilde_already_done If true, it is assumed that y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z), has already been calculated (only relevant for only_grouped_REs_use_woodbury_identity_)
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcYTPsiIInvY(double& yTPsiInvy, bool all_clusters = true,
gp_id_t cluster_ind = 1, bool CalcYAux_already_done = false, bool CalcYtilde_already_done = false) {
yTPsiInvy = 0;
std::vector<gp_id_t> clusters_iterate;
if (all_clusters) {
clusters_iterate = unique_clusters_;
}
else {
clusters_iterate = std::vector<gp_id_t>(1);
clusters_iterate[0] = cluster_ind;
}
for (const auto& cluster_i : clusters_iterate) {
if (y_.find(cluster_i) == y_.end()) {
Log::REFatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
}
if (vecchia_approx_) {
if (CalcYAux_already_done) {
yTPsiInvy += (y_[cluster_i].transpose() * y_aux_[cluster_i])(0, 0);
}
else {
if (B_.find(cluster_i) == B_.end()) {
Log::REFatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
vec_t y_aux_sqrt = B_[cluster_i] * y_[cluster_i];
yTPsiInvy += (y_aux_sqrt.transpose() * D_inv_[cluster_i] * y_aux_sqrt)(0, 0);
}
}//end vecchia_approx_
else {//not vecchia_approx_
if (chol_facts_.find(cluster_i) == chol_facts_.end()) {
Log::REFatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
if (only_grouped_REs_use_woodbury_identity_) {
if (!CalcYtilde_already_done) {
CalcYtilde<T_mat>(false);//y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z)
}
else if ((int)y_tilde_[cluster_i].size() != cum_num_rand_eff_[cluster_i][num_comps_total_]) {
Log::REFatal("y_tilde = L^-1 * Z^T * y has not the correct number of data points. Call 'CalcYtilde' first.");
}
yTPsiInvy += (y_[cluster_i].transpose() * y_[cluster_i])(0, 0) - (y_tilde_[cluster_i].transpose() * y_tilde_[cluster_i])(0, 0);
}//end only_grouped_REs_use_woodbury_identity_
else {//not only_grouped_REs_use_woodbury_identity_
if (CalcYAux_already_done) {
yTPsiInvy += (y_[cluster_i].transpose() * y_aux_[cluster_i])(0, 0);
}
else {
vec_t y_aux_sqrt = y_[cluster_i];
L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], y_aux_sqrt.data());
yTPsiInvy += (y_aux_sqrt.transpose() * y_aux_sqrt)(0, 0);
}
}//end not only_grouped_REs_use_woodbury_identity_
}//end not vecchia_approx_
}
}
/*!
* \brief Calculate gradient for covariance parameters
* This assumes that the covariance matrix has been factorized (by 'CalcCovFactor') and that y_aux or y_tilde/y_tilde2 (if only_grouped_REs_use_woodbury_identity_) have been calculated (by 'CalcYAux' or 'CalcYtilde')
* \param cov_pars Covariance parameters
* \param[out] cov_grad Gradient wrt covariance parameters
* \param include_error_var If true, the gradient for the marginal variance parameter (=error, nugget effect) is also calculated, otherwise not (set this to true if the nugget effect is not calculated by using the closed-form solution)
* \param save_psi_inv If true, the inverse covariance matrix Psi^-1 is saved for reuse later (e.g. when calculating the Fisher information in Fisher scoring). This option is ignored if the Vecchia approximation is used.
* \param fixed_effects Fixed effects component of location parameter (used only for non-Gaussian data)
*/
void CalcCovParGrad(vec_t& cov_pars, vec_t& cov_grad, bool include_error_var = false,
bool save_psi_inv = false, const double* fixed_effects = nullptr) {
if (gauss_likelihood_) {//Gaussian data
if (include_error_var) {
cov_grad = vec_t::Zero(num_cov_par_);
}
else {
cov_grad = vec_t::Zero(num_cov_par_ - 1);
}
int first_cov_par = include_error_var ? 1 : 0;
for (const auto& cluster_i : unique_clusters_) {
if (vecchia_approx_) {//Vecchia approximation
vec_t u(num_data_per_cluster_[cluster_i]);
vec_t uk(num_data_per_cluster_[cluster_i]);
if (include_error_var) {
u = B_[cluster_i] * y_[cluster_i];
cov_grad[0] += -1. * ((double)(u.transpose() * D_inv_[cluster_i] * u)) / sigma2_ / 2. + num_data_per_cluster_[cluster_i] / 2.;
u = D_inv_[cluster_i] * u;
}
else {
u = D_inv_[cluster_i] * B_[cluster_i] * y_[cluster_i];//TODO: this is already calculated in CalcYAux -> save it there and re-use here?
}
for (int j = 0; j < num_comps_total_; ++j) {
int num_par_comp = re_comps_[cluster_i][j]->num_cov_par_;
for (int ipar = 0; ipar < num_par_comp; ++ipar) {
uk = B_grad_[cluster_i][num_par_comp * j + ipar] * y_[cluster_i];
cov_grad[first_cov_par + ind_par_[j] - 1 + ipar] += ((uk.dot(u) - 0.5 * u.dot(D_grad_[cluster_i][num_par_comp * j + ipar] * u)) / sigma2_ +
0.5 * (D_inv_[cluster_i].diagonal()).dot(D_grad_[cluster_i][num_par_comp * j + ipar].diagonal()));
}
}
}//end vecchia_approx_
else {//not vecchia_approx_
if (only_grouped_REs_use_woodbury_identity_) {
if (include_error_var) {
double yTPsiInvy;
CalcYTPsiIInvY<T_mat>(yTPsiInvy, false, cluster_i, true, true);
cov_grad[0] += -1. * yTPsiInvy / sigma2_ / 2. + num_data_per_cluster_[cluster_i] / 2.;
}
std::vector<T_mat> LInvZtZj_cluster_i;
if (save_psi_inv) {
LInvZtZj_[cluster_i].clear();
LInvZtZj_cluster_i = std::vector<T_mat>(num_comps_total_);
}
for (int j = 0; j < num_comps_total_; ++j) {
sp_mat_t* Z_j = re_comps_[cluster_i][j]->GetZ();
vec_t y_tilde_j = (*Z_j).transpose() * y_[cluster_i];
vec_t y_tilde2_j = (*Z_j).transpose() * y_tilde2_[cluster_i];
double yTPsiIGradPsiPsiIy = y_tilde_j.transpose() * y_tilde_j - 2. * (double)(y_tilde_j.transpose() * y_tilde2_j) + y_tilde2_j.transpose() * y_tilde2_j;
yTPsiIGradPsiPsiIy *= cov_pars[j + 1];
T_mat LInvZtZj;
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ == ZtZj_ and L_inv are diagonal
LInvZtZj = ZtZ_[cluster_i];
LInvZtZj.diagonal().array() /= chol_facts_[cluster_i].diagonal().array();
}
else {
CalcPsiInvSqrtH(ZtZj_[cluster_i][j], LInvZtZj, cluster_i, true);
}
if (save_psi_inv) {//save for later use, e.g., when calculating the Fisher information
LInvZtZj_cluster_i[j] = LInvZtZj;
}
double trace_PsiInvGradPsi = Zj_square_sum_[cluster_i][j] - LInvZtZj.squaredNorm();
trace_PsiInvGradPsi *= cov_pars[j + 1];
cov_grad[first_cov_par + j] += -1. * yTPsiIGradPsiPsiIy / sigma2_ / 2. + trace_PsiInvGradPsi / 2.;
}
if (save_psi_inv) {
LInvZtZj_[cluster_i] = LInvZtZj_cluster_i;
}
}//end only_grouped_REs_use_woodbury_identity_
else {//not only_grouped_REs_use_woodbury_identity_
T_mat psi_inv;
CalcPsiInv(psi_inv, cluster_i);
if (save_psi_inv) {//save for later use, e.g., when calculating the Fisher information
psi_inv_[cluster_i] = psi_inv;
}
if (include_error_var) {
cov_grad[0] += -1. * ((double)(y_[cluster_i].transpose() * y_aux_[cluster_i])) / sigma2_ / 2. + num_data_per_cluster_[cluster_i] / 2.;
}
for (int j = 0; j < num_comps_total_; ++j) {
for (int ipar = 0; ipar < re_comps_[cluster_i][j]->num_cov_par_; ++ipar) {
std::shared_ptr<T_mat> gradPsi = re_comps_[cluster_i][j]->GetZSigmaZtGrad(ipar, true, 1.);
cov_grad[first_cov_par + ind_par_[j] - 1 + ipar] += -1. * ((double)(y_aux_[cluster_i].transpose() * (*gradPsi) * y_aux_[cluster_i])) / sigma2_ / 2. +
((double)(((*gradPsi).cwiseProduct(psi_inv)).sum())) / 2.;
}
}
}//end not only_grouped_REs_use_woodbury_identity_
}//end not vecchia_approx_
}// end loop over clusters
}//end gauss_likelihood_
else {//not gauss_likelihood_
if (include_error_var) {
Log::REFatal("There is no error variance (nugget effect) for non-Gaussian data");
}
cov_grad = vec_t::Zero(num_cov_par_);
vec_t cov_grad_cluster_i(num_cov_par_);
vec_t empty_unused_vec(0);//placeholder for fixed effects gradient
const double* fixed_effects_cluster_i_ptr = nullptr;
vec_t fixed_effects_cluster_i;
for (const auto& cluster_i : unique_clusters_) {
//map fixed effects to clusters (if needed)
vec_t grad_F_cluster_i(num_data_per_cluster_[cluster_i]);
if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {//only one cluster / independent realization and order of data does not matter
fixed_effects_cluster_i_ptr = fixed_effects;
}
else if (fixed_effects != nullptr) {//more than one cluster and order of samples matters
fixed_effects_cluster_i = vec_t(num_data_per_cluster_[cluster_i]);//TODO: Is there a more efficient way that avoids copying?
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
fixed_effects_cluster_i[j] = fixed_effects[data_indices_per_cluster_[cluster_i][j]];
}
fixed_effects_cluster_i_ptr = fixed_effects_cluster_i.data();
}
if (vecchia_approx_) {//Vecchia approximation
likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxVecchia(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
B_[cluster_i],
D_inv_[cluster_i],
B_grad_[cluster_i],
D_grad_[cluster_i],
true,
false,
cov_grad_cluster_i.data(),
empty_unused_vec,
false);
}//end vecchia_approx_
else {//not vecchia_approx_
if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
(*likelihood_[cluster_i]).template CalcGradNegMargLikelihoodLAApproxGroupedRE<T_mat>(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
SigmaI_[cluster_i],
Zt_[cluster_i],
cum_num_rand_eff_[cluster_i],
true,
false,
cov_grad_cluster_i.data(),
empty_unused_vec,
false);
}//end only_grouped_REs_use_woodbury_identity_
else if (only_one_grouped_RE_calculations_on_RE_scale_) {
likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
re_comps_[cluster_i][0]->cov_pars_[0],
re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
true,
false,
cov_grad_cluster_i.data(),
empty_unused_vec,
false);
}
else if (only_one_GP_calculations_on_RE_scale_) {
(*likelihood_[cluster_i]).template CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale<T_mat>(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
ZSigmaZt_[cluster_i], //Note: ZSigmaZt_ contains only Sigma if only_one_GP_calculations_on_RE_scale_==true
re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
re_comps_[cluster_i],
true,
false,
cov_grad_cluster_i.data(),
empty_unused_vec,
false);
}
else {//not only_grouped_REs_use_woodbury_identity_ and not only_one_GP_calculations_on_RE_scale_
(*likelihood_[cluster_i]).template CalcGradNegMargLikelihoodLAApproxStable<T_mat>(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
ZSigmaZt_[cluster_i],
re_comps_[cluster_i],
true,
false,
cov_grad_cluster_i.data(),
empty_unused_vec,
false);
}//end not only_grouped_REs_use_woodbury_identity_
}//end not vecchia_approx_
cov_grad += cov_grad_cluster_i;
}// end loop over clusters
}//end not gauss_likelihood_
}//end CalcCovParGrad
/*!
* \brief Apply a momentum step
* \param it Iteration number
* \param pars Parameters
* \param pars_lag1 Parameters from last iteration
* \param[out] pars_acc Accelerated parameters
* \param nesterov_acc_rate Nesterov acceleration speed
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
* \param exclude_first_log_scale If true, no momentum is applied to the first value, and the momentum step is done on the log-scale for the other values. Default = true
* \param momentum_offset Number of iterations for which no momentum is applied at the beginning
* \param log_scale If true, the momentum step is done on the log-scale
*/
void ApplyMomentumStep(int it, vec_t& pars, vec_t& pars_lag1, vec_t& pars_acc, double nesterov_acc_rate = 0.5,
int nesterov_schedule_version = 0, bool exclude_first_log_scale = true, int momentum_offset = 2, bool log_scale = false) {
double mu = NesterovSchedule(it, nesterov_schedule_version, nesterov_acc_rate, momentum_offset);
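//Standard momentum extrapolation: pars_acc = pars + mu * (pars - pars_lag1) = (1 + mu) * pars - mu * pars_lag1;
//on the log scale, the same step is applied to log(pars), i.e., the update is multiplicative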
int num_par = (int)pars.size();
if (exclude_first_log_scale) {
pars_acc[0] = pars[0];
pars_acc.segment(1, num_par - 1) = ((mu + 1.) * (pars.segment(1, num_par - 1).array().log()) - mu * (pars_lag1.segment(1, num_par - 1).array().log())).exp().matrix();//Momentum is added on the log scale
}
else {
if (log_scale) {
pars_acc = ((mu + 1.) * (pars.array().log()) - mu * (pars_lag1.array().log())).exp().matrix();
}
else {
pars_acc = (mu + 1) * pars - mu * pars_lag1;
}
}
}
/*!
* \brief Calculate gradient for linear fixed-effect coefficients
* \param marg_var Marginal variance parameters sigma^2 (only used for Gaussian data)
* \param beta Linear regression coefficients
* \param[out] grad_beta Gradient for linear regression coefficients
* \param fixed_effects Fixed effects component of location parameter for observed data (only used for non-Gaussian data)
*/
void CalcLinCoefGrad(double marg_var, const vec_t beta, vec_t& grad_beta, const double* fixed_effects = nullptr) {
if (gauss_likelihood_) {
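//For Gaussian data, the negative log-likelihood contains (y - X*beta)^T * Psi^-1 * (y - X*beta) / (2 * sigma^2),
//whose gradient wrt beta is -X^T * Psi^-1 * (y - X*beta) / sigma^2; y_aux below holds Psi^-1 * (y - X*beta)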
const vec_t resid = y_vec_ - (X_ * beta);
SetY(resid.data());
CalcYAux();
vec_t y_aux(num_data_);
GetYAux(y_aux);
grad_beta = (-1. / marg_var) * (X_.transpose()) * y_aux;
//beta += lr * (1. / marg_var) * (X.transpose()) * y_aux;
}
else {
vec_t grad_F(num_data_);
CalcGradFLaplace(grad_F.data(), fixed_effects);
grad_beta = (X_.transpose()) * grad_F;
}
}
/*!
* \brief Update linear fixed-effect coefficients using generalized least squares (GLS)
* \param X Covariate data for linear fixed-effect
* \param[out] beta Linear regression coefficients
*/
void UpdateCoefGLS(den_mat_t& X, vec_t& beta) {
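//GLS estimator: beta = (X^T * Psi^-1 * X)^-1 * X^T * Psi^-1 * y (y_aux_ = Psi^-1 * y must have been calculated before)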
vec_t y_aux(num_data_);
GetYAux(y_aux);
den_mat_t XT_psi_inv_X;
CalcXTPsiInvX(X, XT_psi_inv_X);
beta = XT_psi_inv_X.llt().solve(X.transpose() * y_aux);
}
/*!
* \brief Calculate the Fisher information for covariance parameters. Note: you need to call CalcCovFactor first
* \param cov_pars Covariance parameters
* \param[out] FI Fisher information
* \param transf_scale If true, the derivative is taken on the transformed scale otherwise on the original scale. Default = true
* \param include_error_var If true, the marginal variance parameter is also included, otherwise not
* \param use_saved_psi_inv If false, the inverse covariance matrix Psi^-1 is calculated, otherwise a saved version is used
*/
void CalcFisherInformation(const vec_t& cov_pars, den_mat_t& FI, bool transf_scale = true,
bool include_error_var = false, bool use_saved_psi_inv = false) {
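//Entries of the Fisher information for Gaussian data: FI(j,k) = 0.5 * tr(Psi^-1 * dPsi/dtheta_j * Psi^-1 * dPsi/dtheta_k);
//the branches below evaluate this trace efficiently for the Vecchia approximation, the Woodbury case, and the general case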
if (include_error_var) {
FI = den_mat_t(num_cov_par_, num_cov_par_);
}
else {
FI = den_mat_t(num_cov_par_ - 1, num_cov_par_ - 1);
}
FI.setZero();
int start_cov_pars = include_error_var ? 1 : 0;
for (const auto& cluster_i : unique_clusters_) {
if (vecchia_approx_) {
//Note: if transf_scale==false, then all matrices and derivatives have been calculated on the original scale for the Vecchia approximation, that is why there is no adjustment here
//Calculate auxiliary matrices for use below
sp_mat_t Identity(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
Identity.setIdentity();
sp_mat_t B_inv;
eigen_sp_Lower_sp_RHS_solve(B_[cluster_i], Identity, B_inv, true);//No noticeable difference in (n=500, nn=100/30) compared to using eigen_sp_Lower_sp_RHS_cs_solve()
//eigen_sp_Lower_sp_RHS_cs_solve(B_[cluster_i], Identity, B_inv, true);
sp_mat_t D = sp_mat_t(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
D.setIdentity();
D.diagonal().array() = D_inv_[cluster_i].diagonal().array().pow(-1);
sp_mat_t D_inv_2 = sp_mat_t(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
D_inv_2.setIdentity();
D_inv_2.diagonal().array() = D_inv_[cluster_i].diagonal().array().pow(2);
//Calculate derivative(B) * B^-1
std::vector<sp_mat_t> B_grad_B_inv(num_cov_par_ - 1);
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
B_grad_B_inv[par_nb] = B_grad_[cluster_i][par_nb] * B_inv;
}
//Calculate Fisher information
sp_mat_t D_inv_B_grad_B_inv, B_grad_B_inv_D;
if (include_error_var) {
//First calculate terms for nugget effect / noise variance parameter
if (transf_scale) {//Optimization is done on transformed scale (in particular, log-scale)
//The derivative for the nugget variance on the log scale is the original covariance matrix Psi, i.e. psi_inv_grad_psi_sigma2 is the identity matrix.
FI(0, 0) += num_data_per_cluster_[cluster_i] / 2.;
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
FI(0, par_nb + 1) += (double)((D_inv_[cluster_i].diagonal().array() * D_grad_[cluster_i][par_nb].diagonal().array()).sum()) / 2.;
}
}
else {//Original scale for asymptotic covariance matrix
int ind_grad_nugget = num_cov_par_ - 1;
D_inv_B_grad_B_inv = D_inv_[cluster_i] * B_grad_[cluster_i][ind_grad_nugget] * B_inv;
B_grad_B_inv_D = B_grad_[cluster_i][ind_grad_nugget] * B_inv * D;
double diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array()).sum());
FI(0, 0) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
B_grad_B_inv_D = B_grad_B_inv[par_nb] * D;
diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array() * D_grad_[cluster_i][par_nb].diagonal().array()).sum());
FI(0, par_nb + 1) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
}
}
}
//Remaining covariance parameters
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
D_inv_B_grad_B_inv = D_inv_[cluster_i] * B_grad_B_inv[par_nb];
for (int par_nb_cross = par_nb; par_nb_cross < num_cov_par_ - 1; ++par_nb_cross) {
B_grad_B_inv_D = B_grad_B_inv[par_nb_cross] * D;
double diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][par_nb].diagonal().array() * D_grad_[cluster_i][par_nb_cross].diagonal().array()).sum());
FI(par_nb + start_cov_pars, par_nb_cross + start_cov_pars) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
}
}
}//end vecchia_approx_
else {//not vecchia_approx_
if (only_grouped_REs_use_woodbury_identity_) {
//Notation used below: M = Sigma^-1 + ZtZ, where Sigma = cov(b) and b are the latent random effects; L = chol(M), i.e., M = L*L^T, MInv = M^-1 = L^-T*L^-1
if (!use_saved_psi_inv) {
LInvZtZj_[cluster_i] = std::vector<T_mat>(num_comps_total_);
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ == ZtZj_ and L_inv are diagonal
LInvZtZj_[cluster_i][0] = ZtZ_[cluster_i];
LInvZtZj_[cluster_i][0].diagonal().array() /= chol_facts_[cluster_i].diagonal().array();
}
else {
for (int j = 0; j < num_comps_total_; ++j) {
CalcPsiInvSqrtH(ZtZj_[cluster_i][j], LInvZtZj_[cluster_i][j], cluster_i, true);
}
}
}
if (include_error_var) {
if (transf_scale) {//Optimization is done on transformed scale (error variance factored out and log-scale)
//The derivative for the nugget variance on the transformed scale is the original covariance matrix Psi, i.e. psi_inv_grad_psi_sigma2 is the identity matrix.
FI(0, 0) += num_data_per_cluster_[cluster_i] / 2.;
for (int j = 0; j < num_comps_total_; ++j) {
double trace_PsiInvGradPsi = Zj_square_sum_[cluster_i][j] - LInvZtZj_[cluster_i][j].squaredNorm();
FI(0, j + 1) += trace_PsiInvGradPsi * cov_pars[j + 1] / 2.;
}
}//end transf_scale
else {//not transf_scale
T_mat MInv_ZtZ;//=(Sigma_inv + ZtZ)^-1 * ZtZ
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ == ZtZj_ and L_inv are diagonal
MInv_ZtZ = T_mat(ZtZ_[cluster_i].rows(), ZtZ_[cluster_i].cols());
MInv_ZtZ.setIdentity();//initialize
MInv_ZtZ.diagonal().array() = ZtZ_[cluster_i].diagonal().array() / (chol_facts_[cluster_i].diagonal().array().square());
}
else {
T_mat ZtZ = T_mat(ZtZ_[cluster_i]);//TODO: this step is not needed for sparse matrices (i.e. copying is not required)
MInv_ZtZ = chol_facts_solve_[cluster_i].solve(ZtZ);
}
T_mat MInv_ZtZ_t = MInv_ZtZ.transpose();//TODO: possible without saving MInv_ZtZ.transpose()? -> compiler problem in MInv_ZtZ.cwiseProduct(MInv_ZtZ.transpose())
FI(0, 0) += (num_data_per_cluster_[cluster_i] - 2. * MInv_ZtZ.diagonal().sum() + (double)(MInv_ZtZ.cwiseProduct(MInv_ZtZ_t)).sum()) / (cov_pars[0] * cov_pars[0] * 2.);
for (int j = 0; j < num_comps_total_; ++j) {
T_mat ZjZ_MInv_ZtZ_t = MInv_ZtZ_t * ZtZj_[cluster_i][j];
T_mat ZtZj = T_mat(ZtZj_[cluster_i][j]);
double trace_PsiInvGradPsi;
if (num_comps_total_ > 1) {
T_mat MInv_ZtZj = chol_facts_solve_[cluster_i].solve(ZtZj);
trace_PsiInvGradPsi = Zj_square_sum_[cluster_i][j] - 2. * (double)(LInvZtZj_[cluster_i][j].squaredNorm()) +
(double)(ZjZ_MInv_ZtZ_t.cwiseProduct(MInv_ZtZj)).sum();
}
else {
trace_PsiInvGradPsi = Zj_square_sum_[cluster_i][j] - 2. * (double)(LInvZtZj_[cluster_i][j].squaredNorm()) +
(double)(ZjZ_MInv_ZtZ_t.cwiseProduct(MInv_ZtZ)).sum();
}
FI(0, j + 1) += trace_PsiInvGradPsi / (cov_pars[0] * cov_pars[0] * 2.);
}
}//end not transf_scale
}//end include_error_var
//Remaining covariance parameters
for (int j = 0; j < num_comps_total_; ++j) {
sp_mat_t* Z_j = re_comps_[cluster_i][j]->GetZ();
for (int k = j; k < num_comps_total_; ++k) {
sp_mat_t* Z_k = re_comps_[cluster_i][k]->GetZ();
sp_mat_t Zjt_Zk = (*Z_j).transpose() * (*Z_k);
T_mat LInvZtZj_t_LInvZtZk = LInvZtZj_[cluster_i][j].transpose() * LInvZtZj_[cluster_i][k];
double FI_jk = Zjt_Zk.squaredNorm() + LInvZtZj_t_LInvZtZk.squaredNorm() - 2. * (double)(Zjt_Zk.cwiseProduct(LInvZtZj_t_LInvZtZk)).sum();
if (transf_scale) {
FI_jk *= cov_pars[j + 1] * cov_pars[k + 1];
}
else {
FI_jk /= cov_pars[0] * cov_pars[0];
}
FI(j + start_cov_pars, k + start_cov_pars) += FI_jk / 2.;
}
}
}//end only_grouped_REs_use_woodbury_identity_
else {//not only_grouped_REs_use_woodbury_identity_
T_mat psi_inv;
if (use_saved_psi_inv) {
psi_inv = psi_inv_[cluster_i];
}
else {
CalcPsiInv(psi_inv, cluster_i);
}
if (!transf_scale) {
psi_inv /= cov_pars[0];//psi_inv has been calculated with a transformed parametrization, so we need to divide everything by cov_pars[0] to obtain the covariance matrix
}
//Calculate Psi^-1 * derivative(Psi)
std::vector<T_mat> psi_inv_deriv_psi(num_cov_par_ - 1);
int deriv_par_nb = 0;
for (int j = 0; j < num_comps_total_; ++j) {//there is currently no possibility to loop over the parameters directly
for (int jpar = 0; jpar < re_comps_[cluster_i][j]->num_cov_par_; ++jpar) {
psi_inv_deriv_psi[deriv_par_nb] = psi_inv * *(re_comps_[cluster_i][j]->GetZSigmaZtGrad(jpar, transf_scale, cov_pars[0]));
deriv_par_nb++;
}
}
//Calculate Fisher information
if (include_error_var) {
//First calculate terms for nugget effect / noise variance parameter
if (transf_scale) {//Optimization is done on transformed scale (error variance factored out and log-scale)
//The derivative for the nugget variance on the transformed scale is the original covariance matrix Psi, i.e. psi_inv_grad_psi_sigma2 is the identity matrix.
FI(0, 0) += num_data_per_cluster_[cluster_i] / 2.;
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
FI(0, par_nb + 1) += psi_inv_deriv_psi[par_nb].diagonal().sum() / 2.;
}
}
else {//Original scale for asymptotic covariance matrix
//The derivative for the nugget variance is the identity matrix, i.e. psi_inv_grad_psi_sigma2 = psi_inv.
FI(0, 0) += ((double)(psi_inv.cwiseProduct(psi_inv)).sum()) / 2.;
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
FI(0, par_nb + 1) += ((double)(psi_inv.cwiseProduct(psi_inv_deriv_psi[par_nb])).sum()) / 2.;
}
}
}
//Remaining covariance parameters
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
T_mat psi_inv_grad_psi_par_nb_T = psi_inv_deriv_psi[par_nb].transpose();
FI(par_nb + start_cov_pars, par_nb + start_cov_pars) += ((double)(psi_inv_grad_psi_par_nb_T.cwiseProduct(psi_inv_deriv_psi[par_nb])).sum()) / 2.;
for (int par_nb_cross = par_nb + 1; par_nb_cross < num_cov_par_ - 1; ++par_nb_cross) {
FI(par_nb + start_cov_pars, par_nb_cross + start_cov_pars) += ((double)(psi_inv_grad_psi_par_nb_T.cwiseProduct(psi_inv_deriv_psi[par_nb_cross])).sum()) / 2.;
}
psi_inv_deriv_psi[par_nb].resize(0, 0);//not needed anymore
psi_inv_grad_psi_par_nb_T.resize(0, 0);
}
}//end not only_grouped_REs_use_woodbury_identity_
}//end not vecchia_approx_
}//end loop over clusters
FI.triangularView<Eigen::StrictlyLower>() = FI.triangularView<Eigen::StrictlyUpper>().transpose();
//for (int i = 0; i < std::min((int)FI.rows(),4); ++i) {//For debugging only
// for (int j = i; j < std::min((int)FI.cols(),4); ++j) {
// Log::REInfo("FI(%d,%d) %g", i, j, FI(i, j));
// }
//}
}
/*!
* \brief Calculate the standard deviations for the MLE of the covariance parameters as the diagonal of the inverse Fisher information (on the original scale and not the transformed scale used in the optimization)
* \param cov_pars MLE of covariance parameters
* \param[out] std_dev Standard deviations
*/
void CalcStdDevCovPar(const vec_t& cov_pars, vec_t& std_dev) {
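//Asymptotic MLE theory: cov(theta_hat) is approximately FI(theta_hat)^-1, so the standard deviations
//are the square roots of the diagonal of the inverse Fisher information (computed on the original scale)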
SetCovParsComps(cov_pars);
CalcCovFactor(true, false, cov_pars[0], true);
den_mat_t FI;
CalcFisherInformation(cov_pars, FI, false, true, false);
std_dev = FI.inverse().diagonal().array().sqrt().matrix();
}
/*!
* \brief Calculate the standard deviations for the MLE of the regression coefficients as the diagonal of the inverse Fisher information
* \param cov_pars MLE of covariance parameters
* \param X Covariate data for linear fixed-effect
* \param[out] std_dev Standard deviations
*/
void CalcStdDevCoef(vec_t& cov_pars, const den_mat_t& X, vec_t& std_dev) {
if ((int)std_dev.size() >= num_data_) {
Log::REWarning("Sample size too small to calculate standard deviations for coefficients");
for (int i = 0; i < (int)std_dev.size(); ++i) {
std_dev[i] = std::numeric_limits<double>::quiet_NaN();
}
}
else {
SetCovParsComps(cov_pars);
CalcCovFactor(false, true, 1., false);
den_mat_t FI((int)X.cols(), (int)X.cols());
CalcXTPsiInvX(X, FI);
FI /= cov_pars[0];
std_dev = FI.inverse().diagonal().array().sqrt().matrix();
}
}
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) for one cluster
* \param cluster_i Cluster index for which predictions are made
* \param num_data_pred Total number of prediction locations (over all clusters)
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices of the data points that belong to every cluster
* \param re_group_levels_pred Group levels for the grouped random effects (re_group_levels_pred[j] contains the levels for RE number j)
* \param re_group_rand_coef_data_pred Random coefficient data for grouped REs
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param gp_rand_coef_data_pred Random coefficient data for GPs
* \param predict_cov_mat If true, the predictive/conditional covariance matrix is calculated (default=false) (predict_var and predict_cov_mat cannot be both true)
* \param predict_var If true, the predictive/conditional variances are calculated (default=false) (predict_var and predict_cov_mat cannot be both true)
* \param[out] mean_pred_id Predictive mean
* \param[out] cov_mat_pred_id Predictive covariance matrix
* \param[out] var_pred_id Predictive variances
*/
void CalcPred(gp_id_t cluster_i,
int num_data_pred,
std::map<gp_id_t, int>& num_data_per_cluster_pred,
std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster_pred,
const std::vector<std::vector<string_t>>& re_group_levels_pred,
const double* re_group_rand_coef_data_pred,
const den_mat_t& gp_coords_mat_pred,
const double* gp_rand_coef_data_pred,
bool predict_cov_mat,
bool predict_var,
vec_t& mean_pred_id,
T_mat& cov_mat_pred_id,
vec_t& var_pred_id) {
int num_REs_obs, num_REs_pred;
if (only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {
num_REs_pred = (int)re_group_levels_pred[0].size();
num_REs_obs = re_comps_[cluster_i][0]->GetNumUniqueREs();
}
else if (only_one_GP_calculations_on_RE_scale_) {
num_REs_pred = (int)gp_coords_mat_pred.rows();
num_REs_obs = re_comps_[cluster_i][0]->GetNumUniqueREs();
}
else {
num_REs_pred = num_data_per_cluster_pred[cluster_i];
num_REs_obs = num_data_per_cluster_[cluster_i];
}
if (predict_var) {
if (gauss_likelihood_) {
var_pred_id = vec_t::Ones(num_REs_pred);//nugget effect
}
else {
var_pred_id = vec_t::Zero(num_REs_pred);
}
}
if (predict_cov_mat) {
cov_mat_pred_id = T_mat(num_REs_pred, num_REs_pred);
if (gauss_likelihood_) {
cov_mat_pred_id.setIdentity();//nugget effect
}
else {
cov_mat_pred_id.setZero();
}
}
T_mat cross_cov(num_REs_pred, num_REs_obs);//Cross covariance between prediction and observation points
//Calculate covariance matrices
int cn = 0;//component number
bool dont_add_but_overwrite = true;
//Grouped random effects
if (num_re_group_ > 0) {
if (only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {
std::shared_ptr<RECompGroup<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGroup<T_mat>>(re_comps_[cluster_i][cn]);
re_comp->AddPredCovMatrices(re_group_levels_pred[0],
cross_cov,
cov_mat_pred_id,
predict_cov_mat,
dont_add_but_overwrite,
true,
nullptr);
dont_add_but_overwrite = false;
if (predict_var) {
re_comp->AddPredUncondVar(var_pred_id.data(), num_REs_pred, nullptr);
}
}
else {
for (int j = 0; j < num_re_group_; ++j) {
std::shared_ptr<RECompGroup<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGroup<T_mat>>(re_comps_[cluster_i][cn]);
std::vector<re_group_t> group_data;
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
group_data.push_back(re_group_levels_pred[j][id]);
}
re_comp->AddPredCovMatrices(group_data,
cross_cov,
cov_mat_pred_id,
predict_cov_mat,
dont_add_but_overwrite,
false,
nullptr);
dont_add_but_overwrite = false;
if (predict_var) {
re_comp->AddPredUncondVar(var_pred_id.data(), num_REs_pred, nullptr);
}
cn += 1;
}
if (num_re_group_rand_coef_ > 0) {
//Random coefficient grouped random effects
for (int j = 0; j < num_re_group_rand_coef_; ++j) {
std::shared_ptr<RECompGroup<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGroup<T_mat>>(re_comps_[cluster_i][cn]);
std::vector<re_group_t> group_data;
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
rand_coef_data.push_back(re_group_rand_coef_data_pred[j * num_data_pred + id]);
group_data.push_back(re_group_levels_pred[ind_effect_group_rand_coef_[j] - 1][id]);//subtract 1 since counting starts at one for this index
}
re_comp->AddPredCovMatrices(group_data,
cross_cov,
cov_mat_pred_id,
predict_cov_mat,
false,
false,
rand_coef_data.data());
if (predict_var) {
re_comp->AddPredUncondVar(var_pred_id.data(), num_REs_pred, rand_coef_data.data());
}
cn += 1;
}
}
}
}//end grouped random effects
//Gaussian process
if (num_gp_ > 0) {
std::shared_ptr<RECompGP<T_mat>> re_comp_base = std::dynamic_pointer_cast<RECompGP<T_mat>>(re_comps_[cluster_i][cn]);
re_comp_base->AddPredCovMatrices(re_comp_base->coords_,
gp_coords_mat_pred,
cross_cov,
cov_mat_pred_id,
predict_cov_mat,
dont_add_but_overwrite,
nullptr);
dont_add_but_overwrite = false;
if (predict_var) {
re_comp_base->AddPredUncondVar(var_pred_id.data(), num_REs_pred, nullptr);
}
cn += 1;
if (num_gp_rand_coef_ > 0) {
std::shared_ptr<RECompGP<T_mat>> re_comp;
//Random coefficient Gaussian processes
for (int j = 0; j < num_gp_rand_coef_; ++j) {
re_comp = std::dynamic_pointer_cast<RECompGP<T_mat>>(re_comps_[cluster_i][cn]);
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
rand_coef_data.push_back(gp_rand_coef_data_pred[j * num_data_pred + id]);
}
re_comp->AddPredCovMatrices(re_comp_base->coords_,
gp_coords_mat_pred,
cross_cov,
cov_mat_pred_id,
predict_cov_mat,
false,
rand_coef_data.data());
if (predict_var) {
re_comp->AddPredUncondVar(var_pred_id.data(), num_REs_pred, rand_coef_data.data());
}
cn += 1;
}
}
}
//Calculate predictive means and covariances
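//For Gaussian data, the conditional (predictive) distribution is
//  mean = Sigma_po * Psi^-1 * y and cov = Sigma_pp - Sigma_po * Psi^-1 * Sigma_op,
//where Sigma_po = cross_cov is the covariance between prediction and observed points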
if (gauss_likelihood_) {//Gaussian data
if (only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {
vec_t Zt_y_aux = vec_t::Zero(num_REs_obs);
#pragma omp parallel
{
vec_t Zt_y_aux_private = vec_t::Zero(num_REs_obs);
#pragma omp for
for (data_size_t i = 0; i < num_data_per_cluster_[cluster_i]; ++i) {
Zt_y_aux_private[re_comps_[cluster_i][0]->random_effects_indices_of_data_[i]] += y_aux_[cluster_i][i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_REs_obs; ++i_re) {
Zt_y_aux[i_re] += Zt_y_aux_private[i_re];
}
}//end omp critical
}//end omp parallel
mean_pred_id = cross_cov * Zt_y_aux;
}//end only_one_grouped_RE_calculations_on_RE_scale_for_prediction_
else {
mean_pred_id = cross_cov * y_aux_[cluster_i];
}
if ((predict_cov_mat || predict_var) && only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {
sp_mat_t* Z = re_comps_[cluster_i][0]->GetZ();
T_mat cross_cov_temp = cross_cov;
cross_cov = cross_cov_temp * (*Z).transpose();
cross_cov_temp.resize(0, 0);
//TODO (low-prio): things could be done more efficiently (using random_effects_indices_of_data_) as ZtZ_ is diagonal
}
if (predict_cov_mat){
if (only_grouped_REs_use_woodbury_identity_) {
T_mat ZtM_aux = T_mat(Zt_[cluster_i] * cross_cov.transpose());
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
ZtM_aux = chol_facts_[cluster_i].diagonal().array().inverse().matrix().asDiagonal() * ZtM_aux;
cov_mat_pred_id -= (cross_cov * T_mat(cross_cov.transpose()) - ZtM_aux.transpose() * ZtM_aux);
}
else {
cov_mat_pred_id -= (cross_cov * T_mat(cross_cov.transpose()) - ZtM_aux.transpose() * chol_facts_solve_[cluster_i].solve(ZtM_aux));
}
}
else {
cov_mat_pred_id -= (cross_cov * (chol_facts_solve_[cluster_i].solve(T_mat(cross_cov.transpose()))));
}
}//end predict_cov_mat
if (predict_var) {
T_mat M_aux2;
if (only_grouped_REs_use_woodbury_identity_) {
T_mat ZtM_aux = T_mat(Zt_[cluster_i] * cross_cov.transpose());
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
M_aux2 = chol_facts_[cluster_i].diagonal().array().inverse().matrix().asDiagonal() * ZtM_aux;
}
else {
CalcLInvH(chol_facts_[cluster_i], ZtM_aux, M_aux2, true);
}
M_aux2 = M_aux2.cwiseProduct(M_aux2);
cross_cov = cross_cov.cwiseProduct(cross_cov);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_REs_pred; ++i) {
var_pred_id[i] -= cross_cov.row(i).sum() - M_aux2.col(i).sum();
}
}//end only_grouped_REs_use_woodbury_identity_
else {//not only_grouped_REs_use_woodbury_identity_
T_mat M_auxT = cross_cov.transpose();
CalcLInvH(chol_facts_[cluster_i], M_auxT, M_aux2, true);
M_aux2 = M_aux2.cwiseProduct(M_aux2);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_REs_pred; ++i) {
var_pred_id[i] -= M_aux2.col(i).sum();
}
}//end not only_grouped_REs_use_woodbury_identity_
}//end predict_var
}//end gauss_likelihood_
if (!gauss_likelihood_) {//not gauss_likelihood_
const double* fixed_effects_cluster_i_ptr = nullptr;
// Note that fixed_effects_cluster_i_ptr is not used since calc_mode == false
// The mode has been calculated already before in the Predict() function above
if (vecchia_approx_) {
likelihood_[cluster_i]->PredictLAApproxVecchia(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
B_[cluster_i],
D_inv_[cluster_i],
cross_cov,
mean_pred_id,
cov_mat_pred_id,
var_pred_id,
predict_cov_mat,
predict_var,
false);
}
else {
if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
likelihood_[cluster_i]->PredictLAApproxGroupedRE(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
SigmaI_[cluster_i],
Zt_[cluster_i],
cross_cov,
mean_pred_id,
cov_mat_pred_id,
var_pred_id,
predict_cov_mat,
predict_var,
false);
}
else if (only_one_grouped_RE_calculations_on_RE_scale_) {
likelihood_[cluster_i]->PredictLAApproxOnlyOneGroupedRECalculationsOnREScale(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
re_comps_[cluster_i][0]->cov_pars_[0],
re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
cross_cov,
mean_pred_id,
cov_mat_pred_id,
var_pred_id,
predict_cov_mat,
predict_var,
false);
}
else if (only_one_GP_calculations_on_RE_scale_) {
likelihood_[cluster_i]->PredictLAApproxOnlyOneGPCalculationsOnREScale(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
ZSigmaZt_[cluster_i], //Note: ZSigmaZt_ contains only Sigma if only_one_GP_calculations_on_RE_scale_==true
re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
cross_cov,
mean_pred_id,
cov_mat_pred_id,
var_pred_id,
predict_cov_mat,
predict_var,
false);
}
else {
likelihood_[cluster_i]->PredictLAApproxStable(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
ZSigmaZt_[cluster_i],
cross_cov,
mean_pred_id,
cov_mat_pred_id,
var_pred_id,
predict_cov_mat,
predict_var,
false);
}
}
}//end not gauss_likelihood_
}//end CalcPred
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the covariance matrix of the observable process when observed locations appear first in the ordering
* \param CondObsOnly If true, the nearest neighbors for the predictions are found only among the observed data
* \param cluster_i Cluster index for which predictions are made
* \param num_data_pred Total number of prediction locations (over all clusters)
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices of the data points that belong to every cluster
* \param gp_coords_mat_obs Coordinates for observed locations
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param gp_rand_coef_data_pred Random coefficient data for GPs
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPredVecchiaObservedFirstOrder(bool CondObsOnly, gp_id_t cluster_i, int num_data_pred,
std::map<gp_id_t, int>& num_data_per_cluster_pred, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster_pred,
const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred, const double* gp_rand_coef_data_pred,
bool predict_cov_mat, vec_t& mean_pred_id, T_mat& cov_mat_pred_id) {
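//Sketch of the identities used (standard result for Vecchia approximations with observed locations ordered first):
//  the joint approximation gives B_joint = [B_o, 0; Bpo, Bp] and D_joint = diag(D_o, Dp), so that
//  y_p | y_o ~ N(-Bp^-1 * Bpo * y_o, Bp^-1 * Dp * Bp^-T)
//(Bpo, Bp, and Dp are constructed below)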
int num_data_cli = num_data_per_cluster_[cluster_i];
int num_data_pred_cli = num_data_per_cluster_pred[cluster_i];
//Find nearest neighbors
den_mat_t coords_all(num_data_cli + num_data_pred_cli, dim_gp_coords_);
coords_all << gp_coords_mat_obs, gp_coords_mat_pred;
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_pred_cli);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_pred_cli);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_pred_cli);
if (CondObsOnly) {
find_nearest_neighbors_Veccia_fast(coords_all, num_data_cli + num_data_pred_cli, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, num_data_cli, num_data_cli - 1);
}
else {//find neighbors among both the observed and prediction locations
find_nearest_neighbors_Veccia_fast(coords_all, num_data_cli + num_data_pred_cli, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, num_data_cli, -1);
}
//Random coefficients
std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_pred_cli);
if (num_gp_rand_coef_ > 0) {
for (int j = 0; j < num_gp_rand_coef_; ++j) {
std::vector<double> rand_coef_data = re_comps_[cluster_i][ind_intercept_gp_ + j + 1]->rand_coef_data_;//First entries are the observed data, then the predicted data
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {//TODO: maybe do the following in parallel? (see CalcPredVecchiaPredictedFirstOrder)
rand_coef_data.push_back(gp_rand_coef_data_pred[j * num_data_pred + id]);
}
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_pred_cli; ++i) {
if (j == 0) {
z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef_);
}
int dim_z = (int)nearest_neighbors_cluster_i[i].size() + 1;
vec_t coef_vec(dim_z);
coef_vec(0) = rand_coef_data[num_data_cli + i];
if ((num_data_cli + i) > 0) {
for (int ii = 1; ii < dim_z; ++ii) {
coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]];
}
}
z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose();
}
}
}
// Determine Triplet for initializing Bpo and Bp
std::vector<Triplet_t> entries_init_Bpo, entries_init_Bp;
for (int i = 0; i < num_data_pred_cli; ++i) {
entries_init_Bp.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) {
if (nearest_neighbors_cluster_i[i][inn] < num_data_cli) {//nearest neighbor belongs to observed data
entries_init_Bpo.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.));
}
else {//nearest neighbor belongs to predicted data
entries_init_Bp.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn] - num_data_cli, 0.));
}
}
}
sp_mat_t Bpo(num_data_pred_cli, num_data_cli);
sp_mat_t Bp(num_data_pred_cli, num_data_pred_cli);
Bpo.setFromTriplets(entries_init_Bpo.begin(), entries_init_Bpo.end());//initialize matrices (in order that the code below can be run in parallel)
Bp.setFromTriplets(entries_init_Bp.begin(), entries_init_Bp.end());
sp_mat_t Dp(num_data_pred_cli, num_data_pred_cli);
Dp.setIdentity();//Put 1 on the diagonal (for nugget effect)
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_pred_cli; ++i) {
int num_nn = (int)nearest_neighbors_cluster_i[i].size();
//define covariance and gradient matrices
den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn
den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn
den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just mock arguments for the functions below
for (int j = 0; j < num_gp_total_; ++j) {
if (j == 0) {
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
}
else {//random coefficient GPs
den_mat_t cov_mat_obs_neighbors_j;
den_mat_t cov_mat_between_neighbors_j;
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
//multiply by coefficient matrix
cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
cov_mat_between_neighbors += cov_mat_between_neighbors_j;
}
}//end loop over components j
//Calculate matrices A and D as well as their derivatives
//1. add first summand of matrix D (ZCZ^T_{ii})
for (int j = 0; j < num_gp_total_; ++j) {
double d_comp_j = re_comps_[cluster_i][ind_intercept_gp_ + j]->cov_pars_[0];
if (j > 0) {//random coefficient
d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
}
Dp.coeffRef(i, i) += d_comp_j;
}
//2. remaining terms
cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
den_mat_t A_i(1, num_nn);//dim = 1 x nn
A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
for (int inn = 0; inn < num_nn; ++inn) {
if (nearest_neighbors_cluster_i[i][inn] < num_data_cli) {//nearest neighbor belongs to observed data
Bpo.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
}
else {
Bp.coeffRef(i, nearest_neighbors_cluster_i[i][inn] - num_data_cli) -= A_i(0, inn);
}
}
Dp.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
}//end loop over data i
mean_pred_id = -Bpo * y_[cluster_i];
if (!CondObsOnly) {
sp_L_solve(Bp.valuePtr(), Bp.innerIndexPtr(), Bp.outerIndexPtr(), num_data_pred_cli, mean_pred_id.data());
}
if (predict_cov_mat) {
if (CondObsOnly) {
cov_mat_pred_id = Dp;
}
else {
sp_mat_t Identity(num_data_pred_cli, num_data_pred_cli);
Identity.setIdentity();
sp_mat_t Bp_inv;
eigen_sp_Lower_sp_RHS_cs_solve(Bp, Identity, Bp_inv, true);
cov_mat_pred_id = T_mat(Bp_inv * Dp * Bp_inv.transpose());
}
}
}//end CalcPredVecchiaObservedFirstOrder
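/* Editorial sketch of the algebra implemented above (notation follows the
 * variable names Bp, Bpo, Dp): the assembled sparse factors imply
 *   E[y_p | y_o]   = -Bp^{-1} * Bpo * y_o
 *   Cov[y_p | y_o] =  Bp^{-1} * Dp * Bp^{-T}
 * When CondObsOnly is true all neighbors are observed points, so Bp is the
 * identity and the two branches above reduce to mean = -Bpo * y_o and
 * cov = Dp. */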
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the covariance matrix of the observable process when prediction locations appear first in the ordering
* \param cluster_i Cluster index for which predictions are made
* \param num_data_pred Total number of prediction locations (over all clusters)
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices for data points that belong to every cluster
* \param gp_coords_mat_obs Coordinates for observed locations
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param gp_rand_coef_data_pred Random coefficient data for GPs
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPredVecchiaPredictedFirstOrder(gp_id_t cluster_i, int num_data_pred,
std::map<gp_id_t, int>& num_data_per_cluster_pred, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster_pred,
const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred, const double* gp_rand_coef_data_pred,
bool predict_cov_mat, vec_t& mean_pred_id, T_mat& cov_mat_pred_id) {
int num_data_cli = num_data_per_cluster_[cluster_i];
int num_data_pred_cli = num_data_per_cluster_pred[cluster_i];
int num_data_tot = num_data_cli + num_data_pred_cli;
//Find nearest neighbors
den_mat_t coords_all(num_data_tot, dim_gp_coords_);
coords_all << gp_coords_mat_pred, gp_coords_mat_obs;
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_tot);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_tot);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_tot);
find_nearest_neighbors_Veccia_fast(coords_all, num_data_tot, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1);
//Prepare data for random coefficients
std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_tot);
if (num_gp_rand_coef_ > 0) {
for (int j = 0; j < num_gp_rand_coef_; ++j) {
std::vector<double> rand_coef_data(num_data_tot);//First entries are the predicted data, then the observed data
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_pred_cli; ++i) {
rand_coef_data[i] = gp_rand_coef_data_pred[j * num_data_pred + data_indices_per_cluster_pred[cluster_i][i]];
}
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_cli; ++i) {
rand_coef_data[num_data_pred_cli + i] = re_comps_[cluster_i][ind_intercept_gp_ + j + 1]->rand_coef_data_[i];
}
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_tot; ++i) {
if (j == 0) {
z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef_);
}
int dim_z = (int)nearest_neighbors_cluster_i[i].size() + 1;
vec_t coef_vec(dim_z);
coef_vec(0) = rand_coef_data[i];
if (i > 0) {
for (int ii = 1; ii < dim_z; ++ii) {
coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]];
}
}
z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose();
}
}
}
// Determine Triplet for initializing Bo, Bop, and Bp
std::vector<Triplet_t> entries_init_Bo, entries_init_Bop, entries_init_Bp;
for (int i = 0; i < num_data_pred_cli; ++i) {
entries_init_Bp.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) {
entries_init_Bp.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.));
}
}
for (int i = 0; i < num_data_cli; ++i) {
entries_init_Bo.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i + num_data_pred_cli].size(); ++inn) {
if (nearest_neighbors_cluster_i[i + num_data_pred_cli][inn] < num_data_pred_cli) {//nearest neighbor belongs to predicted data
entries_init_Bop.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i + num_data_pred_cli][inn], 0.));
}
else {//nearest neighbor belongs to observed data
entries_init_Bo.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i + num_data_pred_cli][inn] - num_data_pred_cli, 0.));
}
}
}
sp_mat_t Bo(num_data_cli, num_data_cli);
sp_mat_t Bop(num_data_cli, num_data_pred_cli);
sp_mat_t Bp(num_data_pred_cli, num_data_pred_cli);
Bo.setFromTriplets(entries_init_Bo.begin(), entries_init_Bo.end());//initialize matrices (in order that the code below can be run in parallel)
Bop.setFromTriplets(entries_init_Bop.begin(), entries_init_Bop.end());
Bp.setFromTriplets(entries_init_Bp.begin(), entries_init_Bp.end());
sp_mat_t Do_inv(num_data_cli, num_data_cli);
sp_mat_t Dp_inv(num_data_pred_cli, num_data_pred_cli);
Do_inv.setIdentity();//Put 1 on the diagonal (for nugget effect)
Dp_inv.setIdentity();
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_tot; ++i) {
int num_nn = (int)nearest_neighbors_cluster_i[i].size();
//define covariance and gradient matrices
den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn
den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn
den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just mock arguments for the functions below
if (i > 0) {
for (int j = 0; j < num_gp_total_; ++j) {
if (j == 0) {
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
}
else {//random coefficient GPs
den_mat_t cov_mat_obs_neighbors_j;
den_mat_t cov_mat_between_neighbors_j;
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
//multiply by coefficient matrix
cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
cov_mat_between_neighbors += cov_mat_between_neighbors_j;
}
}//end loop over components j
}
//Calculate matrices A and D as well as their derivatives
//1. add first summand of matrix D (ZCZ^T_{ii})
for (int j = 0; j < num_gp_total_; ++j) {
double d_comp_j = re_comps_[cluster_i][ind_intercept_gp_ + j]->cov_pars_[0];
if (j > 0) {//random coefficient
d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
}
if (i < num_data_pred_cli) {
Dp_inv.coeffRef(i, i) += d_comp_j;
}
else {
Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) += d_comp_j;
}
}
//2. remaining terms
if (i > 0) {
cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
den_mat_t A_i(1, num_nn);//dim = 1 x nn
A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
for (int inn = 0; inn < num_nn; ++inn) {
if (i < num_data_pred_cli) {
Bp.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
}
else {
if (nearest_neighbors_cluster_i[i][inn] < num_data_pred_cli) {//nearest neighbor belongs to predicted data
Bop.coeffRef(i - num_data_pred_cli, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
}
else {
Bo.coeffRef(i - num_data_pred_cli, nearest_neighbors_cluster_i[i][inn] - num_data_pred_cli) -= A_i(0, inn);
}
}
}
if (i < num_data_pred_cli) {
Dp_inv.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
}
else {
Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
}
}
if (i < num_data_pred_cli) {
Dp_inv.coeffRef(i, i) = 1 / Dp_inv.coeffRef(i, i);
}
else {
Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) = 1 / Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli);
}
}//end loop over data i
sp_mat_t cond_prec = Bp.transpose() * Dp_inv * Bp + Bop.transpose() * Do_inv * Bop;
chol_sp_mat_t CholFact;
CholFact.compute(cond_prec);
if (predict_cov_mat) {
sp_mat_t Identity(num_data_pred_cli, num_data_pred_cli);
Identity.setIdentity();
sp_mat_t cond_prec_chol = CholFact.matrixL();
sp_mat_t cond_prec_chol_inv;
eigen_sp_Lower_sp_RHS_cs_solve(cond_prec_chol, Identity, cond_prec_chol_inv, true);
cov_mat_pred_id = T_mat(cond_prec_chol_inv.transpose() * cond_prec_chol_inv);
mean_pred_id = -cov_mat_pred_id * Bop.transpose() * Do_inv * Bo * y_[cluster_i];
}
else {
mean_pred_id = -CholFact.solve(Bop.transpose() * Do_inv * Bo * y_[cluster_i]);
}
}//end CalcPredVecchiaPredictedFirstOrder
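/* Editorial sketch of the algebra implemented above: with prediction
 * locations ordered first, the assembled factors define the conditional
 * precision matrix
 *   Lambda_p = Bp^T * Dp_inv * Bp + Bop^T * Do_inv * Bop   (= cond_prec),
 * the conditional mean solves Lambda_p * mu_p = -Bop^T * Do_inv * Bo * y_o
 * (the sparse Cholesky solve above), and the predictive covariance, when
 * requested, is Lambda_p^{-1}. */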
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the latent process when observed locations appear first in the ordering
* \param CondObsOnly If true, the nearest neighbors for the predictions are found only among the observed data
* \param cluster_i Cluster index for which predictions are made
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param gp_coords_mat_obs Coordinates for observed locations
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPredVecchiaLatentObservedFirstOrder(bool CondObsOnly, gp_id_t cluster_i,
std::map<gp_id_t, int>& num_data_per_cluster_pred,
const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred,
bool predict_cov_mat, vec_t& mean_pred_id, T_mat& cov_mat_pred_id) {
if (num_gp_rand_coef_ > 0) {
Log::REFatal("The Vecchia approximation for latent process(es) is currently not implemented when having random coefficients");
}
int num_data_cli = num_data_per_cluster_[cluster_i];
int num_data_pred_cli = num_data_per_cluster_pred[cluster_i];
int num_data_tot = num_data_cli + num_data_pred_cli;
//Find nearest neighbors
den_mat_t coords_all(num_data_cli + num_data_pred_cli, dim_gp_coords_);
coords_all << gp_coords_mat_obs, gp_coords_mat_pred;
//Determine number of unique observation locations
std::vector<int> uniques;//unique points
std::vector<int> unique_idx;//used for constructing incidence matrix Z_ if there are duplicates
DetermineUniqueDuplicateCoords(gp_coords_mat_obs, num_data_cli, uniques, unique_idx);
int num_coord_unique_obs = (int)uniques.size();
//Determine unique locations (observed and predicted)
DetermineUniqueDuplicateCoords(coords_all, num_data_tot, uniques, unique_idx);
int num_coord_unique = (int)uniques.size();
den_mat_t coords_all_unique;
if ((int)uniques.size() == num_data_tot) {//no multiple observations at the same locations -> no incidence matrix needed
coords_all_unique = coords_all;
}
else {
coords_all_unique = coords_all(uniques, Eigen::all);
}
//Determine incidence matrices
sp_mat_t Z_o = sp_mat_t(num_data_cli, uniques.size());
sp_mat_t Z_p = sp_mat_t(num_data_pred_cli, uniques.size());
for (int i = 0; i < num_data_tot; ++i) {
if (i < num_data_cli) {
Z_o.insert(i, unique_idx[i]) = 1.;
}
else {
Z_p.insert(i - num_data_cli, unique_idx[i]) = 1.;
}
}
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_coord_unique);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_coord_unique);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_coord_unique);
if (CondObsOnly) {//find neighbors among the observed locations only
find_nearest_neighbors_Veccia_fast(coords_all_unique, num_coord_unique, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, num_coord_unique_obs - 1);
}
else {//find neighbors among both the observed and prediction locations
find_nearest_neighbors_Veccia_fast(coords_all_unique, num_coord_unique, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1);
}
// Determine Triplet for initializing B
std::vector<Triplet_t> entries_init_B;
for (int i = 0; i < num_coord_unique; ++i) {
entries_init_B.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) {
entries_init_B.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.));
}
}
sp_mat_t B(num_coord_unique, num_coord_unique);
B.setFromTriplets(entries_init_B.begin(), entries_init_B.end());//initialize matrices (in order that the code below can be run in parallel)
sp_mat_t D(num_coord_unique, num_coord_unique);
D.setIdentity();
D.diagonal().array() = 0.;
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_coord_unique; ++i) {
int num_nn = (int)nearest_neighbors_cluster_i[i].size();
//define covariance and gradient matrices
den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn
den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn
den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just mock arguments for the functions below
if (i > 0) {
re_comps_[cluster_i][ind_intercept_gp_]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component
re_comps_[cluster_i][ind_intercept_gp_]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
}
//Calculate matrices A and D as well as their derivatives
//1. add first summand of matrix D (ZCZ^T_{ii})
D.coeffRef(i, i) = re_comps_[cluster_i][ind_intercept_gp_]->cov_pars_[0];
//2. remaining terms
if (i > 0) {
den_mat_t A_i(1, num_nn);//dim = 1 x nn
A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
for (int inn = 0; inn < num_nn; ++inn) {
B.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
}
D.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
}
}//end loop over data i
//Calculate D_inv and B_inv in order to calculate Sigma and Sigma^-1
sp_mat_t D_inv(num_coord_unique, num_coord_unique);
D_inv.setIdentity();
D_inv.diagonal().array() = D.diagonal().array().pow(-1);
sp_mat_t Identity_all(num_coord_unique, num_coord_unique);
Identity_all.setIdentity();
sp_mat_t B_inv;
eigen_sp_Lower_sp_RHS_cs_solve(B, Identity_all, B_inv, true);
//Calculate inverse of covariance matrix for observed data using the Woodbury identity
sp_mat_t Z_o_T = Z_o.transpose();
sp_mat_t M_aux_Woodbury = B.transpose() * D_inv * B + Z_o_T * Z_o;
chol_sp_mat_t CholFac_M_aux_Woodbury;
CholFac_M_aux_Woodbury.compute(M_aux_Woodbury);
if (predict_cov_mat) {
//Using Eigen's solver
sp_mat_t M_aux_Woodbury2 = CholFac_M_aux_Woodbury.solve(Z_o_T);
sp_mat_t Identity_obs(num_data_cli, num_data_cli);
Identity_obs.setIdentity();
sp_mat_t ZoSigmaZoT_plusI_Inv = -Z_o * M_aux_Woodbury2 + Identity_obs;
sp_mat_t ZpSigmaZoT = Z_p * B_inv * D * B_inv.transpose() * Z_o_T;
sp_mat_t M_aux = ZpSigmaZoT * ZoSigmaZoT_plusI_Inv;
mean_pred_id = M_aux * y_[cluster_i];
sp_mat_t Identity_pred(num_data_pred_cli, num_data_pred_cli);
Identity_pred.setIdentity();
cov_mat_pred_id = T_mat(Z_p * B_inv * D * B_inv.transpose() * Z_p.transpose() + Identity_pred - M_aux * ZpSigmaZoT.transpose());
}
else {
vec_t resp_aux = Z_o_T * y_[cluster_i];
vec_t resp_aux2 = CholFac_M_aux_Woodbury.solve(resp_aux);
resp_aux = y_[cluster_i] - Z_o * resp_aux2;
mean_pred_id = Z_p * B_inv * D * B_inv.transpose() * Z_o_T * resp_aux;
}
}//end CalcPredVecchiaLatentObservedFirstOrder
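/* Editorial sketch of the Woodbury step above: with Sigma^{-1} = B^T * D^{-1} * B
 * for the latent process and incidence matrices Z_o (observed) and Z_p
 * (predicted), the identity
 *   (Z_o * Sigma * Z_o^T + I)^{-1} = I - Z_o * (B^T * D^{-1} * B + Z_o^T * Z_o)^{-1} * Z_o^T
 * means only the sparse matrix M_aux_Woodbury needs a Cholesky factorization;
 * the predictive mean is then
 *   mu_p = Z_p * Sigma * Z_o^T * (Z_o * Sigma * Z_o^T + I)^{-1} * y,
 * which both branches above compute. */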
friend class REModel;
};
} // namespace GPBoost
#endif // GPB_RE_MODEL_TEMPLATE_H_
|
matrix.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M AAA TTTTT RRRR IIIII X X %
% MM MM A A T R R I X X %
% M M M AAAAA T RRRR I X %
% M M A A T R R I X X %
% M M A A T R R IIIII X X %
% %
% %
% MagickCore Matrix Methods %
% %
% Software Design %
% Cristy %
% August 2007 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
/*
Typedef declaration.
*/
struct _MatrixInfo
{
CacheType
type;
size_t
columns,
rows,
stride;
MagickSizeType
length;
MagickBooleanType
mapped,
synchronize;
char
path[MagickPathExtent];
int
file;
void
*elements;
SemaphoreInfo
*semaphore;
size_t
signature;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMatrixInfo() allocates the MatrixInfo structure.
%
% The format of the AcquireMatrixInfo method is:
%
% MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
% const size_t stride,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: the matrix columns.
%
% o rows: the matrix rows.
%
% o stride: the matrix stride.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(SIGBUS)
static void MatrixSignalHandler(int status)
{
ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif
static inline MagickOffsetType WriteMatrixElements(
const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PWRITE)
LockSemaphoreInfo(matrix_info->semaphore);
if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
{
UnlockSemaphoreInfo(matrix_info->semaphore);
return((MagickOffsetType) -1);
}
#endif
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PWRITE)
count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX));
#else
count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
}
#if !defined(MAGICKCORE_HAVE_PWRITE)
UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
return(i);
}
static MagickBooleanType SetMatrixExtent(
MatrixInfo *magick_restrict matrix_info,MagickSizeType length)
{
MagickOffsetType
count,
extent,
offset;
if (length != (MagickSizeType) ((MagickOffsetType) length))
return(MagickFalse);
offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END);
if (offset < 0)
return(MagickFalse);
if ((MagickSizeType) offset >= length)
return(MagickTrue);
extent=(MagickOffsetType) length-1;
count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
if (matrix_info->synchronize != MagickFalse)
(void) posix_fallocate(matrix_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
(void) signal(SIGBUS,MatrixSignalHandler);
#endif
return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns,
const size_t rows,const size_t stride,ExceptionInfo *exception)
{
char
*synchronize;
MagickBooleanType
status;
MatrixInfo
*matrix_info;
matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info));
if (matrix_info == (MatrixInfo *) NULL)
return((MatrixInfo *) NULL);
(void) ResetMagickMemory(matrix_info,0,sizeof(*matrix_info));
matrix_info->signature=MagickCoreSignature;
matrix_info->columns=columns;
matrix_info->rows=rows;
matrix_info->stride=stride;
matrix_info->semaphore=AcquireSemaphoreInfo();
synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (synchronize != (const char *) NULL)
{
matrix_info->synchronize=IsStringTrue(synchronize);
synchronize=DestroyString(synchronize);
}
matrix_info->length=(MagickSizeType) columns*rows*stride;
if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride))
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'","matrix cache");
return(DestroyMatrixInfo(matrix_info));
}
matrix_info->type=MemoryCache;
status=AcquireMagickResource(AreaResource,matrix_info->length);
if ((status != MagickFalse) &&
(matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length)))
{
status=AcquireMagickResource(MemoryResource,matrix_info->length);
if (status != MagickFalse)
{
matrix_info->mapped=MagickFalse;
matrix_info->elements=AcquireMagickMemory((size_t)
matrix_info->length);
if (matrix_info->elements == NULL)
{
matrix_info->mapped=MagickTrue;
matrix_info->elements=MapBlob(-1,IOMode,0,(size_t)
matrix_info->length);
}
if (matrix_info->elements == (unsigned short *) NULL)
RelinquishMagickResource(MemoryResource,matrix_info->length);
}
}
matrix_info->file=(-1);
if (matrix_info->elements == (unsigned short *) NULL)
{
status=AcquireMagickResource(DiskResource,matrix_info->length);
if (status == MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'","matrix cache");
return(DestroyMatrixInfo(matrix_info));
}
matrix_info->type=DiskCache;
matrix_info->file=AcquireUniqueFileResource(matrix_info->path);
if (matrix_info->file == -1)
return(DestroyMatrixInfo(matrix_info));
status=AcquireMagickResource(MapResource,matrix_info->length);
if (status != MagickFalse)
{
status=SetMatrixExtent(matrix_info,matrix_info->length);
if (status != MagickFalse)
matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0,
(size_t) matrix_info->length);
if (matrix_info->elements != NULL)
matrix_info->type=MapCache;
else
RelinquishMagickResource(MapResource,matrix_info->length);
}
}
return(matrix_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMagickMatrix() allocates and returns a matrix in the form of an
% array of pointers to an array of doubles, with all values pre-set to zero.
%
% This is used to generate the two dimensional matrix, and vectors required
% for the GaussJordanElimination() method below, for solving a system of
% simultaneous equations.
%
% The format of the AcquireMagickMatrix method is:
%
% double **AcquireMagickMatrix(const size_t number_rows,
% const size_t size)
%
% A description of each parameter follows:
%
% o number_rows: the number pointers for the array of pointers
% (first dimension).
%
% o size: the size of the array of doubles each pointer points to
% (second dimension).
%
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
const size_t size)
{
double
**matrix;
register ssize_t
i,
j;
matrix=(double **) AcquireQuantumMemory(number_rows,sizeof(*matrix));
if (matrix == (double **) NULL)
return((double **) NULL);
for (i=0; i < (ssize_t) number_rows; i++)
{
matrix[i]=(double *) AcquireQuantumMemory(size,sizeof(*matrix[i]));
if (matrix[i] == (double *) NULL)
{
for (j=0; j < i; j++)
matrix[j]=(double *) RelinquishMagickMemory(matrix[j]);
matrix=(double **) RelinquishMagickMemory(matrix);
return((double **) NULL);
}
for (j=0; j < (ssize_t) size; j++)
matrix[i][j]=0.0;
}
return(matrix);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
% with the matrix.
%
% The format of the DestroyMatrixInfo method is:
%
% MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
assert(matrix_info != (MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
LockSemaphoreInfo(matrix_info->semaphore);
switch (matrix_info->type)
{
case MemoryCache:
{
if (matrix_info->mapped == MagickFalse)
matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
else
{
(void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
matrix_info->elements=(unsigned short *) NULL;
}
RelinquishMagickResource(MemoryResource,matrix_info->length);
break;
}
case MapCache:
{
(void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
matrix_info->elements=NULL;
RelinquishMagickResource(MapResource,matrix_info->length);
}
case DiskCache:
{
if (matrix_info->file != -1)
(void) close(matrix_info->file);
(void) RelinquishUniqueFileResource(matrix_info->path);
RelinquishMagickResource(DiskResource,matrix_info->length);
break;
}
default:
break;
}
UnlockSemaphoreInfo(matrix_info->semaphore);
RelinquishSemaphoreInfo(&matrix_info->semaphore);
return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G a u s s J o r d a n E l i m i n a t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussJordanElimination() returns a matrix in reduced row echelon form,
% while simultaneously reducing and thus solving the augmented results
% matrix.
%
% See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
% The format of the GaussJordanElimination method is:
%
% MagickBooleanType GaussJordanElimination(double **matrix,
% double **vectors,const size_t rank,const size_t number_vectors)
%
% A description of each parameter follows:
%
% o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
% o vectors: the additional matrix augmenting the matrix for row reduction,
% producing an 'array of column vectors'.
%
% o rank: The size of the matrix (both rows and columns).
% Also represents the number of terms that need to be solved.
%
% o number_vectors: Number of vector columns, augmenting the above matrix.
% Usually 1, but can be more for more complex equation solving.
%
% Note that the 'matrix' is given as an 'array of row pointers' of rank size.
% That is, values can be assigned as matrix[row][column] where 'row' is
% typically the equation, and 'column' is the term of the equation.
% That is, the matrix is in the form of a 'row first array'.
%
% However 'vectors' is an 'array of column pointers' which can have any number
% of columns, with each column array the same 'rank' size as 'matrix'.
%
% This allows for simpler handling of the results, especially if only one
% column 'vector' is all that is required to produce the desired solution.
%
% For example, the 'vectors' can consist of a pointer to a simple array of
% doubles, when only one set of simultaneous equations is to be solved from
% the given set of coefficient weighted terms.
%
% double **matrix = AcquireMagickMatrix(8UL,8UL);
% double coefficients[8];
% double *vectors[1] = { coefficients };
% ...
% GaussJordanElimination(matrix, vectors, 8UL, 1UL);
%
% However, by specifying more 'columns' (as an 'array of vector columns'),
% you can use this function to solve a set of 'separable' equations.
%
% For example, a distortion function where u = U(x,y) and v = V(x,y),
% and the functions U() and V() have separate coefficients, but are being
% generated from a common x,y->u,v data set.
%
% Another example is generation of a color gradient from a set of colors at
% specific coordinates, such as a list x,y -> r,g,b,a.
%
% You can also use the 'vectors' to generate an inverse of the given 'matrix'
% though as a 'column first array' rather than a 'row first array'. For
% details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
*/
MagickPrivate MagickBooleanType GaussJordanElimination(double **matrix,
double **vectors,const size_t rank,const size_t number_vectors)
{
#define GaussJordanSwap(x,y) \
{ \
if ((x) != (y)) \
{ \
(x)+=(y); \
(y)=(x)-(y); \
(x)=(x)-(y); \
} \
}
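/*
  Editorial note on the macro above: it swaps two doubles in place using
  three arithmetic operations (x+y and two subtractions) instead of a
  temporary; the (x) != (y) guard merely skips the arithmetic when the two
  values are already equal.
*/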
double
max,
scale;
register ssize_t
i,
j,
k;
ssize_t
column,
*columns,
*pivots,
row,
*rows;
columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns));
rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows));
pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots));
if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) ||
(pivots == (ssize_t *) NULL))
{
if (pivots != (ssize_t *) NULL)
pivots=(ssize_t *) RelinquishMagickMemory(pivots);
if (columns != (ssize_t *) NULL)
columns=(ssize_t *) RelinquishMagickMemory(columns);
if (rows != (ssize_t *) NULL)
rows=(ssize_t *) RelinquishMagickMemory(rows);
return(MagickFalse);
}
(void) ResetMagickMemory(columns,0,rank*sizeof(*columns));
(void) ResetMagickMemory(rows,0,rank*sizeof(*rows));
(void) ResetMagickMemory(pivots,0,rank*sizeof(*pivots));
column=0;
row=0;
for (i=0; i < (ssize_t) rank; i++)
{
max=0.0;
for (j=0; j < (ssize_t) rank; j++)
if (pivots[j] != 1)
{
for (k=0; k < (ssize_t) rank; k++)
if (pivots[k] != 0)
{
if (pivots[k] > 1)
return(MagickFalse);
}
else
if (fabs(matrix[j][k]) >= max)
{
max=fabs(matrix[j][k]);
row=j;
column=k;
}
}
pivots[column]++;
if (row != column)
{
for (k=0; k < (ssize_t) rank; k++)
GaussJordanSwap(matrix[row][k],matrix[column][k]);
for (k=0; k < (ssize_t) number_vectors; k++)
GaussJordanSwap(vectors[k][row],vectors[k][column]);
}
rows[i]=row;
columns[i]=column;
if (matrix[column][column] == 0.0)
return(MagickFalse);  /* singularity */
scale=PerceptibleReciprocal(matrix[column][column]);
matrix[column][column]=1.0;
for (j=0; j < (ssize_t) rank; j++)
matrix[column][j]*=scale;
for (j=0; j < (ssize_t) number_vectors; j++)
vectors[j][column]*=scale;
for (j=0; j < (ssize_t) rank; j++)
if (j != column)
{
scale=matrix[j][column];
matrix[j][column]=0.0;
for (k=0; k < (ssize_t) rank; k++)
matrix[j][k]-=scale*matrix[column][k];
for (k=0; k < (ssize_t) number_vectors; k++)
vectors[k][j]-=scale*vectors[k][column];
}
}
for (j=(ssize_t) rank-1; j >= 0; j--)
if (columns[j] != rows[j])
for (i=0; i < (ssize_t) rank; i++)
GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]);
pivots=(ssize_t *) RelinquishMagickMemory(pivots);
rows=(ssize_t *) RelinquishMagickMemory(rows);
columns=(ssize_t *) RelinquishMagickMemory(columns);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x C o l u m n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixColumns() returns the number of columns in the matrix.
%
% The format of the GetMatrixColumns method is:
%
% size_t GetMatrixColumns(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info)
{
assert(matrix_info != (MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
return(matrix_info->columns);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixElement() returns the specified element in the matrix.
%
% The format of the GetMatrixElement method is:
%
% MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: return the matrix element in this buffer.
%
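% A minimal usage sketch (editorial; illustrative only, assuming the matrix
% was created with stride sizeof(double)); note that out-of-range offsets are
% clamped to the matrix edge:
%
% double value;
% if (GetMatrixElement(matrix_info,x,y,&value) != MagickFalse)
% (void) printf("%g\n",value);
%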
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
if (x < 0L)
return(0L);
if (x >= (ssize_t) columns)
return((ssize_t) (columns-1));
return(x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
if (y < 0L)
return(0L);
if (y >= (ssize_t) rows)
return((ssize_t) (rows-1));
return(y);
}
static inline MagickOffsetType ReadMatrixElements(
const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
const MagickSizeType length,unsigned char *magick_restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PREAD)
LockSemaphoreInfo(matrix_info->semaphore);
if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
{
UnlockSemaphoreInfo(matrix_info->semaphore);
return((MagickOffsetType) -1);
}
#endif
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PREAD)
count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX));
#else
count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
}
#if !defined(MAGICKCORE_HAVE_PREAD)
UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
return(i);
}
MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
const ssize_t x,const ssize_t y,void *value)
{
MagickOffsetType
count,
i;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
EdgeX(x,matrix_info->columns);
if (matrix_info->type != DiskCache)
{
(void) memcpy(value,(unsigned char *) matrix_info->elements+i*
matrix_info->stride,matrix_info->stride);
return(MagickTrue);
}
count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
matrix_info->stride,(unsigned char *) value);
if (count != (MagickOffsetType) matrix_info->stride)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x R o w s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixRows() returns the number of rows in the matrix.
%
% The format of the GetMatrixRows method is:
%
% size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
return(matrix_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L e a s t S q u a r e s A d d T e r m s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LeastSquaresAddTerms() adds one set of terms and associated results to the
% given matrix and vectors for solving using least-squares function fitting.
%
% The format of the LeastSquaresAddTerms method is:
%
% void LeastSquaresAddTerms(double **matrix,double **vectors,
% const double *terms,const double *results,const size_t rank,
% const size_t number_vectors);
%
% A description of each parameter follows:
%
% o matrix: the square matrix to add given terms/results to.
%
% o vectors: the result vectors to add terms/results to.
%
% o terms: the pre-calculated terms (without the unknown coefficient
% weights) that form the equation being added.
%
% o results: the result(s) that should be generated from the given terms
% weighted by the yet-to-be-solved coefficients.
%
% o rank: the rank or size of the dimensions of the square matrix.
% Also the length of vectors, and number of terms being added.
%
% o number_vectors: Number of result vectors, and number or results being
% added. Also represents the number of separable systems of equations
% that is being solved.
%
% Example of use...
%
% 2 dimensional Affine Equations (which are separable)
% c0*x + c2*y + c4*1 => u
% c1*x + c3*y + c5*1 => v
%
% double **matrix = AcquireMagickMatrix(3UL,3UL);
% double **vectors = AcquireMagickMatrix(2UL,3UL);
% double terms[3], results[2];
% ...
% for each given x,y -> u,v
% terms[0] = x;
% terms[1] = y;
% terms[2] = 1;
% results[0] = u;
% results[1] = v;
% LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL);
% ...
% if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) {
% c0 = vectors[0][0];
% c2 = vectors[0][1];
% c4 = vectors[0][2];
% c1 = vectors[1][0];
% c3 = vectors[1][1];
% c5 = vectors[1][2];
% }
% else
% printf("Matrix unsolvable\n);
% RelinquishMagickMatrix(matrix,3UL);
% RelinquishMagickMatrix(vectors,2UL);
%
*/
MagickPrivate void LeastSquaresAddTerms(double **matrix,double **vectors,
const double *terms,const double *results,const size_t rank,
const size_t number_vectors)
{
register ssize_t
i,
j;
for (j=0; j < (ssize_t) rank; j++)
{
for (i=0; i < (ssize_t) rank; i++)
matrix[i][j]+=terms[i]*terms[j];
for (i=0; i < (ssize_t) number_vectors; i++)
vectors[i][j]+=results[i]*terms[j];
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t r i x T o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatrixToImage() returns a matrix as an image. The matrix elements must be
% of type double, otherwise nonsense is returned.
%
% The format of the MatrixToImage method is:
%
% Image *MatrixToImage(const MatrixInfo *matrix_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o exception: return any errors or warnings in this structure.
%
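% A minimal usage sketch (editorial; illustrative only, with columns, rows,
% and exception as in AcquireMatrixInfo()); element values are linearly
% rescaled to the quantum range of a grayscale image:
%
% MatrixInfo *matrix_info = AcquireMatrixInfo(columns,rows,sizeof(double),exception);
% ...set elements with SetMatrixElement()...
% Image *image = MatrixToImage(matrix_info,exception);
% matrix_info = DestroyMatrixInfo(matrix_info);
%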
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
double
max_value,
min_value,
scale_factor,
value;
Image
*image;
MagickBooleanType
status;
ssize_t
y;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (matrix_info->stride < sizeof(double))
return((Image *) NULL);
/*
Determine range of matrix.
*/
(void) GetMatrixElement(matrix_info,0,0,&value);
min_value=value;
max_value=value;
for (y=0; y < (ssize_t) matrix_info->rows; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) matrix_info->columns; x++)
{
if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
continue;
if (value < min_value)
min_value=value;
else
if (value > max_value)
max_value=value;
}
}
if ((min_value == 0.0) && (max_value == 0.0))
scale_factor=0;
else
if (min_value == max_value)
{
scale_factor=(double) QuantumRange/min_value;
min_value=0;
}
else
scale_factor=(double) QuantumRange/(max_value-min_value);
/*
Convert matrix to image.
*/
image=AcquireImage((ImageInfo *) NULL,exception);
image->columns=matrix_info->columns;
image->rows=matrix_info->rows;
image->colorspace=GRAYColorspace;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
value;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
continue;
value=scale_factor*(value-min_value);
*q=ClampToQuantum(value);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N u l l M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NullMatrix() sets all elements of the matrix to zero.
%
% The format of the NullMatrix method is:
%
% MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
register ssize_t
x;
ssize_t
count,
y;
unsigned char
value;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
if (matrix_info->type != DiskCache)
{
(void) ResetMagickMemory(matrix_info->elements,0,(size_t)
matrix_info->length);
return(MagickTrue);
}
value=0;
(void) lseek(matrix_info->file,0,SEEK_SET);
for (y=0; y < (ssize_t) matrix_info->rows; y++)
{
for (x=0; x < (ssize_t) matrix_info->length; x++)
{
count=write(matrix_info->file,&value,sizeof(value));
if (count != (ssize_t) sizeof(value))
break;
}
if (x < (ssize_t) matrix_info->length)
break;
}
return(y < (ssize_t) matrix_info->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e l i n q u i s h M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishMagickMatrix() frees the previously acquired matrix (array of
% pointers to arrays of doubles).
%
% The format of the RelinquishMagickMatrix method is:
%
% double **RelinquishMagickMatrix(double **matrix,
% const size_t number_rows)
%
% A description of each parameter follows:
%
% o matrix: the matrix to relinquish
%
% o number_rows: the first dimension of the acquired matrix (number of
% pointers)
%
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
const size_t number_rows)
{
register ssize_t
i;
if (matrix == (double **) NULL )
return(matrix);
for (i=0; i < (ssize_t) number_rows; i++)
matrix[i]=(double *) RelinquishMagickMemory(matrix[i]);
matrix=(double **) RelinquishMagickMemory(matrix);
return(matrix);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMatrixElement() sets the specified element in the matrix.
%
% The format of the SetMatrixElement method is:
%
% MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,const void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: set the matrix element to this value.
%
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
const ssize_t x,const ssize_t y,const void *value)
{
MagickOffsetType
count,
i;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
i=(MagickOffsetType) y*matrix_info->columns+x;
if ((i < 0) ||
((MagickSizeType) (i*matrix_info->stride) >= matrix_info->length))
return(MagickFalse);
if (matrix_info->type != DiskCache)
{
(void) memcpy((unsigned char *) matrix_info->elements+i*
matrix_info->stride,value,matrix_info->stride);
return(MagickTrue);
}
count=WriteMatrixElements(matrix_info,i*matrix_info->stride,
matrix_info->stride,(unsigned char *) value);
if (count != (MagickOffsetType) matrix_info->stride)
return(MagickFalse);
return(MagickTrue);
}
|
nv_laplacian.c | #include "nv_core.h"
#include "nv_ip_laplacian.h"
#define NV_LAPLACIAN_KERNEL_SIZE 3
void nv_laplacian1(nv_matrix_t *edge, const nv_matrix_t *gray, float level)
{
static const float kernel[3][3] = {
{ 0.0f, 1.0f, 0.0f },
{ 1.0f,-4.0f, 1.0f },
{ 0.0f, 1.0f, 0.0f }
};
nv_laplacian(edge, kernel, gray, level);
}
void nv_laplacian2(nv_matrix_t *edge, const nv_matrix_t *gray, float level)
{
static const float kernel[3][3] = {
{ 1.0f, 1.0f, 1.0f },
{ 1.0f,-8.0f, 1.0f },
{ 1.0f, 1.0f, 1.0f }
};
nv_laplacian(edge, kernel, gray, level);
}
void nv_laplacian3(nv_matrix_t *edge, const nv_matrix_t *gray, float level)
{
static const float kernel[3][3] = {
{ -1.0f, 2.0f, -1.0f },
{ 2.0f,-4.0f, 2.0f },
{ -1.0f, 2.0f, -1.0f }
};
nv_laplacian(edge, kernel, gray, level);
}
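/* Editorial sketch of nv_laplacian() below: each output pixel is the clamped
 * convolution response v = max(0, sum_{krow,kcol} kernel * gray). If level is
 * non-zero the response is scaled by level and clamped to 255; if level == 0
 * the image is binarized instead, using the data-dependent threshold
 * max_v / 8, where max_v is the largest response over the whole image. */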
void nv_laplacian(nv_matrix_t *edge,
const float kernel[NV_LAPLACIAN_KERNEL_SIZE][NV_LAPLACIAN_KERNEL_SIZE],
const nv_matrix_t *gray,
float level)
{
int row;
int kernel_offset = NV_LAPLACIAN_KERNEL_SIZE / 2;
float max_v = -FLT_MAX;
#ifdef _OPENMP
int i;
	int threads = omp_get_max_threads(); /* maximum team size; omp_get_num_threads() returns 1 in a serial region */
float *thread_max_v = (float *)malloc(sizeof(float) * threads);
for (i = 0; i < threads; ++i) {
thread_max_v[i] = -FLT_MAX;
}
#endif
nv_matrix_zero(edge);
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
for (row = kernel_offset; row < gray->rows - kernel_offset; ++row) {
int col;
#ifdef _OPENMP
int thread_index = omp_get_thread_num();
#endif
for (col = kernel_offset; col < gray->cols - kernel_offset; ++col) {
int krow, kcol;
float v = 0.0f;
for (krow = 0; krow < NV_LAPLACIAN_KERNEL_SIZE; ++krow) {
for (kcol = 0; kcol < NV_LAPLACIAN_KERNEL_SIZE; ++kcol) {
v += NV_MAT3D_V(gray, row + krow - kernel_offset, col + kcol - kernel_offset, 0)
* kernel[krow][kcol];
}
}
v = max(v, 0.0f);
if (level != 0.0f) {
NV_MAT3D_V(edge, row, col, 0) = min(v * level, 255.0f);
} else {
#ifdef _OPENMP
thread_max_v[thread_index] = max(v, thread_max_v[thread_index]);
#else
max_v = max(v, max_v);
#endif
NV_MAT3D_V(edge, row, col, 0) = v;
}
}
}
#ifdef _OPENMP
if (level == 0.0f) {
for (i = 0; i < threads; ++i) {
max_v = max(max_v, thread_max_v[i]);
}
}
#endif
if (level == 0.0f) {
float th = max_v / 8.0f;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (row = kernel_offset; row < gray->rows - kernel_offset; ++row) {
int col;
for (col = kernel_offset; col < gray->cols - kernel_offset; ++col) {
if (NV_MAT3D_V(edge, row, col, 0) > th) {
NV_MAT3D_V(edge, row, col, 0) = 255.0f;
} else {
NV_MAT3D_V(edge, row, col, 0) = 0.0f;
}
}
}
}
#ifdef _OPENMP
free(thread_max_v);
#endif
}
|
cuda_utils.h | /*!
* Modifications Copyright 2017-2018 H2O.ai, Inc.
*/
#ifndef _CUDA_UTILS_H
#define _CUDA_UTILS_H
#include "cuda_utils2.h"
inline int checkwDev(int wDev){
#ifdef DEBUG
int nVis = 0;
#pragma omp critical
{
CUDACHECK(cudaGetDeviceCount(&nVis));
}
for (int i = 0; i < nVis; i++){
cudaDeviceProp props;
CUDACHECK(cudaGetDeviceProperties(&props, i));
printf("Visible: Compute %d.%d CUDA device: [%s] : cudadeviceid: %2d of %2d devices [0x%02x] mpc=%d\n", props.major, props.minor, props.name, i\
, nVis, props.pciBusID, props.multiProcessorCount); fflush(stdout);
}
if(wDev>nVis-1){
fprintf(stderr,"Not enough GPUs, where wDev=%d and nVis=%d\n",wDev,nVis);
exit(1);
return(1);
}
else return(0);
#else
return(0);
#endif
}
#endif
|
ordering_op-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file ordering_op-inl.h
* \brief Function definition of matrix related operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#include <mxnet/operator_util.h>
#include <dmlc/optional.h>
#include <mshadow/tensor.h>
#include <algorithm>
#include <vector>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "./sort_op.h"
#include "./indexing_op.h"
namespace mshadow {
template<typename xpu, int src_dim, typename DType, int dst_dim>
inline Tensor<xpu, dst_dim, DType> inplace_reshape(Tensor<xpu, src_dim, DType> src,
Shape<dst_dim> target_shape) {
CHECK_EQ(src.CheckContiguous(), true);
return Tensor<xpu, dst_dim, DType>(src.dptr_, target_shape, src.stream_);
}
}  // namespace mshadow
namespace mxnet {
namespace op {
// These enums are only visible within this header
namespace topk_enum {
enum TopKReturnType {kReturnValue, kReturnIndices, kReturnMask, kReturnBoth};
} // topk_enum
struct TopKParam : public dmlc::Parameter<TopKParam> {
dmlc::optional<int> axis;
int k;
int ret_typ;
bool is_ascend;
int dtype;
DMLC_DECLARE_PARAMETER(TopKParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to choose the top k indices."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(k).set_default(1)
.describe("Number of top elements to select,"
" should be always smaller than or equal to the element number in the given axis."
" A global sort is performed if set k < 1.");
DMLC_DECLARE_FIELD(ret_typ).set_default(topk_enum::kReturnIndices)
.add_enum("value", topk_enum::kReturnValue)
.add_enum("indices", topk_enum::kReturnIndices)
.add_enum("mask", topk_enum::kReturnMask)
.add_enum("both", topk_enum::kReturnBoth)
.describe("The return type.\n"
" \"value\" means to return the top k values,"
" \"indices\" means to return the indices of the top k values,"
" \"mask\" means to return a mask array containing 0 and 1. 1 means the top k values."
" \"both\" means to return a list of both values and indices of top k elements.");
DMLC_DECLARE_FIELD(is_ascend).set_default(false)
.describe("Whether to choose k largest or k smallest elements."
" Top K largest elements will be chosen if set to false.");
DMLC_DECLARE_FIELD(dtype)
.add_enum("uint8", mshadow::kUint8)
.add_enum("int32", mshadow::kInt32)
.add_enum("float16", mshadow::kFloat16)
.add_enum("float32", mshadow::kFloat32)
.add_enum("float64", mshadow::kFloat64)
.set_default(mshadow::kFloat32)
.describe("DType of the output indices when ret_typ is \"indices\" or \"both\". "
"An error will be raised if the selected data type cannot precisely represent the "
"indices.");
}
};
struct SortParam : public dmlc::Parameter<SortParam> {
dmlc::optional<int> axis;
bool is_ascend;
DMLC_DECLARE_PARAMETER(SortParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to choose sort the input tensor."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(is_ascend).set_default(true)
.describe("Whether to sort in ascending or descending order.");
}
};
struct ArgSortParam : public dmlc::Parameter<ArgSortParam> {
dmlc::optional<int> axis;
bool is_ascend;
int dtype;
DMLC_DECLARE_PARAMETER(ArgSortParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to sort the input tensor."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(is_ascend).set_default(true)
.describe("Whether to sort in ascending or descending order.");
DMLC_DECLARE_FIELD(dtype)
.add_enum("uint8", mshadow::kUint8)
.add_enum("int32", mshadow::kInt32)
.add_enum("float16", mshadow::kFloat16)
.add_enum("float32", mshadow::kFloat32)
.add_enum("float64", mshadow::kFloat64)
.set_default(mshadow::kFloat32)
.describe("DType of the output indices. It is only valid when ret_typ is \"indices\" or"
" \"both\". An error will be raised if the selected data type cannot precisely "
"represent the indices.");
}
};
inline void ParseTopKParam(const TShape& src_shape, const TopKParam& param, TShape *target_shape,
int *batch_size, int *element_num, int *axis, int *k,
bool *do_transpose, bool *is_ascend) {
*do_transpose = false;
*k = param.k;
*is_ascend = param.is_ascend;
// get batch_size, axis and element_num
if (!static_cast<bool>(param.axis)) { // No axis given
*axis = 0;
*batch_size = 1;
*element_num = src_shape.Size();
} else {
*axis = param.axis.value();
if (*axis < 0) {
*axis += src_shape.ndim();
}
CHECK(*axis >= 0 && *axis < static_cast<int>(src_shape.ndim()))
<< "Invalid axis! axis should be between 0 and "
<< src_shape.ndim() << ", found axis=" << *axis;
*batch_size = src_shape.Size() / src_shape[*axis];
*element_num = src_shape[*axis];
if (*axis != static_cast<int>(src_shape.ndim()) - 1) {
*do_transpose = true;
}
}
// get k
if (param.k <= 0) {
*k = *element_num;
}
// get target_shape
if (!static_cast<bool>(param.axis)) {
if (param.ret_typ != topk_enum::kReturnMask) {
*target_shape = mshadow::Shape1(*k);
} else {
*target_shape = src_shape;
}
} else {
*target_shape = src_shape;
if (param.ret_typ != topk_enum::kReturnMask) {
(*target_shape)[*axis] = *k;
}
}
CHECK(*k >= 1 && *k <= *element_num) << "k must be between 1 and "
<< *element_num << ", got k = " << *k;
}
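// Example (illustrative, not from the original source): for src_shape = (2, 3, 4),
// axis = 1, k = 2 and ret_typ = "value", this yields batch_size = 8,
// element_num = 3, do_transpose = true (axis is not the last one) and
// target_shape = (2, 2, 4).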
using namespace mshadow;
struct fill_ind_to_one {
template<typename DType>
MSHADOW_XINLINE static void Map(int i, const int* indices, DType* out) {
out[indices[i]] = static_cast<DType>(1);
}
};
struct fill_ind {
template<typename DType>
MSHADOW_XINLINE static void Map(int i, const int* indices, const DType* val,
int req, DType* out) {
KERNEL_ASSIGN(out[indices[i]], req, val[i]);
}
};
template<typename DType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<cpu, 1, DType>& dat,
const Tensor<cpu, 1, int>& ind,
const Tensor<cpu, 1, char>& work,
int K, int N, bool is_ascend,
Stream<cpu> *s) {
// Use a full sort when K is relatively large (more than one eighth of N).
const bool full_sort(K*8 > N);
// Batch size.
const int M(work.size(0)/(sizeof(DType)*N));
const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount());
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < M; ++i) {
// Tensor `work` stores the flattened source data, while `dat` stores the sorted result.
DType *vals = reinterpret_cast<DType*>(work.dptr_);
DType *sorted_vals = dat.dptr_+i*N;
int *indices = ind.dptr_+i*N;
if (is_ascend) {
if (full_sort) {
std::sort(indices, indices+N,
[&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
} else {
std::partial_sort(indices, indices+K, indices+N,
[&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
}
} else {
if (full_sort) {
std::sort(indices, indices+N,
[&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
} else {
std::partial_sort(indices, indices+K, indices+N,
[&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
}
}
for (int j = 0; j < K; ++j) {
sorted_vals[j] = vals[indices[j]];
}
}
}
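#if 0
// Minimal sketch (not part of the original source) of the index-sort idiom used
// in TopKSort above: order positions by the values they point at, then gather.
// <algorithm> and <vector> are already included by this header.
inline void TopKSortSketch() {
  std::vector<float> vals = {3.f, 1.f, 4.f, 1.f, 5.f};
  std::vector<int> idx = {0, 1, 2, 3, 4};
  const int K = 2;
  std::partial_sort(idx.begin(), idx.begin() + K, idx.end(),
                    [&](int a, int b) { return vals[a] > vals[b]; });
  // idx[0..K) is now {4, 2}: the positions of the two largest values (5 and 4).
}
#endif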
#ifdef __CUDACC__
template<typename DType>
MSHADOW_XINLINE bool TopKCompare(DType val1, int ind1, DType val2, int ind2, bool is_ascend) {
// Negative indices denote undefined values, which always compare as worse than defined
// ones (arbitrarily large for ascending order, arbitrarily small for descending order).
return (ind2 < 0) || (ind1 >= 0 && ((is_ascend && val1 < val2) || (!is_ascend && val1 > val2)));
}
template<typename DType>
MSHADOW_XINLINE void MergeTopK(int K, DType *val1, int *ind1, DType *val2, int *ind2,
bool is_ascend) {
// In-place merge of two sorted top-K lists into val1/ind1. First determine the intervals
// [0..i1], [0..i2] of the two lists that will be part of the merged list.
int i1(K-1), i2(K-1);
for (int i = 0; i < K; ++i) {
if (TopKCompare(val1[i1], ind1[i1], val2[i2], ind2[i2], is_ascend)) {
--i2;
} else {
--i1;
}
}
// Now merge the lists from back to front.
for (int i = K; i--;) {
if (i2 < 0 || (i1 >= 0 && TopKCompare(val2[i2], ind2[i2], val1[i1], ind1[i1], is_ascend))) {
val1[i] = val1[i1];
ind1[i] = ind1[i1];
--i1;
} else {
val1[i] = val2[i2];
ind1[i] = ind2[i2];
--i2;
}
}
}
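// Worked example (illustrative): K = 2, is_ascend = false, val1 = {9, 7},
// val2 = {8, 6}, all indices valid. The first pass ends with i1 = 0, i2 = 0;
// the back-to-front pass then writes val1 = {9, 8}, the top-2 of the union.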
template<typename DType>
__global__ void PartialSortSmallK(int K, int N, DType *val, int *ind, bool is_ascend) {
// Buffer for pairwise reduction.
extern __shared__ int buff[];
// Start of buffer sections associated with this thread.
const int offset(threadIdx.x*K);
int *ind_buff = &buff[offset];
DType *val_buff = reinterpret_cast<DType*>(&buff[blockDim.x*K])+offset;
// Initialize top-K values for this thread.
for (int i = 0; i < K; ++i) {
ind_buff[i] = -1;
}
// Range of values this thread cares about. Each thread block processes
// a different batch item (i.e. a different set of ind/val where we
// have to select the top-K elements). All threads within the same
// block work on the same batch item.
const int first(blockIdx.x*N+threadIdx.x), last((blockIdx.x+1)*N);
// Select top-K from this range and store it sorted in the buffer.
// We assume a small K, so linear insertion is o.k.
for (int i = first; i < last; i += blockDim.x) {
DType cur_val(val[i]);
int cur_ind(ind[i]);
for (int j = K; j-- && TopKCompare(cur_val, cur_ind, val_buff[j], ind_buff[j], is_ascend); ) {
if (j+1 < K) {
val_buff[j+1] = val_buff[j];
ind_buff[j+1] = ind_buff[j];
}
val_buff[j] = cur_val;
ind_buff[j] = cur_ind;
}
}
// Recursive merge of the per-thread sorted lists for this thread block. Note that
// blockDim.x is not necessarily a power of two, hence the additional check against last_s.
for (unsigned int s = (blockDim.x+1)/2, last_s = blockDim.x;
last_s > 1; last_s = s, s = (s+1)/2) {
__syncthreads();
if (threadIdx.x < s && threadIdx.x+s < last_s) {
MergeTopK(K, val_buff, ind_buff, val_buff+s*K, ind_buff+s*K, is_ascend);
}
}
// Final updates on master thread.
if (threadIdx.x == 0) {
for (int i = 0; i < K; ++i) {
ind[blockIdx.x*N+i] = ind_buff[i];
val[blockIdx.x*N+i] = val_buff[i];
}
}
}
template<typename DType>
MSHADOW_FORCE_INLINE void TopKSort(const Tensor<gpu, 1, DType>& dat,
const Tensor<gpu, 1, int>& ind,
const Tensor<gpu, 1, char>& work,
int K, int N, bool is_ascend,
Stream<gpu> *s) {
// Use full sort for all but very small K for which we
// can do a partial sort entirely within shared memory.
const bool full_sort(K > 5);
// Batch size.
const int M(dat.size(0)/N);
if (full_sort) {
// Divide workspace into two parts. The first one is needed to store batch ids.
size_t alignment = std::max(sizeof(DType), sizeof(int));
size_t id_size = PadBytes(sizeof(int) * ind.size(0), alignment);
Tensor<gpu, 1, int> batch_id(reinterpret_cast<int*>(work.dptr_), Shape1(ind.size(0)), s);
Tensor<gpu, 1, char> sort_work(work.dptr_+id_size, Shape1(work.size(0)-id_size), s);
mxnet::op::SortByKey(dat, ind, is_ascend, &sort_work);
if (M > 1) {
// Back to back sorting. Note that mxnet::op::SortByKey is a stable sort.
batch_id = ind / N;
mxnet::op::SortByKey(batch_id, dat, true, &sort_work);
batch_id = ind / N;
mxnet::op::SortByKey(batch_id, ind, true, &sort_work);
}
} else {
const int nthreads(mshadow::cuda::kBaseThreadNum);
PartialSortSmallK<<<M, nthreads, nthreads*K*(sizeof(int)+sizeof(DType)),
mshadow::Stream<gpu>::GetStream(s)>>>
(K, N, dat.dptr_, ind.dptr_, is_ascend);
}
}
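// Note on the full-sort path above: values are first sorted globally by key;
// because mxnet::op::SortByKey is stable, re-sorting both `dat` and `ind` by
// batch id (ind / N) restores the per-batch grouping while preserving each
// batch's internal sorted order.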
#endif
/*!
 * \brief Implementation of the TopK operation
 *
 * \param ctx the running context
 * \param resource temporary resource handler
 * \param req the request types for the output blobs
 * \param src the source blob
 * \param ret the destination blobs
 * \param param the topk parameters (including k, the number of elements to keep)
 * \tparam xpu the device type.
 * \tparam DType type of the output value/mask.
 * \tparam IDType type of the output indices.
 */
template<typename xpu, typename DType, typename IDType>
void TopKImpl(const RunContext &ctx,
const Resource &resource,
const std::vector<OpReqType>& req,
const TBlob& src,
const std::vector<TBlob>& ret,
const TopKParam& param) {
using namespace mshadow;
using namespace mshadow::expr;
// 1. Parse and initialize information
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 1, char> workspace;
Tensor<xpu, 1, char> temp_workspace;
Tensor<xpu, 1, DType> sorted_dat;
Tensor<xpu, 1, int> indices, sel_indices;
int batch_size, element_num; // number of batches and the size of each batch
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
int k = 0;
size_t alignment = std::max(sizeof(DType), sizeof(int));
TShape target_shape;
ParseTopKParam(src.shape_, param,
&target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDType>())
<< "'IDType' does not have a sufficient precision to represent the indices of the input array. "
<< "The total element_num is " << element_num << ", but the selected IDType can only represent "
<< mxnet::common::MaxIntegerValue<IDType>() << " elements";
Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s);
size_t temp_size = 0;
// Temp space needed by the gpu-based full sorts.
temp_size = std::max(temp_size,
mxnet::op::SortByKeyWorkspaceSize<int, int, xpu>(src.Size()));
temp_size = std::max(temp_size,
mxnet::op::SortByKeyWorkspaceSize<int, DType, xpu>(src.Size()));
temp_size = std::max(temp_size,
mxnet::op::SortByKeyWorkspaceSize<DType, int, xpu>(src.Size()));
// Additional temp space for gpu full sorts for batch ids.
temp_size += PadBytes(sizeof(int) * src.Size(), alignment);
// Temp space for cpu sorts.
temp_size = std::max(temp_size, static_cast<size_t>(sizeof(DType) * src.Size()));
size_t workspace_size = temp_size + PadBytes(sizeof(DType) * src.Size(), alignment)
+ PadBytes(sizeof(int) * src.Size(), alignment);
if (param.ret_typ == topk_enum::kReturnMask) {
workspace_size += PadBytes(sizeof(int) * batch_size * k, alignment);
}
workspace = resource.get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
char* workspace_curr_ptr = workspace.dptr_;
sorted_dat = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
Shape1(src.Size()), s); // contain sorted dat
workspace_curr_ptr += PadBytes(sizeof(DType) * src.Size(), alignment);
indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
Shape1(src.Size()), s); // indices in the original matrix
workspace_curr_ptr += PadBytes(sizeof(int) * src.Size(), alignment);
if (param.ret_typ == topk_enum::kReturnMask) {
sel_indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
Shape1(batch_size * k), s);
workspace_curr_ptr += PadBytes(sizeof(int) * batch_size * k, alignment);
CHECK_EQ(sel_indices.CheckContiguous(), true);
}
if (std::is_same<xpu, cpu>::value) {
Tensor<xpu, 1, DType> flattened_data;
if (do_transpose) {
flattened_data = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
Shape1(src.Size()), s);
workspace_curr_ptr += sizeof(DType) * src.Size();
flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
CHECK_EQ(flattened_data.CheckContiguous(), true);
} else {
flattened_data = src.FlatTo1D<xpu, DType>(s);
}
// `temp_workspace` stores the flattened data
temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_),
Shape1(sizeof(DType)*src.Size()), s);
CHECK_EQ(temp_workspace.CheckContiguous(), true);
} else {
if (do_transpose) {
sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
} else {
sorted_dat = reshape(dat, Shape1(src.Size()));
}
CHECK_EQ(sorted_dat.CheckContiguous(), true);
temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s); // temp space
workspace_curr_ptr += temp_size;
}
mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1, 0, 1,
kWriteTo, indices.dptr_);
CHECK_EQ(indices.CheckContiguous(), true);
// 2. Perform inplace batch sort.
// After sorting, each batch in `sorted_dat` is ordered as requested up to the k-th element,
// and `indices` holds the corresponding positions in the original data.
// `temp_workspace` stores the flattened source data on the CPU device and serves as
// a temporary buffer on the GPU device.
TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);
// 3. Assign results to the ret blob
// When returning indices, apply the modulo only to the required elements instead of
// all elements to avoid redundant computation.
// Casting `ret_indices` from int to real_t can introduce conversion errors when
// element_num is large enough.
if (param.ret_typ == topk_enum::kReturnMask) {
Tensor<xpu, 1, DType> ret_mask = ret[0].FlatTo1D<xpu, DType>(s);
ret_mask = scalar<DType>(0);
sel_indices = reshape(slice<1>(
inplace_reshape(indices,
Shape2(batch_size,
element_num)), 0, k),
Shape1(batch_size * k));
if (do_transpose) {
TShape src_shape = src.shape_.FlatTo3D(axis);
CHECK_EQ(sel_indices.CheckContiguous(), true);
sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
Shape3(0, 2, 1));
}
if (req[0] == kNullOp) {
return;
} else if (req[0] == kWriteTo) {
mxnet_op::Kernel<fill_ind_to_one, xpu>::Launch(s, batch_size * k,
sel_indices.dptr_, ret_mask.dptr_);
} else {
LOG(FATAL) << "req=" << req[0] << " is not supported yet.";
}
} else if (param.ret_typ == topk_enum::kReturnIndices) {
if (do_transpose) {
Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s);
ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(transpose(
slice<2>(inplace_reshape(indices,
Shape3(ret_indices.shape_[0],
ret_indices.shape_[2],
element_num)),
0, k),
Shape3(0, 2, 1)), element_num)));
} else {
Tensor<xpu, 2, IDType> ret_indices =
ret[0].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
element_num)));
}
} else {
if (do_transpose) {
Tensor<xpu, 3, DType> ret_value = ret[0].FlatTo3D<xpu, DType>(axis, axis, s);
Tensor<xpu, 3, IDType> ret_indices = ret[1].FlatTo3D<xpu, IDType>(axis, axis, s);
ASSIGN_DISPATCH(ret_value, req[0], transpose(
slice<2>(inplace_reshape(sorted_dat,
Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)),
0, k), Shape3(0, 2, 1)));
ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(transpose(
slice<2>(inplace_reshape(indices,
Shape3(ret_indices.shape_[0],
ret_indices.shape_[2],
element_num)),
0, k), Shape3(0, 2, 1)), element_num)));
} else {
Tensor<xpu, 2, DType> ret_value =
ret[0].get_with_shape<xpu, 2, DType>(Shape2(batch_size, k), s);
Tensor<xpu, 2, IDType> ret_indices =
ret[1].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
ASSIGN_DISPATCH(ret_value, req[0],
slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k));
ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num)));
}
}
}
template<typename xpu>
void TopK(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnBoth) {
MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
TopKImpl<xpu, DType, IDType>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param);
})
});
} else {
MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
TopKImpl<xpu, DType, int>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param);
});
}
}
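// Note: Sort and ArgSort below reuse TopKImpl with k = 0, which
// ParseTopKParam expands to k = element_num, i.e. a full sort along the axis.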
template<typename xpu>
void Sort(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
TopKParam topk_param;
topk_param.axis = param.axis;
topk_param.is_ascend = param.is_ascend;
topk_param.k = 0;
topk_param.ret_typ = topk_enum::kReturnValue;
MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
TopKImpl<xpu, DType, int>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, topk_param);
});
}
template<typename xpu>
void ArgSort(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
TopKParam topk_param;
topk_param.axis = param.axis;
topk_param.is_ascend = param.is_ascend;
topk_param.k = 0;
topk_param.dtype = param.dtype;
topk_param.ret_typ = topk_enum::kReturnIndices;
MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
TopKImpl<xpu, DType, IDType>(ctx.run_ctx,
ctx.requested[0], req, inputs[0], outputs, topk_param);
});
});
}
template<typename xpu, typename DType, typename IDType>
void TopKBackwardImpl(const OpContext &ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs,
const TopKParam& param) {
CHECK_NE(req[0], kWriteInplace);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.run_ctx.get_stream<xpu>();
CHECK(param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth);
int batch_size, element_num; // number of batches and the size of each batch
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
int k = 0;
TShape target_shape;
ParseTopKParam(outputs[0].shape_, param,
&target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDType>())
<< "'IDType' does not have a sufficient precision to represent the indices of the input array. "
<< "The total element_num is " << element_num << ", but the selected IDType can only represent "
<< mxnet::common::MaxIntegerValue<IDType>() << " elements";
Tensor<xpu, 1, int> workspace =
ctx.requested[0].get_space_typed<xpu, 1, int>(Shape1(batch_size * k + batch_size), s);
Tensor<xpu, 1, int> sel_indices =
Tensor<xpu, 1, int>(workspace.dptr_, Shape1(batch_size * k), s);
Tensor<xpu, 1, int> batch_shift =
Tensor<xpu, 1, int>(workspace.dptr_ + batch_size * k, Shape1(batch_size), s);
Tensor<xpu, 2, DType> out_grad =
inputs[0].get_with_shape<xpu, 2, DType>(Shape2(inputs[0].shape_.Size(), 1), s);
Tensor<xpu, 2, DType> in_grad =
outputs[0].get_with_shape<xpu, 2, DType>(Shape2(outputs[0].shape_.Size(), 1), s);
mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size, 1, 0, element_num, kWriteTo,
batch_shift.dptr_);
if (do_transpose) {
Tensor<xpu, 1, IDType> indices = inputs[2].FlatTo1D<xpu, IDType>(s);
TShape src_shape = outputs[0].shape_.FlatTo3D(axis);
sel_indices = reshape(transpose(
broadcast_to(inplace_reshape(batch_shift,
Shape3(src_shape[0], src_shape[2], 1)),
TShape(Shape3(src_shape[0], src_shape[2], k))),
Shape3(0, 2, 1)),
Shape1(batch_size * k));
sel_indices += tcast<int>(indices);
sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
Shape3(0, 2, 1));
} else {
Tensor<xpu, 2, IDType> indices =
inputs[2].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
sel_indices = reshape(tcast<int>(indices) +
broadcast_to(inplace_reshape(batch_shift, Shape2(batch_size, 1)),
TShape(Shape2(batch_size, k))),
Shape1(batch_size * k));
}
CHECK_EQ(sel_indices.CheckContiguous(), true);
if (kWriteTo == req[0] || kAddTo == req[0]) {
if (kWriteTo == req[0]) {
in_grad = scalar<DType>(0);
}
mxnet_op::Kernel<fill_ind, xpu>::Launch(s, batch_size * k,
sel_indices.dptr_,
out_grad.dptr_,
req[0],
in_grad.dptr_);
} else {
LOG(FATAL) << "Not Implemented!";
}
}
template<typename xpu>
void TopKBackward_(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
if (param.ret_typ == topk_enum::kReturnBoth) {
MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
TopKBackwardImpl<xpu, DType, IDType>(ctx, inputs, req, outputs, param);
});
});
} else if (param.ret_typ == topk_enum::kReturnValue) {
MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
TopKBackwardImpl<xpu, DType, int>(ctx, inputs, req, outputs, param);
});
} else {
LOG(FATAL) << "Not Implemented";
}
}
inline uint32_t TopKNumOutputs(const NodeAttrs& attrs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
if (param.ret_typ == topk_enum::kReturnIndices ||
param.ret_typ == topk_enum::kReturnMask) {
return static_cast<uint32_t>(1);
} else {
return static_cast<uint32_t>(2);
}
}
inline uint32_t TopKNumVisibleOutputs(const NodeAttrs& attrs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
if (param.ret_typ == topk_enum::kReturnBoth) {
return static_cast<uint32_t>(2);
} else {
return static_cast<uint32_t>(1);
}
}
inline bool TopKType(const nnvm::NodeAttrs& attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
int data_type = -1;
size_t in_size = in_attrs->size();
size_t out_size = out_attrs->size();
CHECK_EQ(in_size, 1);
CHECK(out_size == 1 || out_size == 2);
if (out_size > 1) {
if (param.ret_typ == topk_enum::kReturnValue) {
CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
<< "Failed to set the type of ret_indices.";
} else {
CHECK(type_assign(&(*out_attrs)[1], param.dtype))
<< "Failed to set the type of ret_indices.";
}
}
if (param.ret_typ == topk_enum::kReturnIndices) {
CHECK(type_assign(&(*out_attrs)[0], param.dtype))
<< "Failed to set the type of ret_indices.";
} else {
CHECK(type_assign(&data_type, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]="
<< (*in_attrs)[0];
CHECK(type_assign(&data_type, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]="
<< (*out_attrs)[0];
CHECK(type_assign(&(*in_attrs)[0], data_type)) << "Incompatible dtype of input, in_attrs[0]="
<< (*in_attrs)[0];
CHECK(type_assign(&(*out_attrs)[0], data_type)) << "Incompatible dtype of output, out_attrs[0]="
<< (*out_attrs)[0];
if (data_type == -1) return false;
}
return true;
}
inline bool TopKShapeImpl(const TopKParam& param,
std::vector<TShape> *in_attrs,
std::vector<TShape> *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
if (param.ret_typ == topk_enum::kReturnIndices ||
param.ret_typ == topk_enum::kReturnMask) {
CHECK_EQ(out_attrs->size(), 1U);
} else {
CHECK_EQ(out_attrs->size(), 2U);
}
TShape& in_shape = (*in_attrs)[0];
int batch_size, element_num; // number of batches and the size of each batch
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
int k = 0;
TShape target_shape;
ParseTopKParam(in_shape, param,
&target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
if (param.ret_typ == topk_enum::kReturnIndices ||
param.ret_typ == topk_enum::kReturnMask) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
} else {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
SHAPE_ASSIGN_CHECK(*out_attrs, 1, target_shape);
}
return true;
}
inline bool TopKShape(const nnvm::NodeAttrs& attrs,
std::vector<TShape> *in_attrs,
std::vector<TShape> *out_attrs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
return TopKShapeImpl(param, in_attrs, out_attrs);
}
inline bool SortType(const nnvm::NodeAttrs& attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
int data_type = -1;
size_t in_size = in_attrs->size();
size_t out_size = out_attrs->size();
CHECK_EQ(in_size, 1);
CHECK_EQ(out_size, 2);
CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
<< "Failed to set the type of ret_indices to int32.";
CHECK(type_assign(&data_type, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]="
<< (*in_attrs)[0];
CHECK(type_assign(&data_type, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]="
<< (*out_attrs)[0];
CHECK(type_assign(&(*in_attrs)[0], data_type)) << "Incompatible dtype of input, in_attrs[0]="
<< (*in_attrs)[0];
CHECK(type_assign(&(*out_attrs)[0], data_type)) << "Incompatible dtype of output, out_attrs[0]="
<< (*out_attrs)[0];
if (data_type == -1) return false;
return true;
}
inline bool SortShape(const nnvm::NodeAttrs& attrs,
std::vector<TShape> *in_attrs,
std::vector<TShape> *out_attrs) {
const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
TopKParam topk_param;
topk_param.axis = param.axis;
topk_param.is_ascend = param.is_ascend;
topk_param.k = 0;
topk_param.ret_typ = topk_enum::kReturnValue;
return TopKShapeImpl(topk_param, in_attrs, out_attrs);
}
inline bool ArgSortType(const nnvm::NodeAttrs& attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
CHECK(type_assign(&(*out_attrs)[0], param.dtype))
<< "Failed to set the dtype of the output indices.";
return true;
}
inline bool ArgSortShape(const nnvm::NodeAttrs& attrs,
std::vector<TShape> *in_attrs,
std::vector<TShape> *out_attrs) {
const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
TopKParam topk_param;
topk_param.axis = param.axis;
topk_param.is_ascend = param.is_ascend;
topk_param.k = 0;
topk_param.ret_typ = topk_enum::kReturnIndices;
return TopKShapeImpl(topk_param, in_attrs, out_attrs);
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
|
bfecc_elemental_limiter_convection.h | // KRATOS ___ ___ _ ___ __ ___ ___ ___ ___
// / __/ _ \| \| \ \ / /__| \_ _| __| __|
// | (_| (_) | .` |\ V /___| |) | || _|| _|
// \___\___/|_|\_| \_/ |___/___|_| |_| APPLICATION
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
#if !defined(KRATOS_BFECC_LIMITER_CONVECTION_INCLUDED )
#define KRATOS_BFECC_LIMITER_CONVECTION_INCLUDED
#define PRESSURE_ON_EULERIAN_MESH
#define USE_FEW_PARTICLES
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
#ifdef _OPENMP
#include "omp.h"
#endif
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
#include "utilities/geometry_utilities.h"
#include "geometries/tetrahedra_3d_4.h"
#include "includes/variables.h"
#include "spatial_containers/spatial_containers.h"
#include "utilities/timer.h"
#include "utilities/binbased_fast_point_locator.h"
#include "processes/find_nodal_neighbours_process.h"
namespace Kratos
{
template<std::size_t TDim> class BFECCLimiterConvection
{
public:
KRATOS_CLASS_POINTER_DEFINITION(BFECCLimiterConvection<TDim>);
BFECCLimiterConvection(typename BinBasedFastPointLocator<TDim>::Pointer pSearchStructure)
: mpSearchStructure(pSearchStructure)
{
}
~BFECCLimiterConvection()
{
}
//**********************************************************************************************
//**********************************************************************************************
void BFECCconvect(ModelPart& rModelPart, const Variable< double >& rVar, const Variable<array_1d<double,3> >& conv_var, const double substeps)
{
KRATOS_TRY
const double dt = rModelPart.GetProcessInfo()[DELTA_TIME];
//do movement
Vector N(TDim + 1);
const int max_results = 10000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rModelPart.Nodes().size();
PointerVector< Element > elem_backward( rModelPart.Nodes().size());
std::vector< Vector > Ns( rModelPart.Nodes().size());
std::vector< bool > found( rModelPart.Nodes().size());
std::vector< bool > foundf( rModelPart.Nodes().size());
//FIRST LOOP: estimate rVar(n+1)
#pragma omp parallel for firstprivate(results,N)
for (int i = 0; i < nparticles; i++)
{
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i;
Element::Pointer pelement;
array_1d<double,3> bckPos = iparticle->Coordinates();
const array_1d<double,3>& vel = iparticle->FastGetSolutionStepValue(conv_var);
bool is_found = ConvectBySubstepping(dt,bckPos,vel, N, pelement, result_begin, max_results, -1.0, substeps);
found[i] = is_found;
if(is_found) {
//save position backwards
elem_backward(i) = pelement;
Ns[i] = N;
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
double phi1 = N[0] * ( geom[0].FastGetSolutionStepValue(rVar,1));
for (unsigned int k = 1; k < geom.size(); k++) {
phi1 += N[k] * ( geom[k].FastGetSolutionStepValue(rVar,1) );
}
iparticle->FastGetSolutionStepValue(rVar) = phi1;
}
}
//now obtain the value AT TIME STEP N by taking it from N+1
#pragma omp parallel for firstprivate(results,N)
for (int i = 0; i < nparticles; i++)
{
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i;
Element::Pointer pelement;
array_1d<double,3> fwdPos = iparticle->Coordinates();
const array_1d<double,3>& vel = iparticle->FastGetSolutionStepValue(conv_var,1);
bool is_found = ConvectBySubstepping(dt,fwdPos,vel, N, pelement, result_begin, max_results, 1.0, substeps);
foundf[i] = is_found;
if(is_found) {
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
double phi_old = N[0] * ( geom[0].FastGetSolutionStepValue(rVar));
for (unsigned int k = 1; k < geom.size(); k++) {
phi_old += N[k] * ( geom[k].FastGetSolutionStepValue(rVar) );
}
//Computing error 1 and modified solution at time N to be interpolated again
iparticle->GetValue(BFECC_ERROR_1) = 0.5*iparticle->FastGetSolutionStepValue(rVar,1) - 0.5*phi_old;//computing error1 as e1 = 0.5*(rVar(n) - phi_old)
iparticle->GetValue(rVar) = iparticle->FastGetSolutionStepValue(rVar,1) + iparticle->GetValue(BFECC_ERROR_1);//rVar(n)+e1
}
}
//Backward with modified solution
#pragma omp parallel for
for (int i = 0; i < nparticles; i++)
{
ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i;
bool is_found = found[i];
if(is_found) {
Vector N = Ns[i];
Geometry< Node < 3 > >& geom = elem_backward[i].GetGeometry();
double phi1 = N[0] * ( geom[0].GetValue(rVar));
for (unsigned int k = 1; k < geom.size(); k++) {
phi1 += N[k] * ( geom[k].GetValue(rVar) );
}
iparticle->FastGetSolutionStepValue(rVar) = phi1;
}
// else
// std::cout << "it should find it" << std::endl;
}
// computing error 2 with forward of phi1
int nelements = rModelPart.NumberOfElements();
for(int i = 0 ; i < nelements; i++)
{
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
ModelPart::ElementsContainerType::iterator i_element = rModelPart.ElementsBegin() + i;
Element::GeometryType& element_geometry = i_element->GetGeometry();
Element::Pointer pelement;
array_1d<double,3> fwdPos = i_element->GetGeometry().Center();
array_1d<double,3> vel = ZeroVector(3);
for(unsigned int j = 0 ; j < element_geometry.size(); j++){
for(int k = 0 ; k < 3; k++){
vel[k] += element_geometry[j].GetSolutionStepValue(conv_var)[k] / element_geometry.size();
}
}
bool is_found = ConvectBySubstepping(dt,fwdPos,vel, N, pelement, result_begin, max_results, 1.0, substeps);//seeking forwards
double e1 = 0.0;
for(unsigned int j = 0 ; j < element_geometry.size(); j++){
e1 += element_geometry[j].GetValue(BFECC_ERROR_1);
}
e1 /= element_geometry.size();
double e2 = e1;
if(is_found) {
//Forward interpolation of the modified solution
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
double phi2 = N[0] * ( geom[0].FastGetSolutionStepValue(rVar));
for (unsigned int k = 1; k < geom.size(); k++) {
phi2 += N[k] * ( geom[k].FastGetSolutionStepValue(rVar) );
}
double solution_in_center = 0;
//Computing error2 as e2 = rVar(n)-(phi2+e1)
for(unsigned int j = 0 ; j < element_geometry.size(); j++){
solution_in_center += i_element->GetGeometry()[j].FastGetSolutionStepValue(rVar,1);
}
solution_in_center /= element_geometry.size();
e2 = solution_in_center - (phi2 + e1);
if(std::abs(e2) > std::abs(e1)){
for(unsigned int j = 0 ; j < element_geometry.size(); j++){
element_geometry[j].GetValue(BFECC_ERROR) = minmod(e1,element_geometry[j].GetValue(BFECC_ERROR_1));
}
}
}
}
#pragma omp parallel for
for (int i = 0; i < nparticles; i++){
ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i;
bool is_found = foundf[i];
if(is_found) {
iparticle->GetValue(rVar) = iparticle->FastGetSolutionStepValue(rVar,1) + iparticle->GetValue(BFECC_ERROR);
}
}
//Backward with modified solution
#pragma omp parallel for
for (int i = 0; i < nparticles; i++){
ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i;
bool is_found = found[i];
if(is_found) {
Vector N = Ns[i];
Geometry< Node < 3 > >& geom = elem_backward[i].GetGeometry();
double phi1 = N[0] * ( geom[0].GetValue(rVar));
for (unsigned int k = 1; k < geom.size(); k++) {
phi1 += N[k] * ( geom[k].GetValue(rVar) );
}
iparticle->FastGetSolutionStepValue(rVar) = phi1;
}
}
KRATOS_CATCH("")
}
double minmod(double x, double y) {
double f;
if(x > 0.0f && y > 0.0f)
f = std::min(x,y);
else if(x < 0.0f && y < 0.0f)
f = std::max(x,y);
else
f = 0;
return f;
}
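// Example: minmod(0.3, 0.7) = 0.3, minmod(-0.5, -0.2) = -0.2 and
// minmod(0.5, -0.2) = 0 (opposite signs yield no correction).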
bool ConvectBySubstepping(
const double dt,
array_1d<double,3>& position, //IT WILL BE MODIFIED
const array_1d<double,3>& initial_velocity,
Vector& N,
Element::Pointer& pelement,
typename BinBasedFastPointLocator<TDim>::ResultIteratorType& result_begin,
const unsigned int max_results,
const double velocity_sign,
const double subdivisions)
{
bool is_found = false;
array_1d<double,3> veulerian;
const double small_dt = dt/subdivisions;
if(velocity_sign > 0.0) //going from the past to the future
{
noalias(position) += small_dt*initial_velocity;
unsigned int substep=0;
while(substep++ < subdivisions)
{
is_found = mpSearchStructure->FindPointOnMesh(position, N, pelement, result_begin, max_results);
if (is_found == true)
{
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
const double new_step_factor = static_cast<double>(substep)/subdivisions;
const double old_step_factor = (1.0 - new_step_factor);
noalias(veulerian) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY,1));
for (unsigned int k = 1; k < geom.size(); k++)
noalias(veulerian) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY,1) );
noalias(position) += small_dt*veulerian;
}
else
break;
}
}
else //going from the future to the past
{
noalias(position) -= small_dt*initial_velocity;
unsigned int substep=0;
while(substep++ < subdivisions)
{
is_found = mpSearchStructure->FindPointOnMesh(position, N, pelement, result_begin, max_results);
if (is_found == true)
{
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
//these factors are inverted relative to the forward case
const double old_step_factor = static_cast<double>(substep)/subdivisions;
const double new_step_factor = (1.0 - old_step_factor);
noalias(veulerian) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY,1));
for (unsigned int k = 1; k < geom.size(); k++)
noalias(veulerian) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY,1) );
noalias(position) -= small_dt*veulerian;
}
else
break;
}
}
return is_found;
}
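// Illustrative note: with substeps = 4 the characteristic is traced with four
// explicit Euler steps of size dt/4; at every substep the velocity is
// interpolated in time between VELOCITY and VELOCITY,1 using the substep
// fraction, with the roles of the two factors swapped for backward tracing.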
private:
typename BinBasedFastPointLocator<TDim>::Pointer mpSearchStructure;
};
} // namespace Kratos.
#endif // KRATOS_BFECC_LIMITER_CONVECTION_INCLUDED defined
|
GB_unop__carg_fp32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__carg_fp32_fc32)
// op(A') function: GB (_unop_tran__carg_fp32_fc32)
// C type: float
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = (aij)
// unaryop: cij = cargf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cargf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = (aij) ; \
Cx [pC] = cargf (z) ; \
}
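// Example (illustrative): cargf returns the phase angle of a complex float in
// (-pi, pi], so for aij = 0+1i the result stored in Cx [p] is pi/2.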
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CARG || GxB_NO_FP32 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__carg_fp32_fc32)
(
float *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = cargf (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = cargf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__carg_fp32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
filter_range.h | // MIT License
// Copyright (c) 2019 Edward Liu
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#pragma once
#include <memory>
#include "pre_processors/filter_interface.h"
namespace static_map {
namespace pre_processers {
namespace filter {
template <typename PointT>
class Range : public Interface<PointT> {
public:
USE_POINTCLOUD;
Range() : Interface<PointT>(), min_range_(0.), max_range_(100.) {
// float params
INIT_INNER_PARAM(Interface<PointT>::kFloatParam, 0, "min_range",
min_range_);
INIT_INNER_PARAM(Interface<PointT>::kFloatParam, 1, "max_range",
max_range_);
}
~Range() {}
Range(const Range &) = delete;
Range &operator=(const Range &) = delete;
std::shared_ptr<Interface<PointT>> CreateNewInstance() override {
return std::make_shared<Range<PointT>>();
}
void Filter(const PointCloudPtr &cloud) override {
if (!cloud || !Interface<PointT>::inner_cloud_) {
LOG(WARNING) << "nullptr cloud, do nothing!" << std::endl;
return;
}
this->FilterPrepare(cloud);
const int size = this->inner_cloud_->size();
// std::vector<char> avoids a variable-length array (non-standard in C++) and the
// bit-packed std::vector<bool>, which is unsafe for concurrent element writes.
std::vector<char> is_inlier(size);
#ifdef _OPENMP
#pragma omp parallel for num_threads(LOCAL_OMP_THREADS_NUM)
#endif
for (int i = 0; i < size; ++i) {
auto &point = this->inner_cloud_->points[i];
float range =
std::sqrt(point.x * point.x + point.y * point.y + point.z * point.z);
if (range >= min_range_ && range <= max_range_) {
is_inlier[i] = true;
} else {
is_inlier[i] = false;
}
}
// Reserve first, then push_back, and finally shrink_to_fit,
// for the best balance of efficiency and space usage.
this->inliers_.reserve(size);
this->outliers_.reserve(size);
cloud->points.reserve(size);
for (int i = 0; i < size; ++i) {
if (is_inlier[i]) {
this->inliers_.push_back(i);
cloud->push_back(this->inner_cloud_->points[i]);
} else {
this->outliers_.push_back(i);
}
}
this->inliers_.shrink_to_fit();
this->outliers_.shrink_to_fit();
cloud->points.shrink_to_fit();
}
void DisplayAllParams() override {
PARAM_INFO(min_range_);
PARAM_INFO(max_range_);
}
private:
float min_range_;
float max_range_;
};
} // namespace filter
} // namespace pre_processers
} // namespace static_map
|
task_final.c | int foo (int x )
{
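/* final(x>0): when true, the generated task is final, so any tasks created
   inside it become included (undeferred, executed immediately by the
   encountering thread); mergeable allows the implementation to merge the
   task's data environment with that of its parent. */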
#pragma omp task shared(x) mergeable final (x>0)
{
x++;
}
#pragma omp taskwait
return x;
}
|
pool.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include "transform.h"
#define POOL_SIZE 2
void transform(unsigned char **image, unsigned *width, unsigned *height, unsigned threadCount) {
// Original image
unsigned char *imageIn = *image;
unsigned widthIn = *width;
unsigned heightIn = *height;
// Calculate the pooled image size
unsigned widthOut = widthIn / POOL_SIZE;
unsigned heightOut = heightIn / POOL_SIZE;
// There is one pool per pixel in the output image
unsigned poolCount = widthOut * heightOut;
// Allocate the output image
unsigned char *imageOut = malloc(sizeof(unsigned char) * poolCount * 4);
if (imageOut == NULL) {
printf("Could not allocate new image\n");
exit(-1);
}
// Parallelize the for loop
#pragma omp parallel for num_threads(threadCount)
for (unsigned poolIndex = 0; poolIndex < poolCount; poolIndex++) {
// Calculate the pool coordinates in the output image
unsigned xPool = poolIndex % widthOut;
unsigned yPool = poolIndex / widthOut;
// Calculate the pool top left pixel in the input image
unsigned char *pool = imageIn + (yPool * POOL_SIZE * widthIn + xPool * POOL_SIZE) * 4;
// Calculate the max values in the pool in the original image
unsigned char maxR = 0, maxG = 0, maxB = 0;
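// Note: y and x advance in bytes (4 bytes per RGBA pixel), so y * widthIn
// equals rowIndex * widthIn * 4, the byte offset of one full input row.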
for (unsigned y = 0; y < POOL_SIZE * 4; y += 4) {
unsigned char *line = pool + y * widthIn;
for (unsigned x = 0; x < POOL_SIZE * 4; x += 4) {
unsigned char *pixel = line + x;
if (pixel[0] > maxR) {
maxR = pixel[0];
}
if (pixel[1] > maxG) {
maxG = pixel[1];
}
if (pixel[2] > maxB) {
maxB = pixel[2];
}
}
}
// Set the max in the new image pool
unsigned char* pixel = imageOut + poolIndex * 4;
pixel[0] = maxR;
pixel[1] = maxG;
pixel[2] = maxB;
pixel[3] = 0xFF;
}
// Delete the input image
free(imageIn);
// Replace it with the output one
*image = imageOut;
*width = widthOut;
*height = heightOut;
}
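// Illustrative note: with POOL_SIZE == 2 a 640x480 RGBA image becomes 320x240;
// right/bottom pixels that do not fill a complete pool are dropped by the
// integer division above.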
|
bitmap.c | #include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>
#include "bitmap.h"
#include "types.h"
bitmap *bitmap_new(u32 capacity) {
atomic_uint *map = calloc((capacity / 32 + 1), sizeof(atomic_uint));
bitmap *ret = malloc(sizeof(bitmap));
*ret = (bitmap){capacity, map};
return ret;
}
void bitmap_set(bitmap *b, u32 n) {
u32 index = n / 32;
u32 offset = n % 32;
b->map[index] |= 1 << offset;
}
bool bitmap_test(bitmap *b, u32 n) {
u32 index = n / 32;
u32 offset = n % 32;
return !!((b->map[index] >> offset) & 1);
}
bool bitmap_test_set(bitmap *b, u32 n) {
u32 index = n / 32;
u32 offset = n % 32;
bool prev = !!((b->map[index] >> offset) & 1);
// Guard to prevent unnecessary contention
if (!prev) {
prev = !!((atomic_fetch_or(&b->map[index], 1u << offset) >> offset) & 1);
}
return prev;
}
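// Example: if two threads race on bitmap_test_set(b, n) for the same n,
// the atomic_fetch_or guarantees that exactly one of them observes
// prev == false.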
void bitmap_clear(bitmap *b) {
#pragma omp parallel for
for (u32 i = 0; i < b->capacity / 32 + 1; i += 1) {
b->map[i] = 0;
}
}
void bitmap_merge(bitmap *self, bitmap *other) {
assert(self->capacity == other->capacity);
#pragma omp parallel for
for (u32 i = 0; i < self->capacity / 32 + 1; i += 1) {
self->map[i] |= other->map[i];
}
}
void bitmap_free(bitmap *b) {
free(b->map);
free(b);
}
u32 bitmap2array(bitmap *b, u32 *array) {
u32 n = 0;
for (u32 i = 0; i < b->capacity; i += 1) {
if (bitmap_test(b, i)) {
array[n++] = i;
}
}
return n;
}
void array2bitmap(u32 *array, bitmap *b, u32 n) {
#pragma omp parallel for
for (u32 i = 0; i < n; i += 1) {
bitmap_set(b, array[i]);
}
}
|
DES_bs_b.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 1996-2001,2003,2010-2013 by Solar Designer
*
* Addition of single DES encryption with no salt by
* Deepika Dutta Mishra <dipikadutta at gmail.com> in 2012, no
* rights reserved.
*/
#include "arch.h"
#include "common.h"
#include "DES_bs.h"
#if DES_BS_ASM && defined(_OPENMP) && defined(__GNUC__)
#warning Assembly code and OpenMP are both requested - will provide the former, but not the latter (for DES-based hashes). This may likely be corrected by enabling SIMD intrinsics with the C compiler (try adding -msse2 to OMPFLAGS).
#endif
#if !DES_BS_ASM
#define vzero (*(vtype *)&DES_bs_all.zero)
#if DES_bs_mt
#define vones (*(vtype *)&DES_bs_all_by_tnum(-1).ones)
#else
#define vones (*(vtype *)&DES_bs_all.ones)
#endif
#define DES_BS_VECTOR_LOOPS 0
#if defined(__ARM_NEON__) && DES_BS_DEPTH == 64
#include <arm_neon.h>
typedef uint32x2_t vtype;
#define vst(dst, ofs, src) \
vst1_u32((uint32_t *)((DES_bs_vector *)&(dst) + (ofs)), (src))
#define vxorf(a, b) \
veor_u32((a), (b))
#define vnot(dst, a) \
(dst) = vmvn_u32((a))
#define vand(dst, a, b) \
(dst) = vand_u32((a), (b))
#define vor(dst, a, b) \
(dst) = vorr_u32((a), (b))
#define vandn(dst, a, b) \
(dst) = vbic_u32((a), (b))
#define vsel(dst, a, b, c) \
(dst) = vbsl_u32((c), (b), (a))
#if 0
#define vshl1(dst, src) \
(dst) = vadd_u32((src), (src))
#endif
#define vshl(dst, src, shift) \
(dst) = vshl_n_u32((src), (shift))
#define vshr(dst, src, shift) \
(dst) = vshr_n_u32((src), (shift))
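/* Note: vbsl_u32(c, b, a) in vsel above is a bitwise select,
   dst = (b & c) | (a & ~c): each bit of c picks from b when 1, from a when 0. */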
#elif defined(__ARM_NEON__) && ARCH_BITS == 32 && DES_BS_DEPTH == 96
#include <arm_neon.h>
typedef struct {
uint32x2_t f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
vst1_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = veor_u32((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = vmvn_u32((a).f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = vand_u32((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = vorr_u32((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = vbic_u32((a).f, (b).f); \
(dst).g = (a).g & ~(b).g
#define vsel(dst, a, b, c) \
(dst).f = vbsl_u32((c).f, (b).f, (a).f); \
(dst).g = (((a).g & ~(c).g) ^ ((b).g & (c).g))
#elif defined(__ARM_NEON__) && DES_BS_DEPTH == 128 && defined(DES_BS_2X64)
#include <arm_neon.h>
typedef struct {
uint32x2_t f, g;
} vtype;
#define vst(dst, ofs, src) \
vst1_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
vst1_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
(src).g)
#define vxor(dst, a, b) \
(dst).f = veor_u32((a).f, (b).f); \
(dst).g = veor_u32((a).g, (b).g)
#define vnot(dst, a) \
(dst).f = vmvn_u32((a).f); \
(dst).g = vmvn_u32((a).g)
#define vand(dst, a, b) \
(dst).f = vand_u32((a).f, (b).f); \
(dst).g = vand_u32((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = vorr_u32((a).f, (b).f); \
(dst).g = vorr_u32((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = vbic_u32((a).f, (b).f); \
(dst).g = vbic_u32((a).g, (b).g)
#define vsel(dst, a, b, c) \
(dst).f = vbsl_u32((c).f, (b).f, (a).f); \
(dst).g = vbsl_u32((c).g, (b).g, (a).g)
#elif defined(__ARM_NEON__) && DES_BS_DEPTH == 128
#include <arm_neon.h>
typedef uint32x4_t vtype;
#define vst(dst, ofs, src) \
vst1q_u32((uint32_t *)((DES_bs_vector *)&(dst) + (ofs)), (src))
#define vxorf(a, b) \
veorq_u32((a), (b))
#define vnot(dst, a) \
(dst) = vmvnq_u32((a))
#define vand(dst, a, b) \
(dst) = vandq_u32((a), (b))
#define vor(dst, a, b) \
(dst) = vorrq_u32((a), (b))
#define vandn(dst, a, b) \
(dst) = vbicq_u32((a), (b))
#define vsel(dst, a, b, c) \
(dst) = vbslq_u32((c), (b), (a))
#if 0
#define vshl1(dst, src) \
(dst) = vaddq_u32((src), (src))
#endif
#define vshl(dst, src, shift) \
(dst) = vshlq_n_u32((src), (shift))
#define vshr(dst, src, shift) \
(dst) = vshrq_n_u32((src), (shift))
#elif defined(__ARM_NEON__) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 192) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 160))
#include <arm_neon.h>
typedef struct {
uint32x4_t f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
vst1q_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = veorq_u32((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = vmvnq_u32((a).f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = vandq_u32((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = vorrq_u32((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = vbicq_u32((a).f, (b).f); \
(dst).g = (a).g & ~(b).g
#define vsel(dst, a, b, c) \
(dst).f = vbslq_u32((c).f, (b).f, (a).f); \
(dst).g = (((a).g & ~(c).g) ^ ((b).g & (c).g))
#elif defined(__ARM_NEON__) && DES_BS_DEPTH == 256
#include <arm_neon.h>
typedef struct {
uint32x4_t f, g;
} vtype;
#define vst(dst, ofs, src) \
vst1q_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
vst1q_u32( \
(uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
(src).g)
#define vxor(dst, a, b) \
(dst).f = veorq_u32((a).f, (b).f); \
(dst).g = veorq_u32((a).g, (b).g)
#define vnot(dst, a) \
(dst).f = vmvnq_u32((a).f); \
(dst).g = vmvnq_u32((a).g)
#define vand(dst, a, b) \
(dst).f = vandq_u32((a).f, (b).f); \
(dst).g = vandq_u32((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = vorrq_u32((a).f, (b).f); \
(dst).g = vorrq_u32((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = vbicq_u32((a).f, (b).f); \
(dst).g = vbicq_u32((a).g, (b).g)
#define vsel(dst, a, b, c) \
(dst).f = vbslq_u32((c).f, (b).f, (a).f); \
(dst).g = vbslq_u32((c).g, (b).g, (a).g)
#elif defined(__ALTIVEC__) && DES_BS_DEPTH == 128
#ifdef __linux__
#include <altivec.h>
#endif
typedef vector signed int vtype;
#define vst(dst, ofs, src) \
vec_st((src), (ofs) * sizeof(DES_bs_vector), (dst))
#define vxorf(a, b) \
vec_xor((a), (b))
#define vnot(dst, a) \
(dst) = vec_nor((a), (a))
#define vand(dst, a, b) \
(dst) = vec_and((a), (b))
#define vor(dst, a, b) \
(dst) = vec_or((a), (b))
#define vandn(dst, a, b) \
(dst) = vec_andc((a), (b))
#define vsel(dst, a, b, c) \
(dst) = vec_sel((a), (b), (vector bool int)(c))
#elif defined(__ALTIVEC__) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 192) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 160))
#ifdef __linux__
#include <altivec.h>
#endif
typedef struct {
vector signed int f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
vec_st((src).f, (ofs) * sizeof(DES_bs_vector), ((vtype *)&(dst))->f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = vec_xor((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = vec_nor((a).f, (a).f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = vec_and((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = vec_or((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = vec_andc((a).f, (b).f); \
(dst).g = (a).g & ~(b).g
#define vsel(dst, a, b, c) \
(dst).f = vec_sel((a).f, (b).f, (vector bool int)(c).f); \
(dst).g = (((a).g & ~(c).g) ^ ((b).g & (c).g))
#elif defined(__ALTIVEC__) && DES_BS_DEPTH == 256
#ifdef __linux__
#include <altivec.h>
#endif
typedef struct {
vector signed int f, g;
} vtype;
#define vst(dst, ofs, src) \
vec_st((src).f, (ofs) * sizeof(DES_bs_vector), ((vtype *)&(dst))->f); \
vec_st((src).g, (ofs) * sizeof(DES_bs_vector), ((vtype *)&(dst))->g)
#define vxor(dst, a, b) \
(dst).f = vec_xor((a).f, (b).f); \
(dst).g = vec_xor((a).g, (b).g)
#define vnot(dst, a) \
(dst).f = vec_nor((a).f, (a).f); \
(dst).g = vec_nor((a).g, (a).g)
#define vand(dst, a, b) \
(dst).f = vec_and((a).f, (b).f); \
(dst).g = vec_and((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = vec_or((a).f, (b).f); \
(dst).g = vec_or((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = vec_andc((a).f, (b).f); \
(dst).g = vec_andc((a).g, (b).g)
#define vsel(dst, a, b, c) \
(dst).f = vec_sel((a).f, (b).f, (vector bool int)(c).f); \
(dst).g = vec_sel((a).g, (b).g, (vector bool int)(c).g)
#elif defined(__AVX__) && DES_BS_DEPTH == 256 && !defined(DES_BS_NO_AVX256)
#include <immintrin.h>
/* Not __m256i because bitwise ops are "floating-point" with AVX */
typedef __m256 vtype;
#define vst(dst, ofs, src) \
_mm256_store_ps((float *)((DES_bs_vector *)&(dst) + (ofs)), (src))
#define vxorf(a, b) \
_mm256_xor_ps((a), (b))
#define vand(dst, a, b) \
(dst) = _mm256_and_ps((a), (b))
#define vor(dst, a, b) \
(dst) = _mm256_or_ps((a), (b))
#define vandn(dst, a, b) \
(dst) = _mm256_andnot_ps((b), (a))
#ifdef __XOP__
/* This could be _mm256_cmov_ps(), but it does not exist (yet?) */
#define vsel(dst, a, b, c) \
(dst) = __builtin_ia32_vpcmov_v8sf256((b), (a), (c))
#endif
/*
* We should be able to do 256-bit shifts with one instruction with AVX2, but
* for plain AVX let's use pairs of 128-bit instructions (and likely incur
* extra memory stores/loads because the rest of our AVX code is 256-bit). :-(
*/
#define vshl(dst, src, shift) \
((__m128i *)&(dst))[0] = \
_mm_slli_epi64(((__m128i *)&(src))[0], (shift)); \
((__m128i *)&(dst))[1] = \
_mm_slli_epi64(((__m128i *)&(src))[1], (shift))
#define vshr(dst, src, shift) \
((__m128i *)&(dst))[0] = \
_mm_srli_epi64(((__m128i *)&(src))[0], (shift)); \
((__m128i *)&(dst))[1] = \
_mm_srli_epi64(((__m128i *)&(src))[1], (shift))
#elif defined(__AVX__) && DES_BS_DEPTH == 384 && !defined(DES_BS_NO_AVX128)
#include <immintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
typedef struct {
/* Not __m256i because bitwise ops are "floating-point" with AVX */
__m256 f;
__m128i g;
} vtype;
#define vst(dst, ofs, src) \
_mm256_store_ps( \
(float *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
(src).g)
#define vxor(dst, a, b) \
(dst).f = _mm256_xor_ps((a).f, (b).f); \
(dst).g = _mm_xor_si128((a).g, (b).g)
#define vand(dst, a, b) \
(dst).f = _mm256_and_ps((a).f, (b).f); \
(dst).g = _mm_and_si128((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = _mm256_or_ps((a).f, (b).f); \
(dst).g = _mm_or_si128((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = _mm256_andnot_ps((b).f, (a).f); \
(dst).g = _mm_andnot_si128((b).g, (a).g)
#ifdef __XOP__
/* This could be _mm256_cmov_ps(), but it does not exist (yet?) */
#define vsel(dst, a, b, c) \
(dst).f = __builtin_ia32_vpcmov_v8sf256((b).f, (a).f, (c).f); \
(dst).g = _mm_cmov_si128((b).g, (a).g, (c).g)
#endif
#define vshl(dst, src, shift) \
((__m128i *)&(dst).f)[0] = \
_mm_slli_epi64(((__m128i *)&(src).f)[0], (shift)); \
((__m128i *)&(dst).f)[1] = \
_mm_slli_epi64(((__m128i *)&(src).f)[1], (shift)); \
(dst).g = _mm_slli_epi64((src).g, (shift))
#define vshr(dst, src, shift) \
((__m128i *)&(dst).f)[0] = \
_mm_srli_epi64(((__m128i *)&(src).f)[0], (shift)); \
((__m128i *)&(dst).f)[1] = \
_mm_srli_epi64(((__m128i *)&(src).f)[1], (shift)); \
(dst).g = _mm_srli_epi64((src).g, (shift))
#elif defined(__AVX__) && DES_BS_DEPTH == 512
#include <immintrin.h>
typedef struct {
/* Not __m256i because bitwise ops are "floating-point" with AVX */
__m256 f, g;
} vtype;
#define vst(dst, ofs, src) \
_mm256_store_ps( \
(float *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
_mm256_store_ps( \
(float *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
(src).g)
#define vxor(dst, a, b) \
(dst).f = _mm256_xor_ps((a).f, (b).f); \
(dst).g = _mm256_xor_ps((a).g, (b).g)
#define vand(dst, a, b) \
(dst).f = _mm256_and_ps((a).f, (b).f); \
(dst).g = _mm256_and_ps((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = _mm256_or_ps((a).f, (b).f); \
(dst).g = _mm256_or_ps((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = _mm256_andnot_ps((b).f, (a).f); \
(dst).g = _mm256_andnot_ps((b).g, (a).g)
#ifdef __XOP__
/* This could be _mm256_cmov_ps(), but it does not exist (yet?) */
#define vsel(dst, a, b, c) \
(dst).f = __builtin_ia32_vpcmov_v8sf256((b).f, (a).f, (c).f); \
(dst).g = __builtin_ia32_vpcmov_v8sf256((b).g, (a).g, (c).g)
#endif
#define vshl(dst, src, shift) \
((__m128i *)&(dst).f)[0] = \
_mm_slli_epi64(((__m128i *)&(src).f)[0], (shift)); \
((__m128i *)&(dst).f)[1] = \
_mm_slli_epi64(((__m128i *)&(src).f)[1], (shift)); \
((__m128i *)&(dst).g)[0] = \
_mm_slli_epi64(((__m128i *)&(src).g)[0], (shift)); \
((__m128i *)&(dst).g)[1] = \
_mm_slli_epi64(((__m128i *)&(src).g)[1], (shift))
#define vshr(dst, src, shift) \
((__m128i *)&(dst).f)[0] = \
_mm_srli_epi64(((__m128i *)&(src).f)[0], (shift)); \
((__m128i *)&(dst).f)[1] = \
_mm_srli_epi64(((__m128i *)&(src).f)[1], (shift)); \
((__m128i *)&(dst).g)[0] = \
_mm_srli_epi64(((__m128i *)&(src).g)[0], (shift)); \
((__m128i *)&(dst).g)[1] = \
_mm_srli_epi64(((__m128i *)&(src).g)[1], (shift))
#elif defined(__AVX__) && defined(__MMX__) && DES_BS_DEPTH == 320 && \
!defined(DES_BS_NO_MMX)
#include <immintrin.h>
#include <mmintrin.h>
typedef struct {
/* Not __m256i because bitwise ops are "floating-point" with AVX */
__m256 f;
__m64 g;
} vtype;
#define vst(dst, ofs, src) \
_mm256_store_ps( \
(float *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = _mm256_xor_ps((a).f, (b).f); \
(dst).g = _mm_xor_si64((a).g, (b).g)
#define vand(dst, a, b) \
(dst).f = _mm256_and_ps((a).f, (b).f); \
(dst).g = _mm_and_si64((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = _mm256_or_ps((a).f, (b).f); \
(dst).g = _mm_or_si64((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = _mm256_andnot_ps((b).f, (a).f); \
(dst).g = _mm_andnot_si64((b).g, (a).g)
#define vshl(dst, src, shift) \
((__m128i *)&(dst).f)[0] = \
_mm_slli_epi64(((__m128i *)&(src).f)[0], (shift)); \
((__m128i *)&(dst).f)[1] = \
_mm_slli_epi64(((__m128i *)&(src).f)[1], (shift)); \
(dst).g = _mm_slli_si64((src).g, (shift))
#define vshr(dst, src, shift) \
((__m128i *)&(dst).f)[0] = \
_mm_srli_epi64(((__m128i *)&(src).f)[0], (shift)); \
((__m128i *)&(dst).f)[1] = \
_mm_srli_epi64(((__m128i *)&(src).f)[1], (shift)); \
(dst).g = _mm_srli_si64((src).g, (shift))
#elif defined(__AVX__) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 320) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 288))
#include <immintrin.h>
#include <mmintrin.h>
typedef struct {
/* Not __m256i because bitwise ops are "floating-point" with AVX */
__m256 f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
_mm256_store_ps( \
(float *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = _mm256_xor_ps((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = _mm256_xor_ps((a).f, vones.f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = _mm256_and_ps((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = _mm256_or_ps((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = _mm256_andnot_ps((b).f, (a).f); \
(dst).g = (a).g & ~(b).g
#define vshl(dst, src, shift) \
((__m128i *)&(dst).f)[0] = \
_mm_slli_epi64(((__m128i *)&(src).f)[0], (shift)); \
((__m128i *)&(dst).f)[1] = \
_mm_slli_epi64(((__m128i *)&(src).f)[1], (shift)); \
(dst).g = (src).g << (shift)
#define vshr(dst, src, shift) \
((__m128i *)&(dst).f)[0] = \
_mm_srli_epi64(((__m128i *)&(src).f)[0], (shift)); \
((__m128i *)&(dst).f)[1] = \
_mm_srli_epi64(((__m128i *)&(src).f)[1], (shift)); \
(dst).g = (src).g >> (shift)
#elif defined(__AVX__) && defined(__MMX__) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 384) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 352))
#include <immintrin.h>
#include <mmintrin.h>
typedef struct {
/* Not __m256i because bitwise ops are "floating-point" with AVX */
__m256 f;
__m64 g;
unsigned ARCH_WORD h;
} vtype;
#define vst(dst, ofs, src) \
_mm256_store_ps( \
(float *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g; \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->h = (src).h
#define vxor(dst, a, b) \
(dst).f = _mm256_xor_ps((a).f, (b).f); \
(dst).g = _mm_xor_si64((a).g, (b).g); \
(dst).h = (a).h ^ (b).h
#define vnot(dst, a) \
(dst).f = _mm256_xor_ps((a).f, vones.f); \
(dst).g = _mm_xor_si64((a).g, vones.g); \
(dst).h = ~(a).h
#define vand(dst, a, b) \
(dst).f = _mm256_and_ps((a).f, (b).f); \
(dst).g = _mm_and_si64((a).g, (b).g); \
(dst).h = (a).h & (b).h
#define vor(dst, a, b) \
(dst).f = _mm256_or_ps((a).f, (b).f); \
(dst).g = _mm_or_si64((a).g, (b).g); \
(dst).h = (a).h | (b).h
#define vandn(dst, a, b) \
(dst).f = _mm256_andnot_ps((b).f, (a).f); \
(dst).g = _mm_andnot_si64((b).g, (a).g); \
(dst).h = (a).h & ~(b).h
#define vshl(dst, src, shift) \
((__m128i *)&(dst).f)[0] = \
_mm_slli_epi64(((__m128i *)&(src).f)[0], (shift)); \
((__m128i *)&(dst).f)[1] = \
_mm_slli_epi64(((__m128i *)&(src).f)[1], (shift)); \
(dst).g = _mm_slli_si64((src).g, (shift)); \
(dst).h = (src).h << (shift)
#define vshr(dst, src, shift) \
((__m128i *)&(dst).f)[0] = \
_mm_srli_epi64(((__m128i *)&(src).f)[0], (shift)); \
((__m128i *)&(dst).f)[1] = \
_mm_srli_epi64(((__m128i *)&(src).f)[1], (shift)); \
(dst).g = _mm_srli_si64((src).g, (shift)); \
(dst).h = (src).h >> (shift)
#elif defined(__SSE2__) && DES_BS_DEPTH == 128
#ifdef __AVX__
#include <immintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
#else
#include <emmintrin.h>
#endif
typedef __m128i vtype;
#define vst(dst, ofs, src) \
_mm_store_si128((vtype *)((DES_bs_vector *)&(dst) + (ofs)), (src))
#define vxorf(a, b) \
_mm_xor_si128((a), (b))
#define vand(dst, a, b) \
(dst) = _mm_and_si128((a), (b))
#define vor(dst, a, b) \
(dst) = _mm_or_si128((a), (b))
#define vandn(dst, a, b) \
(dst) = _mm_andnot_si128((b), (a))
#ifdef __XOP__
#define vsel(dst, a, b, c) \
(dst) = _mm_cmov_si128((b), (a), (c))
#else
#define vsel(dst, a, b, c) \
(dst) = _mm_xor_si128(_mm_andnot_si128((c), (a)), \
_mm_and_si128((c), (b)))
#endif
#define vshl1(dst, src) \
(dst) = _mm_add_epi8((src), (src))
#define vshl(dst, src, shift) \
(dst) = _mm_slli_epi64((src), (shift))
#define vshr(dst, src, shift) \
(dst) = _mm_srli_epi64((src), (shift))
#elif defined(__SSE2__) && DES_BS_DEPTH == 256 && defined(DES_BS_NO_MMX)
#ifdef __AVX__
#include <immintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
#else
#include <emmintrin.h>
#endif
typedef struct {
__m128i f, g;
} vtype;
#define vst(dst, ofs, src) \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
(src).g)
#define vxor(dst, a, b) \
(dst).f = _mm_xor_si128((a).f, (b).f); \
(dst).g = _mm_xor_si128((a).g, (b).g)
#define vand(dst, a, b) \
(dst).f = _mm_and_si128((a).f, (b).f); \
(dst).g = _mm_and_si128((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = _mm_or_si128((a).f, (b).f); \
(dst).g = _mm_or_si128((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = _mm_andnot_si128((b).f, (a).f); \
(dst).g = _mm_andnot_si128((b).g, (a).g)
#ifdef __XOP__
#define vsel(dst, a, b, c) \
(dst).f = _mm_cmov_si128((b).f, (a).f, (c).f); \
(dst).g = _mm_cmov_si128((b).g, (a).g, (c).g)
#endif
#define vshl1(dst, src) \
(dst).f = _mm_add_epi8((src).f, (src).f); \
(dst).g = _mm_add_epi8((src).g, (src).g)
#define vshl(dst, src, shift) \
(dst).f = _mm_slli_epi64((src).f, (shift)); \
(dst).g = _mm_slli_epi64((src).g, (shift))
#define vshr(dst, src, shift) \
(dst).f = _mm_srli_epi64((src).f, (shift)); \
(dst).g = _mm_srli_epi64((src).g, (shift))
#elif defined(__SSE2__) && defined(__MMX__) && DES_BS_DEPTH == 192 && \
!defined(DES_BS_NO_MMX)
#include <emmintrin.h>
#include <mmintrin.h>
typedef struct {
__m128i f;
__m64 g;
} vtype;
#define vst(dst, ofs, src) \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = _mm_xor_si128((a).f, (b).f); \
(dst).g = _mm_xor_si64((a).g, (b).g)
#define vand(dst, a, b) \
(dst).f = _mm_and_si128((a).f, (b).f); \
(dst).g = _mm_and_si64((a).g, (b).g)
#define vor(dst, a, b) \
(dst).f = _mm_or_si128((a).f, (b).f); \
(dst).g = _mm_or_si64((a).g, (b).g)
#define vandn(dst, a, b) \
(dst).f = _mm_andnot_si128((b).f, (a).f); \
(dst).g = _mm_andnot_si64((b).g, (a).g)
#define vshl1(dst, src) \
(dst).f = _mm_add_epi8((src).f, (src).f); \
(dst).g = _mm_add_pi8((src).g, (src).g)
#define vshl(dst, src, shift) \
(dst).f = _mm_slli_epi64((src).f, (shift)); \
(dst).g = _mm_slli_si64((src).g, (shift))
#define vshr(dst, src, shift) \
(dst).f = _mm_srli_epi64((src).f, (shift)); \
(dst).g = _mm_srli_si64((src).g, (shift))
#elif defined(__SSE2__) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 192) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 160))
#include <emmintrin.h>
typedef struct {
__m128i f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = _mm_xor_si128((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = _mm_xor_si128((a).f, vones.f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = _mm_and_si128((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = _mm_or_si128((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = _mm_andnot_si128((b).f, (a).f); \
(dst).g = (a).g & ~(b).g
#define vshl1(dst, src) \
(dst).f = _mm_add_epi8((src).f, (src).f); \
(dst).g = (src).g << 1
#define vshl(dst, src, shift) \
(dst).f = _mm_slli_epi64((src).f, (shift)); \
(dst).g = (src).g << (shift)
#define vshr(dst, src, shift) \
(dst).f = _mm_srli_epi64((src).f, (shift)); \
(dst).g = (src).g >> (shift)
#elif defined(__SSE2__) && defined(__MMX__) && \
((ARCH_BITS == 64 && DES_BS_DEPTH == 256) || \
(ARCH_BITS == 32 && DES_BS_DEPTH == 224))
#include <emmintrin.h>
#include <mmintrin.h>
typedef struct {
__m128i f;
__m64 g;
unsigned ARCH_WORD h;
} vtype;
#define vst(dst, ofs, src) \
_mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
(src).f); \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g; \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->h = (src).h
#define vxor(dst, a, b) \
(dst).f = _mm_xor_si128((a).f, (b).f); \
(dst).g = _mm_xor_si64((a).g, (b).g); \
(dst).h = (a).h ^ (b).h
#define vnot(dst, a) \
(dst).f = _mm_xor_si128((a).f, vones.f); \
(dst).g = _mm_xor_si64((a).g, vones.g); \
(dst).h = ~(a).h
#define vand(dst, a, b) \
(dst).f = _mm_and_si128((a).f, (b).f); \
(dst).g = _mm_and_si64((a).g, (b).g); \
(dst).h = (a).h & (b).h
#define vor(dst, a, b) \
(dst).f = _mm_or_si128((a).f, (b).f); \
(dst).g = _mm_or_si64((a).g, (b).g); \
(dst).h = (a).h | (b).h
#define vandn(dst, a, b) \
(dst).f = _mm_andnot_si128((b).f, (a).f); \
(dst).g = _mm_andnot_si64((b).g, (a).g); \
(dst).h = (a).h & ~(b).h
#define vshl1(dst, src) \
(dst).f = _mm_add_epi8((src).f, (src).f); \
(dst).g = _mm_add_pi8((src).g, (src).g); \
(dst).h = (src).h << 1
#define vshl(dst, src, shift) \
(dst).f = _mm_slli_epi64((src).f, (shift)); \
(dst).g = _mm_slli_si64((src).g, (shift)); \
(dst).h = (src).h << (shift)
#define vshr(dst, src, shift) \
(dst).f = _mm_srli_epi64((src).f, (shift)); \
(dst).g = _mm_srli_si64((src).g, (shift)); \
(dst).h = (src).h >> (shift)
#elif defined(__MMX__) && ARCH_BITS != 64 && DES_BS_DEPTH == 64
#include <mmintrin.h>
typedef __m64 vtype;
#define vxorf(a, b) \
_mm_xor_si64((a), (b))
#define vand(dst, a, b) \
(dst) = _mm_and_si64((a), (b))
#define vor(dst, a, b) \
(dst) = _mm_or_si64((a), (b))
#define vandn(dst, a, b) \
(dst) = _mm_andnot_si64((b), (a))
#define vshl1(dst, src) \
(dst) = _mm_add_pi8((src), (src))
#define vshl(dst, src, shift) \
(dst) = _mm_slli_si64((src), (shift))
#define vshr(dst, src, shift) \
(dst) = _mm_srli_si64((src), (shift))
#elif defined(__MMX__) && ARCH_BITS == 32 && DES_BS_DEPTH == 96
#include <mmintrin.h>
typedef struct {
__m64 f;
unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f = (src).f; \
((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
(dst).f = _mm_xor_si64((a).f, (b).f); \
(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
(dst).f = _mm_xor_si64((a).f, vones.f); \
(dst).g = ~(a).g
#define vand(dst, a, b) \
(dst).f = _mm_and_si64((a).f, (b).f); \
(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
(dst).f = _mm_or_si64((a).f, (b).f); \
(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
(dst).f = _mm_andnot_si64((b).f, (a).f); \
(dst).g = (a).g & ~(b).g
#define vshl1(dst, src) \
(dst).f = _mm_add_pi8((src).f, (src).f); \
(dst).g = (src).g << 1
#define vshl(dst, src, shift) \
(dst).f = _mm_slli_si64((src).f, (shift)); \
(dst).g = (src).g << (shift)
#define vshr(dst, src, shift) \
(dst).f = _mm_srli_si64((src).f, (shift)); \
(dst).g = (src).g >> (shift)
#else
#if DES_BS_VECTOR
#undef DES_BS_VECTOR_LOOPS
#define DES_BS_VECTOR_LOOPS 1
#endif
typedef unsigned ARCH_WORD vtype;
#define vxorf(a, b) \
((a) ^ (b))
#define vnot(dst, a) \
(dst) = ~(a)
#define vand(dst, a, b) \
(dst) = (a) & (b)
#define vor(dst, a, b) \
(dst) = (a) | (b)
#define vandn(dst, a, b) \
(dst) = (a) & ~(b)
#define vsel(dst, a, b, c) \
(dst) = (((a) & ~(c)) ^ ((b) & (c)))
#define vshl(dst, src, shift) \
(dst) = (src) << (shift)
#define vshr(dst, src, shift) \
(dst) = (src) >> (shift)
/* Assume that 0 always fits in one load immediate instruction */
#undef vzero
#define vzero 0
/* Archs friendly to use of immediate values */
#if defined(__x86_64__) || defined(__i386__)
#undef vones
#define vones (~(vtype)0)
#endif
#endif
#ifndef vst
#define vst(dst, ofs, src) \
*((vtype *)((DES_bs_vector *)&(dst) + (ofs))) = (src)
#endif
#if !defined(vxor) && defined(vxorf)
#define vxor(dst, a, b) \
(dst) = vxorf((a), (b))
#endif
#if !defined(vxorf) && defined(vxor)
/*
* This requires gcc's "Statement Exprs" extension (also supported by a number
* of other C compilers).
*/
#define vxorf(a, b) \
({ vtype tmp; vxor(tmp, (a), (b)); tmp; })
#endif
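/*
 * A statement expression evaluates a compound block and yields the value of
 * its last expression; for example, ({ int i = 2; i * i; }) evaluates to 4.
 * Here it lets the statement-style vxor() macro stand in for an
 * expression-style vxorf().
 */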
#ifndef vnot
#define vnot(dst, a) \
vxor((dst), (a), vones)
#endif
#ifndef vshl1
#define vshl1(dst, src) \
vshl((dst), (src), 1)
#endif
#if !DES_BS_VECTOR_LOOPS && defined(vshl) && defined(vshr)
#define DES_BS_VECTOR_LOOPS_K 0
#define DEPTH_K
#define for_each_depth_k()
#define kvtype vtype
#define kvand vand
#define kvor vor
#define kvshl1 vshl1
#define kvshl vshl
#define kvshr vshr
#else
#if DES_BS_VECTOR
#define DES_BS_VECTOR_LOOPS_K 1
#define DEPTH_K [depth]
#define for_each_depth_k() \
for (depth = 0; depth < DES_BS_VECTOR; depth++)
#else
#define DES_BS_VECTOR_LOOPS_K 0
#endif
typedef unsigned ARCH_WORD kvtype;
#define kvand(dst, a, b) \
(dst) = (a) & (b)
#define kvor(dst, a, b) \
(dst) = (a) | (b)
#define kvshl1(dst, src) \
(dst) = (src) << 1
#define kvshl(dst, src, shift) \
(dst) = (src) << (shift)
#define kvshr(dst, src, shift) \
(dst) = (src) >> (shift)
#endif
#if !DES_BS_VECTOR || DES_BS_VECTOR_LOOPS_K
#ifdef __x86_64__
#define mask01 0x0101010101010101UL
#elif defined(__i386__)
#define mask01 0x01010101UL
#else
#undef mask01
#endif
#ifdef mask01
#define mask02 (mask01 << 1)
#define mask04 (mask01 << 2)
#define mask08 (mask01 << 3)
#define mask10 (mask01 << 4)
#define mask20 (mask01 << 5)
#define mask40 (mask01 << 6)
#define mask80 (mask01 << 7)
#endif
#endif
#ifndef mask01
#define mask01 (*(kvtype *)&DES_bs_all.masks[0])
#define mask02 (*(kvtype *)&DES_bs_all.masks[1])
#define mask04 (*(kvtype *)&DES_bs_all.masks[2])
#define mask08 (*(kvtype *)&DES_bs_all.masks[3])
#define mask10 (*(kvtype *)&DES_bs_all.masks[4])
#define mask20 (*(kvtype *)&DES_bs_all.masks[5])
#define mask40 (*(kvtype *)&DES_bs_all.masks[6])
#define mask80 (*(kvtype *)&DES_bs_all.masks[7])
#endif
#ifdef __i386__
/* register-starved */
#define LOAD_V \
kvtype v0 = *(kvtype *)&vp[0]; \
kvtype v4 = *(kvtype *)&vp[4];
#define v1 *(kvtype *)&vp[1]
#define v2 *(kvtype *)&vp[2]
#define v3 *(kvtype *)&vp[3]
#define v5 *(kvtype *)&vp[5]
#define v6 *(kvtype *)&vp[6]
#define v7 *(kvtype *)&vp[7]
#else
#define LOAD_V \
kvtype v0 = *(kvtype *)&vp[0]; \
kvtype v1 = *(kvtype *)&vp[1]; \
kvtype v2 = *(kvtype *)&vp[2]; \
kvtype v3 = *(kvtype *)&vp[3]; \
kvtype v4 = *(kvtype *)&vp[4]; \
kvtype v5 = *(kvtype *)&vp[5]; \
kvtype v6 = *(kvtype *)&vp[6]; \
kvtype v7 = *(kvtype *)&vp[7];
#endif
#define kvand_shl1_or(dst, src, mask) \
kvand(tmp, src, mask); \
kvshl1(tmp, tmp); \
kvor(dst, dst, tmp)
#define kvand_shl_or(dst, src, mask, shift) \
kvand(tmp, src, mask); \
kvshl(tmp, tmp, shift); \
kvor(dst, dst, tmp)
#define kvand_shl1(dst, src, mask) \
kvand(tmp, src, mask); \
kvshl1(dst, tmp)
#define kvand_or(dst, src, mask) \
kvand(tmp, src, mask); \
kvor(dst, dst, tmp)
#define kvand_shr_or(dst, src, mask, shift) \
kvand(tmp, src, mask); \
kvshr(tmp, tmp, shift); \
kvor(dst, dst, tmp)
#define kvand_shr(dst, src, mask, shift) \
kvand(tmp, src, mask); \
kvshr(dst, tmp, shift)
#define FINALIZE_NEXT_KEY_BIT_0 { \
kvtype m = mask01, va, vb, tmp; \
kvand(va, v0, m); \
kvand_shl1(vb, v1, m); \
kvand_shl_or(va, v2, m, 2); \
kvand_shl_or(vb, v3, m, 3); \
kvand_shl_or(va, v4, m, 4); \
kvand_shl_or(vb, v5, m, 5); \
kvand_shl_or(va, v6, m, 6); \
kvand_shl_or(vb, v7, m, 7); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_1 { \
kvtype m = mask02, va, vb, tmp; \
kvand_shr(va, v0, m, 1); \
kvand(vb, v1, m); \
kvand_shl1_or(va, v2, m); \
kvand_shl_or(vb, v3, m, 2); \
kvand_shl_or(va, v4, m, 3); \
kvand_shl_or(vb, v5, m, 4); \
kvand_shl_or(va, v6, m, 5); \
kvand_shl_or(vb, v7, m, 6); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_2 { \
kvtype m = mask04, va, vb, tmp; \
kvand_shr(va, v0, m, 2); \
kvand_shr(vb, v1, m, 1); \
kvand_or(va, v2, m); \
kvand_shl1_or(vb, v3, m); \
kvand_shl_or(va, v4, m, 2); \
kvand_shl_or(vb, v5, m, 3); \
kvand_shl_or(va, v6, m, 4); \
kvand_shl_or(vb, v7, m, 5); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_3 { \
kvtype m = mask08, va, vb, tmp; \
kvand_shr(va, v0, m, 3); \
kvand_shr(vb, v1, m, 2); \
kvand_shr_or(va, v2, m, 1); \
kvand_or(vb, v3, m); \
kvand_shl1_or(va, v4, m); \
kvand_shl_or(vb, v5, m, 2); \
kvand_shl_or(va, v6, m, 3); \
kvand_shl_or(vb, v7, m, 4); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_4 { \
kvtype m = mask10, va, vb, tmp; \
kvand_shr(va, v0, m, 4); \
kvand_shr(vb, v1, m, 3); \
kvand_shr_or(va, v2, m, 2); \
kvand_shr_or(vb, v3, m, 1); \
kvand_or(va, v4, m); \
kvand_shl1_or(vb, v5, m); \
kvand_shl_or(va, v6, m, 2); \
kvand_shl_or(vb, v7, m, 3); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_5 { \
kvtype m = mask20, va, vb, tmp; \
kvand_shr(va, v0, m, 5); \
kvand_shr(vb, v1, m, 4); \
kvand_shr_or(va, v2, m, 3); \
kvand_shr_or(vb, v3, m, 2); \
kvand_shr_or(va, v4, m, 1); \
kvand_or(vb, v5, m); \
kvand_shl1_or(va, v6, m); \
kvand_shl_or(vb, v7, m, 2); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_6 { \
kvtype m = mask40, va, vb, tmp; \
kvand_shr(va, v0, m, 6); \
kvand_shr(vb, v1, m, 5); \
kvand_shr_or(va, v2, m, 4); \
kvand_shr_or(vb, v3, m, 3); \
kvand_shr_or(va, v4, m, 2); \
kvand_shr_or(vb, v5, m, 1); \
kvand_or(va, v6, m); \
kvand_shl1_or(vb, v7, m); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_7 { \
kvtype m = mask80, va, vb, tmp; \
kvand_shr(va, v0, m, 7); \
kvand_shr(vb, v1, m, 6); \
kvand_shr_or(va, v2, m, 5); \
kvand_shr_or(vb, v3, m, 4); \
kvand_shr_or(va, v4, m, 3); \
kvand_shr_or(vb, v5, m, 2); \
kvand_shr_or(va, v6, m, 1); \
kvand_or(vb, v7, m); \
kvor(*(kvtype *)kp, va, vb); \
kp++; \
}
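/*
 * Taken together, the FINALIZE_NEXT_KEY_BIT_b macros above perform an 8x8
 * bit-matrix transpose within every byte lane: output word b collects bit b
 * of each of v0..v7, placed at bit positions 0..7. For bit 0, each output
 * byte is assembled as (deduced from the macro expansion)
 *
 *	out = (v0 & 1) | ((v1 & 1) << 1) | ((v2 & 1) << 2) | ... |
 *	      ((v7 & 1) << 7);
 *
 * with mask01 = 0x01 repeated across the word so that all byte lanes are
 * handled at once; the other bit positions shift their mask and source bits
 * accordingly.
 */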
#if DES_bs_mt
static MAYBE_INLINE void DES_bs_finalize_keys(int t)
#else
static MAYBE_INLINE void DES_bs_finalize_keys(void)
#endif
{
#if DES_BS_VECTOR_LOOPS_K
int depth;
#endif
for_each_depth_k() {
DES_bs_vector *kp = (DES_bs_vector *)&DES_bs_all.K[0] DEPTH_K;
int ic;
for (ic = 0; ic < 8; ic++) {
DES_bs_vector *vp =
(DES_bs_vector *)&DES_bs_all.xkeys.v[ic][0] DEPTH_K;
LOAD_V
FINALIZE_NEXT_KEY_BIT_0
FINALIZE_NEXT_KEY_BIT_1
FINALIZE_NEXT_KEY_BIT_2
FINALIZE_NEXT_KEY_BIT_3
FINALIZE_NEXT_KEY_BIT_4
FINALIZE_NEXT_KEY_BIT_5
FINALIZE_NEXT_KEY_BIT_6
}
}
#if DES_BS_EXPAND
{
int index;
for (index = 0; index < 0x300; index++)
for_each_depth_k() {
#if DES_BS_VECTOR_LOOPS_K
DES_bs_all.KS.v[index] DEPTH_K =
DES_bs_all.KSp[index] DEPTH_K;
#else
vst(*(kvtype *)&DES_bs_all.KS.v[index], 0,
*(kvtype *)DES_bs_all.KSp[index]);
#endif
}
}
#endif
}
#endif
#if DES_bs_mt
MAYBE_INLINE void DES_bs_set_salt_for_thread(int t, unsigned int salt)
#else
void DES_bs_set_salt(ARCH_WORD salt)
#endif
{
unsigned int new = salt;
unsigned int old = DES_bs_all.salt;
int dst;
DES_bs_all.salt = new;
for (dst = 0; dst < 24; dst++) {
if ((new ^ old) & 1) {
DES_bs_vector *sp1, *sp2;
int src1 = dst;
int src2 = dst + 24;
if (new & 1) {
src1 = src2;
src2 = dst;
}
sp1 = DES_bs_all.Ens[src1];
sp2 = DES_bs_all.Ens[src2];
DES_bs_all.E.E[dst] = (ARCH_WORD *)sp1;
DES_bs_all.E.E[dst + 24] = (ARCH_WORD *)sp2;
DES_bs_all.E.E[dst + 48] = (ARCH_WORD *)(sp1 + 32);
DES_bs_all.E.E[dst + 72] = (ARCH_WORD *)(sp2 + 32);
}
new >>= 1;
old >>= 1;
if (new == old)
break;
}
}
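/*
 * Example of the swap above: if bit 0 of the salt differs from the cached
 * value, the expansion entries E[0]/E[24] (and their second-half copies
 * E[48]/E[72]) are exchanged between Ens[0] and Ens[24]. This implements
 * the DES salt by swapping pairs of expanded bits, so no key or data bits
 * need to be touched. The early exit fires once the remaining salt bits
 * are unchanged.
 */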
#if !DES_BS_ASM
/* Include the S-boxes here so that the compiler can inline them */
#if DES_BS == 3
#include "sboxes-s.c"
#elif DES_BS == 2
#include "sboxes.c"
#else
#undef andn
#include "nonstd.c"
#endif
#define b DES_bs_all.B
#define e DES_bs_all.E.E
#if DES_BS_VECTOR_LOOPS
#define kd [depth]
#define bd [depth]
#define ed [depth]
#define DEPTH [depth]
#define for_each_depth() \
for (depth = 0; depth < DES_BS_VECTOR; depth++)
#else
#if DES_BS_EXPAND
#define kd
#else
#define kd [0]
#endif
#define bd
#define ed [0]
#define DEPTH
#define for_each_depth()
#endif
#define DES_bs_clear_block_8(i) \
for_each_depth() { \
vst(b[i] bd, 0, zero); \
vst(b[i] bd, 1, zero); \
vst(b[i] bd, 2, zero); \
vst(b[i] bd, 3, zero); \
vst(b[i] bd, 4, zero); \
vst(b[i] bd, 5, zero); \
vst(b[i] bd, 6, zero); \
vst(b[i] bd, 7, zero); \
}
#define DES_bs_clear_block \
DES_bs_clear_block_8(0); \
DES_bs_clear_block_8(8); \
DES_bs_clear_block_8(16); \
DES_bs_clear_block_8(24); \
DES_bs_clear_block_8(32); \
DES_bs_clear_block_8(40); \
DES_bs_clear_block_8(48); \
DES_bs_clear_block_8(56);
#define DES_bs_set_block_8(i, v0, v1, v2, v3, v4, v5, v6, v7) \
for_each_depth() { \
vst(b[i] bd, 0, v0); \
vst(b[i] bd, 1, v1); \
vst(b[i] bd, 2, v2); \
vst(b[i] bd, 3, v3); \
vst(b[i] bd, 4, v4); \
vst(b[i] bd, 5, v5); \
vst(b[i] bd, 6, v6); \
vst(b[i] bd, 7, v7); \
}
#define x(p) vxorf(*(vtype *)&e[p] ed, *(vtype *)&k[p] kd)
#define y(p, q) vxorf(*(vtype *)&b[p] bd, *(vtype *)&k[q] kd)
#define z(r) ((vtype *)&b[r] bd)
void DES_bs_crypt_25(int keys_count)
{
#if DES_bs_mt
int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;
#endif
#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, keys_count)
#endif
for_each_t(n) {
#if DES_BS_EXPAND
DES_bs_vector *k;
#else
ARCH_WORD **k;
#endif
int iterations, rounds_and_swapped;
#if DES_BS_VECTOR_LOOPS
int depth;
#endif
if (DES_bs_all.keys_changed)
goto finalize_keys;
body:
#if DES_bs_mt
DES_bs_set_salt_for_thread(t, DES_bs_all_by_tnum(-1).salt);
#endif
{
vtype zero = vzero;
DES_bs_clear_block
}
#if DES_BS_EXPAND
k = DES_bs_all.KS.v;
#else
k = DES_bs_all.KS.p;
#endif
rounds_and_swapped = 8;
iterations = 25;
start:
for_each_depth()
s1(x(0), x(1), x(2), x(3), x(4), x(5),
z(40), z(48), z(54), z(62));
for_each_depth()
s2(x(6), x(7), x(8), x(9), x(10), x(11),
z(44), z(59), z(33), z(49));
for_each_depth()
s3(y(7, 12), y(8, 13), y(9, 14),
y(10, 15), y(11, 16), y(12, 17),
z(55), z(47), z(61), z(37));
for_each_depth()
s4(y(11, 18), y(12, 19), y(13, 20),
y(14, 21), y(15, 22), y(16, 23),
z(57), z(51), z(41), z(32));
for_each_depth()
s5(x(24), x(25), x(26), x(27), x(28), x(29),
z(39), z(45), z(56), z(34));
for_each_depth()
s6(x(30), x(31), x(32), x(33), x(34), x(35),
z(35), z(60), z(42), z(50));
for_each_depth()
s7(y(23, 36), y(24, 37), y(25, 38),
y(26, 39), y(27, 40), y(28, 41),
z(63), z(43), z(53), z(38));
for_each_depth()
s8(y(27, 42), y(28, 43), y(29, 44),
y(30, 45), y(31, 46), y(0, 47),
z(36), z(58), z(46), z(52));
if (rounds_and_swapped == 0x100) goto next;
swap:
for_each_depth()
s1(x(48), x(49), x(50), x(51), x(52), x(53),
z(8), z(16), z(22), z(30));
for_each_depth()
s2(x(54), x(55), x(56), x(57), x(58), x(59),
z(12), z(27), z(1), z(17));
for_each_depth()
s3(y(39, 60), y(40, 61), y(41, 62),
y(42, 63), y(43, 64), y(44, 65),
z(23), z(15), z(29), z(5));
for_each_depth()
s4(y(43, 66), y(44, 67), y(45, 68),
y(46, 69), y(47, 70), y(48, 71),
z(25), z(19), z(9), z(0));
for_each_depth()
s5(x(72), x(73), x(74), x(75), x(76), x(77),
z(7), z(13), z(24), z(2));
for_each_depth()
s6(x(78), x(79), x(80), x(81), x(82), x(83),
z(3), z(28), z(10), z(18));
for_each_depth()
s7(y(55, 84), y(56, 85), y(57, 86),
y(58, 87), y(59, 88), y(60, 89),
z(31), z(11), z(21), z(6));
for_each_depth()
s8(y(59, 90), y(60, 91), y(61, 92),
y(62, 93), y(63, 94), y(32, 95),
z(4), z(26), z(14), z(20));
k += 96;
if (--rounds_and_swapped) goto start;
k -= (0x300 + 48);
rounds_and_swapped = 0x108;
if (--iterations) goto swap;
#if DES_bs_mt
continue;
#else
return;
#endif
next:
k -= (0x300 - 48);
rounds_and_swapped = 8;
iterations--;
goto start;
finalize_keys:
DES_bs_all.keys_changed = 0;
#if DES_bs_mt
DES_bs_finalize_keys(t);
#else
DES_bs_finalize_keys();
#endif
goto body;
}
}
void DES_bs_crypt(int count, int keys_count)
{
#if DES_bs_mt
int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;
#endif
#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, count, keys_count)
#endif
for_each_t(n) {
#if DES_BS_EXPAND
DES_bs_vector *k;
#else
ARCH_WORD **k;
#endif
int iterations, rounds_and_swapped;
#if DES_BS_VECTOR_LOOPS
int depth;
#endif
if (DES_bs_all.keys_changed)
goto finalize_keys;
body:
#if DES_bs_mt
DES_bs_set_salt_for_thread(t, DES_bs_all_by_tnum(-1).salt);
#endif
{
vtype zero = vzero;
DES_bs_clear_block
}
#if DES_BS_EXPAND
k = DES_bs_all.KS.v;
#else
k = DES_bs_all.KS.p;
#endif
rounds_and_swapped = 8;
iterations = count;
start:
for_each_depth()
s1(x(0), x(1), x(2), x(3), x(4), x(5),
z(40), z(48), z(54), z(62));
for_each_depth()
s2(x(6), x(7), x(8), x(9), x(10), x(11),
z(44), z(59), z(33), z(49));
for_each_depth()
s3(x(12), x(13), x(14), x(15), x(16), x(17),
z(55), z(47), z(61), z(37));
for_each_depth()
s4(x(18), x(19), x(20), x(21), x(22), x(23),
z(57), z(51), z(41), z(32));
for_each_depth()
s5(x(24), x(25), x(26), x(27), x(28), x(29),
z(39), z(45), z(56), z(34));
for_each_depth()
s6(x(30), x(31), x(32), x(33), x(34), x(35),
z(35), z(60), z(42), z(50));
for_each_depth()
s7(x(36), x(37), x(38), x(39), x(40), x(41),
z(63), z(43), z(53), z(38));
for_each_depth()
s8(x(42), x(43), x(44), x(45), x(46), x(47),
z(36), z(58), z(46), z(52));
if (rounds_and_swapped == 0x100) goto next;
swap:
for_each_depth()
s1(x(48), x(49), x(50), x(51), x(52), x(53),
z(8), z(16), z(22), z(30));
for_each_depth()
s2(x(54), x(55), x(56), x(57), x(58), x(59),
z(12), z(27), z(1), z(17));
for_each_depth()
s3(x(60), x(61), x(62), x(63), x(64), x(65),
z(23), z(15), z(29), z(5));
for_each_depth()
s4(x(66), x(67), x(68), x(69), x(70), x(71),
z(25), z(19), z(9), z(0));
for_each_depth()
s5(x(72), x(73), x(74), x(75), x(76), x(77),
z(7), z(13), z(24), z(2));
for_each_depth()
s6(x(78), x(79), x(80), x(81), x(82), x(83),
z(3), z(28), z(10), z(18));
for_each_depth()
s7(x(84), x(85), x(86), x(87), x(88), x(89),
z(31), z(11), z(21), z(6));
for_each_depth()
s8(x(90), x(91), x(92), x(93), x(94), x(95),
z(4), z(26), z(14), z(20));
k += 96;
if (--rounds_and_swapped) goto start;
k -= (0x300 + 48);
rounds_and_swapped = 0x108;
if (--iterations) goto swap;
#if DES_bs_mt
continue;
#else
return;
#endif
next:
k -= (0x300 - 48);
rounds_and_swapped = 8;
if (--iterations) goto start;
#if DES_bs_mt
continue;
#else
return;
#endif
finalize_keys:
DES_bs_all.keys_changed = 0;
#if DES_bs_mt
DES_bs_finalize_keys(t);
#else
DES_bs_finalize_keys();
#endif
goto body;
}
}
#undef x
#if DES_bs_mt
static MAYBE_INLINE void DES_bs_finalize_keys_LM(int t)
#else
static MAYBE_INLINE void DES_bs_finalize_keys_LM(void)
#endif
{
#if DES_BS_VECTOR_LOOPS_K
int depth;
#endif
for_each_depth_k() {
DES_bs_vector *kp = (DES_bs_vector *)&DES_bs_all.K[0] DEPTH_K;
int ic;
for (ic = 0; ic < 7; ic++) {
DES_bs_vector *vp =
(DES_bs_vector *)&DES_bs_all.xkeys.v[ic][0] DEPTH_K;
LOAD_V
FINALIZE_NEXT_KEY_BIT_0
FINALIZE_NEXT_KEY_BIT_1
FINALIZE_NEXT_KEY_BIT_2
FINALIZE_NEXT_KEY_BIT_3
FINALIZE_NEXT_KEY_BIT_4
FINALIZE_NEXT_KEY_BIT_5
FINALIZE_NEXT_KEY_BIT_6
FINALIZE_NEXT_KEY_BIT_7
}
}
}
#undef kd
#if DES_BS_VECTOR_LOOPS
#define kd [depth]
#else
#define kd [0]
#endif
int DES_bs_crypt_LM(int *pcount, struct db_salt *salt)
{
int keys_count = *pcount;
#if DES_bs_mt
int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;
#endif
#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, keys_count)
#endif
for_each_t(n) {
ARCH_WORD **k;
int rounds;
#if DES_BS_VECTOR_LOOPS
int depth;
#endif
{
vtype z = vzero, o = vones;
DES_bs_set_block_8(0, z, z, z, z, z, z, z, z);
DES_bs_set_block_8(8, o, o, o, z, o, z, z, z);
DES_bs_set_block_8(16, z, z, z, z, z, z, z, o);
DES_bs_set_block_8(24, z, z, o, z, z, o, o, o);
DES_bs_set_block_8(32, z, z, z, o, z, o, o, o);
DES_bs_set_block_8(40, z, z, z, z, z, o, z, z);
DES_bs_set_block_8(48, o, o, z, z, z, z, o, z);
DES_bs_set_block_8(56, o, z, o, z, o, o, o, o);
}
#if DES_bs_mt
DES_bs_finalize_keys_LM(t);
#else
DES_bs_finalize_keys_LM();
#endif
k = DES_bs_all.KS.p;
rounds = 8;
do {
for_each_depth()
s1(y(31, 0), y(0, 1), y(1, 2),
y(2, 3), y(3, 4), y(4, 5),
z(40), z(48), z(54), z(62));
for_each_depth()
s2(y(3, 6), y(4, 7), y(5, 8),
y(6, 9), y(7, 10), y(8, 11),
z(44), z(59), z(33), z(49));
for_each_depth()
s3(y(7, 12), y(8, 13), y(9, 14),
y(10, 15), y(11, 16), y(12, 17),
z(55), z(47), z(61), z(37));
for_each_depth()
s4(y(11, 18), y(12, 19), y(13, 20),
y(14, 21), y(15, 22), y(16, 23),
z(57), z(51), z(41), z(32));
for_each_depth()
s5(y(15, 24), y(16, 25), y(17, 26),
y(18, 27), y(19, 28), y(20, 29),
z(39), z(45), z(56), z(34));
for_each_depth()
s6(y(19, 30), y(20, 31), y(21, 32),
y(22, 33), y(23, 34), y(24, 35),
z(35), z(60), z(42), z(50));
for_each_depth()
s7(y(23, 36), y(24, 37), y(25, 38),
y(26, 39), y(27, 40), y(28, 41),
z(63), z(43), z(53), z(38));
for_each_depth()
s8(y(27, 42), y(28, 43), y(29, 44),
y(30, 45), y(31, 46), y(0, 47),
z(36), z(58), z(46), z(52));
for_each_depth()
s1(y(63, 48), y(32, 49), y(33, 50),
y(34, 51), y(35, 52), y(36, 53),
z(8), z(16), z(22), z(30));
for_each_depth()
s2(y(35, 54), y(36, 55), y(37, 56),
y(38, 57), y(39, 58), y(40, 59),
z(12), z(27), z(1), z(17));
for_each_depth()
s3(y(39, 60), y(40, 61), y(41, 62),
y(42, 63), y(43, 64), y(44, 65),
z(23), z(15), z(29), z(5));
for_each_depth()
s4(y(43, 66), y(44, 67), y(45, 68),
y(46, 69), y(47, 70), y(48, 71),
z(25), z(19), z(9), z(0));
for_each_depth()
s5(y(47, 72), y(48, 73), y(49, 74),
y(50, 75), y(51, 76), y(52, 77),
z(7), z(13), z(24), z(2));
for_each_depth()
s6(y(51, 78), y(52, 79), y(53, 80),
y(54, 81), y(55, 82), y(56, 83),
z(3), z(28), z(10), z(18));
for_each_depth()
s7(y(55, 84), y(56, 85), y(57, 86),
y(58, 87), y(59, 88), y(60, 89),
z(31), z(11), z(21), z(6));
for_each_depth()
s8(y(59, 90), y(60, 91), y(61, 92),
y(62, 93), y(63, 94), y(32, 95),
z(4), z(26), z(14), z(20));
k += 96;
} while (--rounds);
}
return keys_count;
}
#if DES_bs_mt
static MAYBE_INLINE void DES_bs_finalize_keys_plain(int t)
#else
static MAYBE_INLINE void DES_bs_finalize_keys_plain(void)
#endif
{
#if DES_BS_VECTOR_LOOPS_K
int depth;
#endif
for_each_depth_k() {
DES_bs_vector *kp = (DES_bs_vector *)&DES_bs_all.K[0] DEPTH_K;
int ic;
for (ic = 0; ic < 8; ic++) {
DES_bs_vector *vp =
(DES_bs_vector *)&DES_bs_all.xkeys.v[ic][0] DEPTH_K;
LOAD_V
FINALIZE_NEXT_KEY_BIT_0
FINALIZE_NEXT_KEY_BIT_1
FINALIZE_NEXT_KEY_BIT_2
FINALIZE_NEXT_KEY_BIT_3
FINALIZE_NEXT_KEY_BIT_4
FINALIZE_NEXT_KEY_BIT_5
FINALIZE_NEXT_KEY_BIT_6
}
}
}
#undef v1
#undef v2
#undef v3
#undef v5
#undef v6
#undef v7
/* Single DES encryption with no salt */
#undef kd
#if DES_BS_VECTOR_LOOPS
#define kd [depth]
#else
#define kd [0]
#endif
#if DES_BS_VECTOR
#define INDX [index]
#else
#define INDX
#endif
void DES_bs_crypt_plain(int keys_count)
{
#if DES_bs_mt
int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;
#endif
#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, keys_count, DES_bs_P)
#endif
for_each_t(n) {
ARCH_WORD **k;
int rounds;
#if DES_BS_VECTOR_LOOPS
int depth;
#endif
int i;
#if DES_BS_VECTOR
int index;
#endif
for (i = 0; i < 64; i++)
{
#if DES_BS_VECTOR
for (index = 0; index < DES_BS_VECTOR_SIZE; index++)
#endif
DES_bs_all.B[i]INDX = DES_bs_P[i]INDX;
}
#if DES_bs_mt
DES_bs_finalize_keys_plain(t);
#else
DES_bs_finalize_keys_plain();
#endif
k = DES_bs_all.KS.p;
rounds = 8;
do {
for_each_depth()
s1(y(31, 0), y(0, 1), y(1, 2),
y(2, 3), y(3, 4), y(4, 5),
z(40), z(48), z(54), z(62));
for_each_depth()
s2(y(3, 6), y(4, 7), y(5, 8),
y(6, 9), y(7, 10), y(8, 11),
z(44), z(59), z(33), z(49));
for_each_depth()
s3(y(7, 12), y(8, 13), y(9, 14),
y(10, 15), y(11, 16), y(12, 17),
z(55), z(47), z(61), z(37));
for_each_depth()
s4(y(11, 18), y(12, 19), y(13, 20),
y(14, 21), y(15, 22), y(16, 23),
z(57), z(51), z(41), z(32));
for_each_depth()
s5(y(15, 24), y(16, 25), y(17, 26),
y(18, 27), y(19, 28), y(20, 29),
z(39), z(45), z(56), z(34));
for_each_depth()
s6(y(19, 30), y(20, 31), y(21, 32),
y(22, 33), y(23, 34), y(24, 35),
z(35), z(60), z(42), z(50));
for_each_depth()
s7(y(23, 36), y(24, 37), y(25, 38),
y(26, 39), y(27, 40), y(28, 41),
z(63), z(43), z(53), z(38));
for_each_depth()
s8(y(27, 42), y(28, 43), y(29, 44),
y(30, 45), y(31, 46), y(0, 47),
z(36), z(58), z(46), z(52));
for_each_depth()
s1(y(63, 48), y(32, 49), y(33, 50),
y(34, 51), y(35, 52), y(36, 53),
z(8), z(16), z(22), z(30));
for_each_depth()
s2(y(35, 54), y(36, 55), y(37, 56),
y(38, 57), y(39, 58), y(40, 59),
z(12), z(27), z(1), z(17));
for_each_depth()
s3(y(39, 60), y(40, 61), y(41, 62),
y(42, 63), y(43, 64), y(44, 65),
z(23), z(15), z(29), z(5));
for_each_depth()
s4(y(43, 66), y(44, 67), y(45, 68),
y(46, 69), y(47, 70), y(48, 71),
z(25), z(19), z(9), z(0));
for_each_depth()
s5(y(47, 72), y(48, 73), y(49, 74),
y(50, 75), y(51, 76), y(52, 77),
z(7), z(13), z(24), z(2));
for_each_depth()
s6(y(51, 78), y(52, 79), y(53, 80),
y(54, 81), y(55, 82), y(56, 83),
z(3), z(28), z(10), z(18));
for_each_depth()
s7(y(55, 84), y(56, 85), y(57, 86),
y(58, 87), y(59, 88), y(60, 89),
z(31), z(11), z(21), z(6));
for_each_depth()
s8(y(59, 90), y(60, 91), y(61, 92),
y(62, 93), y(63, 94), y(32, 95),
z(4), z(26), z(14), z(20));
k += 96;
} while (--rounds);
}
}
#endif
#ifdef INDX
#undef INDX
#endif
#if DES_BS_VECTOR
#define INDX [k]
#else
#define INDX
#endif
void DES_bs_generate_plaintext(unsigned char *plaintext)
{
int i, j;
#if DES_BS_VECTOR
int k;
#endif
/* Set the same plaintext for all bit layers */
for (i = 0; i < 64; i++) {
j = (int) (plaintext[i / 8] >> (7 - (i % 8))) & 0x01;
if (j == 1)
j = -1;
#if DES_BS_VECTOR
for (k = 0; k < DES_BS_VECTOR_SIZE; k++)
#endif
DES_bs_P[i]INDX = j;
}
}
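/*
 * In the bitsliced representation built above, each of the 64 plaintext
 * bits occupies an entire vector word: a set input bit becomes j = -1 (all
 * ones, i.e. the bit is set for every candidate key in the batch), and a
 * clear bit stays 0.
 */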
|
GB_unop__identity_fc64_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_uint64)
// op(A') function: GB (_unop_tran__identity_fc64_uint64)
// C type: GxB_FC64_t
// A type: uint64_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
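// For example, aij = 5 (uint64_t) is cast to the complex value z = 5 + 0i,
// which the identity operator then stores unchanged: Cx [pC] = 5 + 0i.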
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fc64_uint64)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fc64_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_AxB_saxpy3_slice_balanced.c | //------------------------------------------------------------------------------
// GB_AxB_saxpy3_slice_balanced: construct balanced tasks for GB_AxB_saxpy3
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If the mask is present but must be discarded, this function returns
// GrB_NO_VALUE, to indicate that the analysis was terminated early.
#include "GB_AxB_saxpy3.h"
// control parameters for generating parallel tasks
#define GB_NTASKS_PER_THREAD 2
#define GB_COSTLY 1.2
#define GB_FINE_WORK 2
#define GB_MWORK_ALPHA 0.01
#define GB_MWORK_BETA 0.10
#define GB_FREE_WORKSPACE \
{ \
GB_WERK_POP (Fine_fl, int64_t) ; \
GB_WERK_POP (Fine_slice, int64_t) ; \
GB_WERK_POP (Coarse_Work, int64_t) ; \
GB_WERK_POP (Coarse_initial, int64_t) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORKSPACE ; \
GB_FREE_WORK (&SaxpyTasks, SaxpyTasks_size) ; \
}
//------------------------------------------------------------------------------
// GB_hash_table_size
//------------------------------------------------------------------------------
// flmax is the max flop count for computing A*B(:,j), for any vector j that
// this task computes. If the mask M is present, flmax also includes the
// number of entries in M(:,j). GB_hash_table_size determines the hash table
// size for this task, which is twice the smallest power of 2 larger than
// flmax. If flmax is large enough, the hash_size is returned as cvlen, so
// that Gustavson's method will be used instead of the Hash method.
// By default, Gustavson vs Hash is selected automatically.  AxB_method can
// be set via the descriptor or a global setting to one of the non-default
// values GxB_AxB_GUSTAVSON or GxB_AxB_HASH, to force the selection of that
// method.  However, if Hash is requested but the hash table size equals or
// exceeds cvlen, then Gustavson's method is used instead.
static inline int64_t GB_hash_table_size
(
int64_t flmax, // max flop count for any vector computed by this task
int64_t cvlen, // vector length of C
const GrB_Desc_Value AxB_method // Default, Gustavson, or Hash
)
{
int64_t hash_size ;
if (AxB_method == GxB_AxB_GUSTAVSON || flmax >= cvlen/2)
{
//----------------------------------------------------------------------
// use Gustavson if selected explicitly or if flmax is large
//----------------------------------------------------------------------
hash_size = cvlen ;
}
else
{
//----------------------------------------------------------------------
// flmax is small; consider hash vs Gustavson
//----------------------------------------------------------------------
// hash_size = 2 * (smallest power of 2 strictly greater than flmax)
hash_size = ((uint64_t) 2) << (GB_FLOOR_LOG2 (flmax) + 1) ;
bool use_Gustavson ;
if (AxB_method == GxB_AxB_HASH)
{
// always use Hash method, unless the hash_size >= cvlen
use_Gustavson = (hash_size >= cvlen) ;
}
else
{
// default: auto selection:
// use Gustavson's method if hash_size is too big
use_Gustavson = (hash_size >= cvlen/12) ;
}
if (use_Gustavson)
{
hash_size = cvlen ;
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
return (hash_size) ;
}
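// Worked example: for flmax = 100, GB_FLOOR_LOG2 (flmax) is 6, so hash_size
// = 2 << 7 = 256, which is twice 128, the smallest power of 2 strictly
// larger than 100.  Under the default method with cvlen = 1000, 256 >=
// 1000/12 holds, so Gustavson is selected and hash_size becomes cvlen; with
// cvlen = 100000 the hash method is kept with a 256-entry table.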
//------------------------------------------------------------------------------
// GB_create_coarse_task: create a single coarse task
//------------------------------------------------------------------------------
// Compute the max flop count for any vector in a coarse task, determine the
// hash table size, and construct the coarse task.
static inline void GB_create_coarse_task
(
int64_t kfirst, // coarse task consists of vectors kfirst:klast
int64_t klast,
GB_saxpy3task_struct *SaxpyTasks,
int taskid, // taskid for this coarse task
int64_t *Bflops, // size bnvec; cum sum of flop counts for vectors of B
int64_t cvlen, // vector length of B and C
double chunk,
int nthreads_max,
int64_t *Coarse_Work, // workspace for parallel reduction for flop count
const GrB_Desc_Value AxB_method // Default, Gustavson, or Hash
)
{
//--------------------------------------------------------------------------
// find the max # of flops for any vector in this task
//--------------------------------------------------------------------------
int64_t nk = klast - kfirst + 1 ;
int nth = GB_nthreads (nk, chunk, nthreads_max) ;
// each thread finds the max flop count for a subset of the vectors
int tid ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (tid = 0 ; tid < nth ; tid++)
{
int64_t my_flmax = 1, istart, iend ;
GB_PARTITION (istart, iend, nk, tid, nth) ;
for (int64_t i = istart ; i < iend ; i++)
{
int64_t kk = kfirst + i ;
int64_t fl = Bflops [kk+1] - Bflops [kk] ;
my_flmax = GB_IMAX (my_flmax, fl) ;
}
Coarse_Work [tid] = my_flmax ;
}
// combine results from each thread
int64_t flmax = 1 ;
for (tid = 0 ; tid < nth ; tid++)
{
flmax = GB_IMAX (flmax, Coarse_Work [tid]) ;
}
// check the parallel computation
#ifdef GB_DEBUG
int64_t flmax2 = 1 ;
for (int64_t kk = kfirst ; kk <= klast ; kk++)
{
int64_t fl = Bflops [kk+1] - Bflops [kk] ;
flmax2 = GB_IMAX (flmax2, fl) ;
}
ASSERT (flmax == flmax2) ;
#endif
//--------------------------------------------------------------------------
// define the coarse task
//--------------------------------------------------------------------------
SaxpyTasks [taskid].start = kfirst ;
SaxpyTasks [taskid].end = klast ;
SaxpyTasks [taskid].vector = -1 ;
SaxpyTasks [taskid].hsize = GB_hash_table_size (flmax, cvlen, AxB_method) ;
SaxpyTasks [taskid].Hi = NULL ; // assigned later
SaxpyTasks [taskid].Hf = NULL ; // assigned later
SaxpyTasks [taskid].Hx = NULL ; // assigned later
SaxpyTasks [taskid].my_cjnz = 0 ; // for fine tasks only
SaxpyTasks [taskid].leader = taskid ;
SaxpyTasks [taskid].team_size = 1 ;
}
//------------------------------------------------------------------------------
// GB_AxB_saxpy3_slice_balanced: create balanced tasks for saxpy3
//------------------------------------------------------------------------------
GrB_Info GB_AxB_saxpy3_slice_balanced
(
// inputs
GrB_Matrix C, // output matrix
const GrB_Matrix M, // optional mask matrix
const bool Mask_comp, // if true, use !M
const GrB_Matrix A, // input matrix A
const GrB_Matrix B, // input matrix B
GrB_Desc_Value AxB_method, // Default, Gustavson, or Hash
// outputs
GB_saxpy3task_struct **SaxpyTasks_handle,
size_t *SaxpyTasks_size_handle,
bool *apply_mask, // if true, apply M during saxpy3
bool *M_in_place, // if true, use M in-place
int *ntasks, // # of tasks created (coarse and fine)
int *nfine, // # of fine tasks created
int *nthreads, // # of threads to use
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
(*apply_mask) = false ;
(*M_in_place) = false ;
(*ntasks) = 0 ;
(*nfine) = 0 ;
(*nthreads) = 0 ;
ASSERT_MATRIX_OK_OR_NULL (M, "M for saxpy3_slice_balanced A*B", GB0) ;
ASSERT (!GB_PENDING (M)) ;
ASSERT (GB_JUMBLED_OK (M)) ;
ASSERT (!GB_ZOMBIES (M)) ;
ASSERT_MATRIX_OK (A, "A for saxpy3_slice_balanced A*B", GB0) ;
ASSERT (!GB_PENDING (A)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT_MATRIX_OK (B, "B for saxpy3_slice_balanced A*B", GB0) ;
ASSERT (!GB_PENDING (B)) ;
ASSERT (GB_JUMBLED_OK (B)) ;
ASSERT (!GB_ZOMBIES (B)) ;
//--------------------------------------------------------------------------
// determine the # of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// define result and workspace
//--------------------------------------------------------------------------
GB_saxpy3task_struct *restrict SaxpyTasks = NULL ;
size_t SaxpyTasks_size = 0 ;
GB_WERK_DECLARE (Coarse_initial, int64_t) ; // initial coarse tasks
GB_WERK_DECLARE (Coarse_Work, int64_t) ; // workspace for flop counts
GB_WERK_DECLARE (Fine_slice, int64_t) ;
GB_WERK_DECLARE (Fine_fl, int64_t) ; // size max(nnz(B(:,j)))
//--------------------------------------------------------------------------
// get A, and B
//--------------------------------------------------------------------------
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t avlen = A->vlen ;
const int64_t anvec = A->nvec ;
const bool A_is_hyper = GB_IS_HYPERSPARSE (A) ;
const int64_t *restrict Bp = B->p ;
const int64_t *restrict Bh = B->h ;
const int8_t *restrict Bb = B->b ;
const int64_t *restrict Bi = B->i ;
const int64_t bvdim = B->vdim ;
const int64_t bnz = GB_nnz_held (B) ;
const int64_t bnvec = B->nvec ;
const int64_t bvlen = B->vlen ;
const bool B_is_hyper = GB_IS_HYPERSPARSE (B) ;
int64_t cvlen = avlen ;
int64_t cvdim = bvdim ;
//--------------------------------------------------------------------------
// compute flop counts for each vector of B and C
//--------------------------------------------------------------------------
int64_t Mwork = 0 ;
int64_t *restrict Bflops = C->p ; // use C->p as workspace for Bflops
GB_OK (GB_AxB_saxpy3_flopcount (&Mwork, Bflops, M, Mask_comp, A, B,
Context)) ;
int64_t total_flops = Bflops [bnvec] ;
double axbflops = total_flops - Mwork ;
GBURBLE ("axbwork %g ", axbflops) ;
if (Mwork > 0) GBURBLE ("mwork %g ", (double) Mwork) ;
//--------------------------------------------------------------------------
// determine if the mask M should be applied, or done later
//--------------------------------------------------------------------------
if (M == NULL)
{
//----------------------------------------------------------------------
// M is not present
//----------------------------------------------------------------------
(*apply_mask) = false ;
}
else if (GB_IS_BITMAP (M) || GB_as_if_full (M))
{
//----------------------------------------------------------------------
// M is present and full, bitmap, or sparse/hyper with all entries
//----------------------------------------------------------------------
// Choose all-hash or all-Gustavson tasks, and apply M during saxpy3.
(*apply_mask) = true ;
// The work for M has not yet been added to Bflops.
// Each vector M(:,j) has cvlen entries.
Mwork = cvlen * cvdim ;
if (!(AxB_method == GxB_AxB_HASH || AxB_method == GxB_AxB_GUSTAVSON))
{
if (axbflops < (double) Mwork * GB_MWORK_BETA)
{
// The mask is too costly to scatter into the Hf workspace.
// Leave it in place and use all-hash tasks.
AxB_method = GxB_AxB_HASH ;
}
else
{
// Scatter M into Hf and use all-Gustavson tasks.
AxB_method = GxB_AxB_GUSTAVSON ;
}
}
if (AxB_method == GxB_AxB_HASH)
{
// Use the hash method for all tasks (except for those tasks which
// require a hash table size >= cvlen; those tasks use Gustavson).
// Do not scatter the mask into the Hf hash workspace. The work
// for the mask is not accounted for in Bflops, so the hash tables
// can be small.
(*M_in_place) = true ;
GBURBLE ("(use mask in-place) ") ;
}
else
{
// Use the Gustavson method for all tasks, and scatter M into the
// fine Gustavson workspace. The work for M is not yet in the
// Bflops cumulative sum. Add it now.
ASSERT (AxB_method == GxB_AxB_GUSTAVSON) ;
int nth = GB_nthreads (bnvec, chunk, nthreads_max) ;
int64_t kk ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (kk = 0 ; kk <= bnvec ; kk++)
{
Bflops [kk] += cvlen * kk ;
}
total_flops = Bflops [bnvec] ;
GBURBLE ("(use mask) ") ;
}
}
else if (axbflops < ((double) Mwork * GB_MWORK_ALPHA))
{
//----------------------------------------------------------------------
// M is costly to use; apply it after C=A*B
//----------------------------------------------------------------------
// Do not use M during the computation of A*B. Instead, compute C=A*B
// and then apply the mask later. Tell the caller that the mask should
// not be applied, so that it will be applied later in GB_mxm.
(*apply_mask) = false ;
GBURBLE ("(discard mask) ") ;
GB_FREE_ALL ;
return (GrB_NO_VALUE) ;
}
else
{
//----------------------------------------------------------------------
// use M during saxpy3
//----------------------------------------------------------------------
(*apply_mask) = true ;
GBURBLE ("(use mask) ") ;
}
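// For example, with axbflops = 50 and a mask costing Mwork = 10000, the
// GB_MWORK_ALPHA test above (50 < 0.01 * 10000 = 100) discards the mask:
// C=A*B is computed unmasked and M is applied afterwards in GB_mxm.  With
// axbflops = 500 the mask would instead be used during saxpy3.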
//--------------------------------------------------------------------------
// determine # of threads and # of initial coarse tasks
//--------------------------------------------------------------------------
(*nthreads) = GB_nthreads ((double) total_flops, chunk, nthreads_max) ;
int ntasks_initial = ((*nthreads) == 1) ? 1 :
(GB_NTASKS_PER_THREAD * (*nthreads)) ;
//--------------------------------------------------------------------------
// give preference to Gustavson when using few threads
//--------------------------------------------------------------------------
if ((*nthreads) <= 8 &&
(!(AxB_method == GxB_AxB_HASH || AxB_method == GxB_AxB_GUSTAVSON)))
{
// Unless a specific method has been explicitly requested, see if
// Gustavson should be used with a small number of threads.
// Matrix-vector has a maximum intensity of 1, so this heuristic only
// applies to GrB_mxm.
double abnz = GB_nnz (A) + GB_nnz (B) + 1 ;
double workspace = (double) ntasks_initial * (double) cvlen ;
double intensity = total_flops / abnz ;
GBURBLE ("(intensity: %0.3g workspace/(nnz(A)+nnz(B)): %0.3g",
intensity, workspace / abnz) ;
if (intensity >= 8 && workspace < abnz)
{
// work intensity is large, and Gustavson workspace is modest;
// use Gustavson for all tasks
AxB_method = GxB_AxB_GUSTAVSON ;
GBURBLE (": select Gustvason) ") ;
}
else
{
// use default task creation: mix of Hash and Gustavson
GBURBLE (") ") ;
}
}
//--------------------------------------------------------------------------
// determine target task size
//--------------------------------------------------------------------------
double target_task_size = ((double) total_flops) / ntasks_initial ;
target_task_size = GB_IMAX (target_task_size, chunk) ;
double target_fine_size = target_task_size / GB_FINE_WORK ;
target_fine_size = GB_IMAX (target_fine_size, chunk) ;
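// For example, with total_flops = 1e6 and 4 threads, ntasks_initial = 8
// (GB_NTASKS_PER_THREAD = 2), giving target_task_size = 125000 flops and
// target_fine_size = 62500 flops, assuming both exceed the chunk size.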
//--------------------------------------------------------------------------
// determine # of parallel tasks
//--------------------------------------------------------------------------
int ncoarse = 0 ; // # of coarse tasks
int max_bjnz = 0 ; // max (nnz (B (:,j))) of fine tasks
// FUTURE: also use ultra-fine tasks that compute A(i1:i2,k)*B(k,j)
if (ntasks_initial > 1)
{
//----------------------------------------------------------------------
// construct initial coarse tasks
//----------------------------------------------------------------------
GB_WERK_PUSH (Coarse_initial, ntasks_initial + 1, int64_t) ;
if (Coarse_initial == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
GB_pslice (Coarse_initial, Bflops, bnvec, ntasks_initial, true) ;
//----------------------------------------------------------------------
// split the work into coarse and fine tasks
//----------------------------------------------------------------------
for (int taskid = 0 ; taskid < ntasks_initial ; taskid++)
{
// get the initial coarse task
int64_t kfirst = Coarse_initial [taskid] ;
int64_t klast = Coarse_initial [taskid+1] ;
int64_t task_ncols = klast - kfirst ;
int64_t task_flops = Bflops [klast] - Bflops [kfirst] ;
if (task_ncols == 0)
{
// This coarse task is empty, having been squeezed out by
// costly vectors in adjacent coarse tasks.
}
else if (task_flops > 2 * GB_COSTLY * target_task_size)
{
// This coarse task is too costly, because it contains one or
// more costly vectors. Split its vectors into a mixture of
// coarse and fine tasks.
int64_t kcoarse_start = kfirst ;
for (int64_t kk = kfirst ; kk < klast ; kk++)
{
// jflops = # of flops to compute a single vector A*B(:,j)
// where j == GBH (Bh, kk)
double jflops = Bflops [kk+1] - Bflops [kk] ;
// bjnz = nnz (B (:,j))
int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]);
if (jflops > GB_COSTLY * target_task_size && bjnz > 1)
{
// A*B(:,j) is costly; split it into 2 or more fine
// tasks. First flush the prior coarse task, if any.
if (kcoarse_start < kk)
{
// vectors kcoarse_start to kk-1 form a single
// coarse task
ncoarse++ ;
}
// next coarse task (if any) starts at kk+1
kcoarse_start = kk+1 ;
// vector kk will be split into multiple fine tasks
max_bjnz = GB_IMAX (max_bjnz, bjnz) ;
int team_size = ceil (jflops / target_fine_size) ;
(*nfine) += team_size ;
}
}
// flush the last coarse task, if any
if (kcoarse_start < klast)
{
// vectors kcoarse_start to klast-1 form a single
// coarse task
ncoarse++ ;
}
}
else
{
// This coarse task is OK as-is.
ncoarse++ ;
}
}
}
else
{
//----------------------------------------------------------------------
// entire computation in a single fine or coarse task
//----------------------------------------------------------------------
if (bnvec == 1)
{
// If B is a single vector, and is computed by a single thread,
// then a single fine task is used.
(*nfine) = 1 ;
ncoarse = 0 ;
}
else
{
// One thread uses a single coarse task if B is not a vector.
(*nfine) = 0 ;
ncoarse = 1 ;
}
}
(*ntasks) = ncoarse + (*nfine) ;
//--------------------------------------------------------------------------
// allocate the tasks, and workspace to construct fine tasks
//--------------------------------------------------------------------------
SaxpyTasks = GB_MALLOC_WORK ((*ntasks), GB_saxpy3task_struct,
&SaxpyTasks_size) ;
GB_WERK_PUSH (Coarse_Work, nthreads_max, int64_t) ;
if (max_bjnz > 0)
{
// also allocate workspace to construct fine tasks
GB_WERK_PUSH (Fine_slice, (*ntasks)+1, int64_t) ;
// Fine_fl will only fit on the Werk stack if max_bjnz is small,
// but try anyway, in case it fits. It is placed at the top of the
// Werk stack.
GB_WERK_PUSH (Fine_fl, max_bjnz+1, int64_t) ;
}
if (SaxpyTasks == NULL || Coarse_Work == NULL ||
(max_bjnz > 0 && (Fine_slice == NULL || Fine_fl == NULL)))
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
// clear SaxpyTasks
memset (SaxpyTasks, 0, SaxpyTasks_size) ;
//--------------------------------------------------------------------------
// create the tasks
//--------------------------------------------------------------------------
if (ntasks_initial > 1)
{
//----------------------------------------------------------------------
// create the coarse and fine tasks
//----------------------------------------------------------------------
int nf = 0 ; // fine tasks have task id 0:nfine-1
int nc = (*nfine) ; // coarse task ids are nfine:ntasks-1
for (int taskid = 0 ; taskid < ntasks_initial ; taskid++)
{
// get the initial coarse task
int64_t kfirst = Coarse_initial [taskid] ;
int64_t klast = Coarse_initial [taskid+1] ;
int64_t task_ncols = klast - kfirst ;
int64_t task_flops = Bflops [klast] - Bflops [kfirst] ;
if (task_ncols == 0)
{
// This coarse task is empty, having been squeezed out by
// costly vectors in adjacent coarse tasks.
}
else if (task_flops > 2 * GB_COSTLY * target_task_size)
{
// This coarse task is too costly, because it contains one or
// more costly vectors. Split its vectors into a mixture of
// coarse and fine tasks.
int64_t kcoarse_start = kfirst ;
for (int64_t kk = kfirst ; kk < klast ; kk++)
{
// jflops = # of flops to compute a single vector A*B(:,j)
double jflops = Bflops [kk+1] - Bflops [kk] ;
// bjnz = nnz (B (:,j))
int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]);
if (jflops > GB_COSTLY * target_task_size && bjnz > 1)
{
// A*B(:,j) is costly; split it into 2 or more fine
// tasks. First flush the prior coarse task, if any.
if (kcoarse_start < kk)
{
// kcoarse_start:kk-1 form a single coarse task
GB_create_coarse_task (kcoarse_start, kk-1,
SaxpyTasks, nc++, Bflops, cvlen, chunk,
nthreads_max, Coarse_Work, AxB_method) ;
}
// next coarse task (if any) starts at kk+1
kcoarse_start = kk+1 ;
// count the work for each entry B(k,j). Do not
// include the work to scan M(:,j), since that will
// be evenly divided between all tasks in this team.
int64_t pB_start = GBP (Bp, kk, bvlen) ;
int nth = GB_nthreads (bjnz, chunk, nthreads_max) ;
int64_t s ;
#pragma omp parallel for num_threads(nth) \
schedule(static)
for (s = 0 ; s < bjnz ; s++)
{
// get B(k,j)
Fine_fl [s] = 1 ;
int64_t pB = pB_start + s ;
if (!GBB (Bb, pB)) continue ;
int64_t k = GBI (Bi, pB, bvlen) ;
// fl = flop count for just A(:,k)*B(k,j)
int64_t pA, pA_end ;
int64_t pleft = 0 ;
GB_lookup (A_is_hyper, Ah, Ap, avlen, &pleft,
anvec-1, k, &pA, &pA_end) ;
int64_t fl = pA_end - pA ;
Fine_fl [s] = fl ;
ASSERT (fl >= 0) ;
}
// cumulative sum of flops to compute A*B(:,j)
GB_cumsum (Fine_fl, bjnz, NULL, nth, Context) ;
// slice B(:,j) into fine tasks
int team_size = ceil (jflops / target_fine_size) ;
ASSERT (Fine_slice != NULL) ;
GB_pslice (Fine_slice, Fine_fl, bjnz, team_size, false);
// shared hash table for all fine tasks for A*B(:,j)
int64_t hsize =
GB_hash_table_size (jflops, cvlen, AxB_method) ;
// construct the fine tasks for C(:,j)=A*B(:,j)
int leader = nf ;
for (int fid = 0 ; fid < team_size ; fid++)
{
int64_t pstart = Fine_slice [fid] ;
int64_t pend = Fine_slice [fid+1] ;
int64_t fl = Fine_fl [pend] - Fine_fl [pstart] ;
SaxpyTasks [nf].start = pB_start + pstart ;
SaxpyTasks [nf].end = pB_start + pend - 1 ;
SaxpyTasks [nf].vector = kk ;
SaxpyTasks [nf].hsize = hsize ;
SaxpyTasks [nf].Hi = NULL ; // assigned later
SaxpyTasks [nf].Hf = NULL ; // assigned later
SaxpyTasks [nf].Hx = NULL ; // assigned later
SaxpyTasks [nf].my_cjnz = 0 ;
SaxpyTasks [nf].leader = leader ;
SaxpyTasks [nf].team_size = team_size ;
nf++ ;
}
}
}
// flush the last coarse task, if any
if (kcoarse_start < klast)
{
// kcoarse_start:klast-1 form a single coarse task
GB_create_coarse_task (kcoarse_start, klast-1, SaxpyTasks,
nc++, Bflops, cvlen, chunk, nthreads_max,
Coarse_Work, AxB_method) ;
}
}
else
{
// This coarse task is OK as-is.
GB_create_coarse_task (kfirst, klast-1, SaxpyTasks,
nc++, Bflops, cvlen, chunk, nthreads_max,
Coarse_Work, AxB_method) ;
}
}
}
else
{
//----------------------------------------------------------------------
// entire computation in a single fine or coarse task
//----------------------------------------------------------------------
// create a single coarse task: hash or Gustavson
GB_create_coarse_task (0, bnvec-1, SaxpyTasks, 0, Bflops, cvlen, 1, 1,
Coarse_Work, AxB_method) ;
if (bnvec == 1)
{
// convert the single coarse task into a single fine task
SaxpyTasks [0].start = 0 ; // first entry in B(:,0)
SaxpyTasks [0].end = bnz - 1 ; // last entry in B(:,0)
SaxpyTasks [0].vector = 0 ;
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORKSPACE ;
(*SaxpyTasks_handle) = SaxpyTasks ;
(*SaxpyTasks_size_handle) = SaxpyTasks_size ;
return (GrB_SUCCESS) ;
}
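//------------------------------------------------------------------------------
// Illustrative sketch only (not part of GraphBLAS): how a cumulative work
// count such as Bflops or Fine_fl can be sliced into nearly equal tasks, in
// the spirit of the GB_pslice calls above. The names slice_by_work, Work,
// and Slice are hypothetical; the real GB_pslice handles more cases.
// Disabled with #if 0 so it is never compiled.
//------------------------------------------------------------------------------
#if 0
#include <stdint.h>
// Work [0..n] is a nondecreasing cumulative sum with Work [0] == 0.
// Slice [t] is found by binary search so that task t handles entries
// Slice [t] to Slice [t+1]-1, roughly Work [n] / ntasks units of work each.
static void slice_by_work (int64_t *Slice, const int64_t *Work, int64_t n,
    int ntasks)
{
    Slice [0] = 0 ;
    int64_t total = Work [n] ;
    for (int t = 1 ; t < ntasks ; t++)
    {
        int64_t target = (((int64_t) t) * total) / ntasks ;
        // find the first k with Work [k] >= target
        int64_t lo = 0, hi = n ;
        while (lo < hi)
        {
            int64_t mid = (lo + hi) / 2 ;
            if (Work [mid] < target) { lo = mid + 1 ; } else { hi = mid ; }
        }
        Slice [t] = lo ;
    }
    Slice [ntasks] = n ;
}
#endif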
|
pr27358.c | /* PR c/27358 */
/* { dg-do compile } */
void foo(error i) /* { dg-error "" } */
{
#pragma omp parallel
i = 0;
}
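/* For reference only (not part of the PR c/27358 test, which must fail to
   compile): a hypothetical corrected variant, assuming the parameter was
   meant to be an int. Disabled with #if 0. */
#if 0
void foo_fixed(int i)
{
#pragma omp parallel
  i = 0;
}
#endif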
|
GB_binop__le_bool.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_bool)
// A.*B function (eWiseMult): GB (_AemultB_08__le_bool)
// A.*B function (eWiseMult): GB (_AemultB_02__le_bool)
// A.*B function (eWiseMult): GB (_AemultB_04__le_bool)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_bool)
// A*D function (colscale): GB (_AxD__le_bool)
// D*A function (rowscale): GB (_DxB__le_bool)
// C+=B function (dense accum): GB (_Cdense_accumB__le_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__le_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_bool)
// C=scalar+B GB (_bind1st__le_bool)
// C=scalar+B' GB (_bind1st_tran__le_bool)
// C=A+scalar GB (_bind2nd__le_bool)
// C=A'+scalar GB (_bind2nd_tran__le_bool)
// C type: bool
// A type: bool
// A pattern? 0
// B type: bool
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
bool aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
bool bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_BOOL || GxB_NO_LE_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__le_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__le_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__le_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__le_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__le_bool)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__le_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
bool alpha_scalar ;
bool beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((bool *) alpha_scalar_in)) ;
beta_scalar = (*((bool *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__le_bool)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__le_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__le_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__le_bool)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__le_bool)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
bool bij = GBX (Bx, p, false) ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__le_bool)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
bool aij = GBX (Ax, p, false) ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__le_bool)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__le_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
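//------------------------------------------------------------------------------
// Illustrative sketch only (not generated code): what bind1st and bind2nd
// mean for the LE operator above, on plain dense arrays with no bitmap and
// no iso values. The demo_* names are hypothetical. Disabled with #if 0.
//------------------------------------------------------------------------------
#if 0
#include <stdbool.h>
#include <stdint.h>
// bind1st: the scalar x is the first operand: Cx [p] = (x <= Bx [p])
static void demo_bind1st (bool *Cx, bool x, const bool *Bx, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++) Cx [p] = (x <= Bx [p]) ;
}
// bind2nd: the scalar y is the second operand: Cx [p] = (Ax [p] <= y)
static void demo_bind2nd (bool *Cx, const bool *Ax, bool y, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++) Cx [p] = (Ax [p] <= y) ;
}
#endif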
|
GB_unaryop__ainv_fp32_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp32_bool
// op(A') function: GB_tran__ainv_fp32_bool
// C type: float
// A type: bool
// cast: float cij = (float) aij
// unaryop: cij = -aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_fp32_bool
(
float *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_fp32_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
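//------------------------------------------------------------------------------
// Illustrative note (not generated code): GB_CAST_OP above casts first and
// then applies the op, so AINV from bool to fp32 computes -((float) aij),
// which is 0.0f for false and -1.0f for true. A minimal sketch of that
// expansion follows; demo_ainv_fp32_bool is a hypothetical name.
//------------------------------------------------------------------------------
#if 0
#include <stdbool.h>
static float demo_ainv_fp32_bool (bool aij)
{
    float x = (float) aij ;     // GB_CASTING: bool -> float
    return (-x) ;               // GB_OP: additive inverse
}
#endif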
|
GB_unop__abs_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__abs_uint8_uint8)
// op(A') function: GB (_unop_tran__abs_uint8_uint8)
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__abs_uint8_uint8)
(
uint8_t *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
uint8_t z = aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint8_t aij = Ax [p] ;
uint8_t z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__abs_uint8_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
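//------------------------------------------------------------------------------
// Illustrative sketch only (not generated code): the bitmap branch above must
// skip positions where Ab [p] is zero, since those positions hold no entry;
// Cx there is left untouched because C->b was already copied from A->b.
// demo_apply_bitmap is a hypothetical name. Disabled with #if 0.
//------------------------------------------------------------------------------
#if 0
#include <stdint.h>
static void demo_apply_bitmap (uint8_t *Cx, const uint8_t *Ax,
    const int8_t *Ab, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        if (!Ab [p]) continue ;     // no entry at position p
        Cx [p] = Ax [p] ;           // ABS is the identity on uint8
    }
}
#endif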
|
vertex_miner.h | #pragma once
#include "miner.h"
typedef std::unordered_map<BaseEmbedding, Frequency> SimpleMap;
typedef QuickPattern<EdgeInducedEmbedding<StructuralElement>, StructuralElement> StrQPattern; // structural quick pattern
typedef CanonicalGraph<EdgeInducedEmbedding<StructuralElement>, StructuralElement> StrCPattern; // structural canonical pattern
typedef std::unordered_map<StrQPattern, Frequency> StrQpMapFreq; // mapping structural quick pattern to its frequency
typedef std::unordered_map<StrCPattern, Frequency> StrCgMapFreq; // mapping structural canonical pattern to its frequency
typedef PerThreadStorage<StrQpMapFreq> LocalStrQpMapFreq;
typedef PerThreadStorage<StrCgMapFreq> LocalStrCgMapFreq;
#define CHUNK_SIZE 256
class VertexMiner : public Miner {
public:
VertexMiner(Graph *g, unsigned size = 3, int nthreads = 1) {
graph = g;
max_size = size;
degree_counting();
numThreads = nthreads;
}
virtual ~VertexMiner() {}
// extension for vertex-induced motif
inline void extend_vertex(unsigned level, EmbeddingList& emb_list) {
UintList num_new_emb(emb_list.size());
#pragma omp parallel for schedule(dynamic, CHUNK_SIZE)
for (size_t pos = 0; pos < emb_list.size(); pos ++) {
VertexEmbedding emb(level+1);
get_embedding<VertexEmbedding>(level, pos, emb_list, emb);
num_new_emb[pos] = 0;
unsigned n = emb.size();
for (unsigned i = 0; i < n; ++i) {
VertexId src = emb.get_vertex(i);
//for (auto e : graph->edges(src)) {
IndexT row_begin = graph->edge_begin(src);
IndexT row_end = graph->edge_end(src);
for (IndexT e = row_begin; e < row_end; e++) {
IndexT dst = graph->getEdgeDst(e);
if (!is_vertexInduced_automorphism(emb, i, src, dst)) {
num_new_emb[pos] ++;
}
}
}
}
UintList indices = parallel_prefix_sum<unsigned>(num_new_emb);
num_new_emb.clear();
auto new_size = indices.back();
assert(new_size < 4294967296); // TODO: vector sizes of 2^32 or more are not yet supported
std::cout << "number of new embeddings: " << new_size << "\n";
emb_list.add_level(new_size);
#ifdef USE_WEDGE
if (level == 1 && max_size == 4) {
is_wedge.resize(emb_list.size());
std::fill(is_wedge.begin(), is_wedge.end(), 0);
}
#endif
for (size_t pos = 0; pos < emb_list.size(level); pos ++) {
VertexEmbedding emb(level+1);
get_embedding<VertexEmbedding>(level, pos, emb_list, emb);
auto start = indices[pos];
auto n = emb.size();
for (unsigned i = 0; i < n; ++i) {
VertexId src = emb.get_vertex(i);
//for (auto e : graph->edges(src)) {
IndexT row_begin = graph->edge_begin(src);
IndexT row_end = graph->edge_end(src);
for (IndexT e = row_begin; e < row_end; e++) {
IndexT dst = graph->getEdgeDst(e);
if (!is_vertexInduced_automorphism(emb, i, src, dst)) {
assert(start < indices.back());
if (n == 2 && max_size == 4)
emb_list.set_pid(start, find_motif_pattern_id(n, i, dst, emb, start));
emb_list.set_idx(level+1, start, pos);
emb_list.set_vid(level+1, start++, dst);
}
}
}
}
indices.clear();
}
// extension for vertex-induced clique
inline void extend_vertex(unsigned level, EmbeddingList& emb_list, uint64_t &num) {
UintList num_new_emb(emb_list.size());
#pragma omp parallel for schedule(dynamic, CHUNK_SIZE) reduction(+:num)
for (size_t pos = 0; pos < emb_list.size(); pos ++) {
BaseEmbedding emb(level+1);
get_embedding<BaseEmbedding>(level, pos, emb_list, emb);
VertexId vid = emb_list.get_vid(level, pos);
num_new_emb[pos] = 0;
for (auto dst : graph->N(vid)) {
if (is_all_connected_dag(dst, emb, level)) {
if (level < max_size-2) num_new_emb[pos] ++;
else num += 1;
}
}
}
if (level == max_size-2) return;
UintList indices = parallel_prefix_sum<unsigned>(num_new_emb);
num_new_emb.clear();
auto new_size = indices.back();
std::cout << "number of new embeddings: " << new_size << "\n";
emb_list.add_level(new_size);
#pragma omp parallel for schedule(dynamic, CHUNK_SIZE)
for (size_t pos = 0; pos < emb_list.size(level); pos ++) {
BaseEmbedding emb(level+1);
get_embedding<BaseEmbedding>(level, pos, emb_list, emb);
VertexId vid = emb_list.get_vid(level, pos);
unsigned start = indices[pos];
for (auto dst : graph->N(vid)) {
if (is_all_connected_dag(dst, emb, level)) {
emb_list.set_idx(level+1, start, pos);
emb_list.set_vid(level+1, start++, dst);
}
}
}
indices.clear();
}
// motif reduction
inline void aggregate(unsigned level, EmbeddingList& emb_list,
std::vector<std::vector<uint64_t>> &counts) {
#pragma omp parallel
{
auto &counter = counts.at(omp_get_thread_num());
#pragma omp for schedule(dynamic, CHUNK_SIZE)
for (size_t pos = 0; pos < emb_list.size(); pos ++) {
VertexEmbedding emb(level+1);
get_embedding<VertexEmbedding>(level, pos, emb_list, emb);
unsigned n = emb.size();
if (n == 3) emb.set_pid(emb_list.get_pid(pos));
for (unsigned i = 0; i < n; ++i) {
VertexId src = emb.get_vertex(i);
for (auto dst : graph->N(src)) {
if (!is_vertexInduced_automorphism(emb, i, src, dst)) {
assert(n < 4);
unsigned pid = find_motif_pattern_id(n, i, dst, emb, pos);
counter[pid] += 1;
}
}
}
emb.clean();
}
}
}
/*
inline void quick_aggregate(unsigned level, const EmbeddingList& emb_list) {
for (auto i = 0; i < numThreads; i++) qp_localmaps.getLocal(i)->clear();
#pragma omp parallel for schedule(dynamic, CHUNK_SIZE)
for (size_t pos = 0; pos < emb_list.size(level); pos ++) {
int tid = omp_get_thread_num();
StrQpMapFreq* qp_map = qp_localmaps.getLocal(tid);
VertexEmbedding emb(level+1);
get_embedding<VertexEmbedding>(level, pos, emb_list, emb);
unsigned n = emb.size();
for (unsigned i = 0; i < n; ++i) {
VertexId src = emb.get_vertex(i);
//for (auto e : graph->edges(src)) {
IndexT row_begin = graph->edge_begin(src);
IndexT row_end = graph->edge_end(src);
for (IndexT e = row_begin; e < row_end; e++) {
IndexT dst = graph->getEdgeDst(e);
if (!is_vertexInduced_automorphism(emb, i, src, dst)) {
std::vector<bool> connected;
get_connectivity(n, i, dst, emb, connected);
StrQPattern qp(n+1, connected);
if (qp_map->find(qp) != qp_map->end()) {
(*qp_map)[qp] += 1;
qp.clean();
} else (*qp_map)[qp] = 1;
}
}
}
emb.clean();
}
}
// canonical pattern aggregation
inline void canonical_aggregate() {
for (auto i = 0; i < numThreads; i++) cg_localmaps.getLocal(i)->clear();
//#pragma omp parallel for schedule(dynamic, CHUNK_SIZE)
for (auto it = qp_map.begin(); it != qp_map.end(); ++it) {
StrCgMapFreq* cg_map = cg_localmaps.getLocal(omp_get_thread_num());
StrCPattern cg(it->first);
if (cg_map->find(cg) != cg_map->end()) (*cg_map)[cg] += it->second;
else (*cg_map)[cg] = it->second;
cg.clean();
}
qp_map.clear();
}
inline void merge_qp_map() {
qp_map.clear();
for (unsigned i = 0; i < qp_localmaps.size(); i++) {
StrQpMapFreq qp_lmap = *qp_localmaps.getLocal(i);
for (auto element : qp_lmap) {
if (qp_map.find(element.first) != qp_map.end())
qp_map[element.first] += element.second;
else qp_map[element.first] = element.second;
}
}
}
inline void merge_cg_map() {
cg_map.clear();
for (unsigned i = 0; i < cg_localmaps.size(); i++) {
StrCgMapFreq cg_lmap = *cg_localmaps.getLocal(i);
for (auto element : cg_lmap) {
if (cg_map.find(element.first) != cg_map.end())
cg_map[element.first] += element.second;
else cg_map[element.first] = element.second;
}
}
}
void printout_motifs(uintmap &p_map) {
assert(p_map.size() == 21);
std::cout << std::endl;
for (auto it = p_map.begin(); it != p_map.end(); ++it)
std::cout << "{" << it->first << "} --> " << it->second << std::endl;
std::cout << std::endl;
}
void printout_motifs() {
std::cout << std::endl;
for (auto it = cg_map.begin(); it != cg_map.end(); ++it)
std::cout << it->first << " --> " << it->second << std::endl;
std::cout << std::endl;
}
*/
private:
//unsigned num_cliques;
unsigned max_size;
int numThreads;
std::vector<unsigned> is_wedge; // indicates whether a 3-vertex embedding is a wedge or a chain (v0-centered or v1-centered)
StrQpMapFreq qp_map; // quick patterns map for counting the frequency
StrCgMapFreq cg_map; // canonical graph map for counting the frequency
LocalStrQpMapFreq qp_localmaps; // quick patterns local map for each thread
LocalStrCgMapFreq cg_localmaps; // canonical graph local map for each thread
template <typename EmbeddingTy>
inline void get_embedding(unsigned level, unsigned pos, const EmbeddingList& emb_list, EmbeddingTy &emb) {
VertexId vid = emb_list.get_vid(level, pos);
IndexTy idx = emb_list.get_idx(level, pos);
ElementType ele(vid);
emb.set_element(level, ele);
// construct the embedding backwards by following parent indices
for (unsigned l = 1; l < level; l ++) {
VertexId u = emb_list.get_vid(level-l, idx);
ElementType ele(u);
emb.set_element(level-l, ele);
idx = emb_list.get_idx(level-l, idx);
}
ElementType ele0(idx);
emb.set_element(0, ele0);
}
inline bool is_vertexInduced_automorphism(const VertexEmbedding& emb, unsigned idx, VertexId src, VertexId dst) {
unsigned n = emb.size();
// the new vertex id should be larger than the first vertex id
if (dst <= emb.get_vertex(0)) return true;
// the new vertex should not already exist in the embedding
for (unsigned i = 1; i < n; ++i)
if (dst == emb.get_vertex(i)) return true;
// the new vertex should not already be extended by any previous vertex in the embedding
for (unsigned i = 0; i < idx; ++i)
if (is_connected(emb.get_vertex(i), dst)) return true;
// the new vertex id should be larger than any vertex id after its source vertex in the embedding
for (unsigned i = idx+1; i < n; ++i)
if (dst < emb.get_vertex(i)) return true;
return false;
}
inline unsigned find_motif_pattern_id(unsigned n, unsigned idx, VertexId dst, const VertexEmbedding& emb, unsigned pos = 0) {
unsigned pid = 0;
if (n == 2) { // count 3-motifs
pid = 1; // 3-chain
if (idx == 0) {
if (is_connected(emb.get_vertex(1), dst)) pid = 0; // triangle
#ifdef USE_WEDGE
else if (max_size == 4) is_wedge[pos] = 1; // wedge; used for 4-motif
#endif
}
} else if (n == 3) { // count 4-motifs
unsigned num_edges = 1;
pid = emb.get_pid();
if (pid == 0) { // extending a triangle
for (unsigned j = idx+1; j < n; j ++)
if (is_connected(emb.get_vertex(j), dst)) num_edges ++;
pid = num_edges + 2; // p3: tailed-triangle; p4: diamond; p5: 4-clique
} else { // extending a 3-chain
assert(pid == 1);
std::vector<bool> connected(3, false);
connected[idx] = true;
for (unsigned j = idx+1; j < n; j ++) {
if (is_connected(emb.get_vertex(j), dst)) {
num_edges ++;
connected[j] = true;
}
}
if (num_edges == 1) {
pid = 0; // p0: 3-path
unsigned center = 1;
#ifdef USE_WEDGE
if (is_wedge[pos]) center = 0;
#else
center = is_connected(emb.get_vertex(1), emb.get_vertex(2)) ? 1 : 0;
#endif
if (idx == center) pid = 1; // p1: 3-star
} else if (num_edges == 2) {
pid = 2; // p2: 4-cycle
unsigned center = 1;
#ifdef USE_WEDGE
if (is_wedge[pos]) center = 0;
#else
center = is_connected(emb.get_vertex(1), emb.get_vertex(2)) ? 1 : 0;
#endif
if (connected[center]) pid = 3; // p3: tailed-triangle
} else {
pid = 4; // p4: diamond
}
}
} else { // count 5-motif and beyond
std::vector<bool> connected;
get_connectivity(n, idx, dst, emb, connected);
Matrix A(n+1, std::vector<MatType>(n+1, 0));
gen_adj_matrix(n+1, connected, A);
std::vector<MatType> c(n+1, 0);
char_polynomial(n+1, A, c);
bliss::UintSeqHash h;
for (unsigned i = 0; i < n+1; ++i)
h.update((unsigned)c[i]);
pid = h.get_value();
}
return pid;
}
};
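// Illustrative sketch only (not part of this miner): the count / prefix-sum /
// fill pattern used by both extend_vertex overloads above, reduced to its
// core. Pass 1 counts each item's outputs in parallel, a prefix sum turns
// the counts into disjoint output offsets, and pass 2 writes each item's
// outputs at its own offset with no synchronization. The names demo_expand,
// demo_prefix_sum, count_fn, and fill_fn are hypothetical. Disabled with #if 0.
#if 0
#include <vector>
inline std::vector<unsigned> demo_prefix_sum(const std::vector<unsigned> &c) {
  std::vector<unsigned> idx(c.size() + 1, 0);
  for (size_t i = 0; i < c.size(); i++) idx[i+1] = idx[i] + c[i];
  return idx;
}
template <typename CountFn, typename FillFn>
void demo_expand(size_t n, CountFn count_fn, FillFn fill_fn) {
  std::vector<unsigned> counts(n);
  #pragma omp parallel for schedule(dynamic, CHUNK_SIZE)
  for (size_t pos = 0; pos < n; pos++) counts[pos] = count_fn(pos);
  std::vector<unsigned> indices = demo_prefix_sum(counts);
  #pragma omp parallel for schedule(dynamic, CHUNK_SIZE)
  for (size_t pos = 0; pos < n; pos++)
    fill_fn(pos, indices[pos]); // each pos writes into its private range
}
#endif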
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
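/// Illustrative (hypothetical) usage of the single-element cache above:
/// repeated lookups of the same FileID are served from the cache, and
/// switching to a different FileID flushes the cached entry back into the
/// map before caching the new one. Not part of Clang; a sketch only.
inline void exampleNullabilityLookup(FileNullabilityMap &NM,
                                     FileID F1, FileID F2) {
  NM[F1].SawTypeNullability = true; // miss: pulls F1's entry into the cache
  (void)NM[F1];                     // hit: served from the cache
  (void)NM[F2];                     // flushes F1 to the map, caches F2
}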
/// Keeps track of the expected type during expression parsing. The type is
/// tied to a particular token; all functions that update or consume the type
/// take the start location of the token they are looking at as a parameter.
/// This allows us to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref, clients should make sure all calls to get() with the same
/// location happen while function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
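/// Illustrative (hypothetical) usage of PreferredTypeBuilder: get() only
/// yields the expected type when queried with the exact token location the
/// type was attached to, assuming enterTypeCast records Tok as the expected
/// location. Not part of Clang; a sketch only.
inline QualType examplePreferredType(PreferredTypeBuilder &PT,
                                     SourceLocation Tok, QualType CastType) {
  PT.enterTypeCast(Tok, CastType); // attach the expected type to this token
  return PT.get(Tok);              // same location: returns CastType
}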
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we must do since we can't directly use the llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
api_notes::APINotesManager APINotes;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel, ValueType Value) {
if (Action == PSK_Reset) {
CurrentValue = DefaultValue;
CurrentPragmaLocation = PragmaLocation;
return;
}
if (Action & PSK_Push)
Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
PragmaLocation);
else if (Action & PSK_Pop) {
if (!StackSlotLabel.empty()) {
// If we've got a label, try to find it and jump there.
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
return x.StackSlotLabel == StackSlotLabel;
});
// If we found the label, pop from there.
if (I != Stack.rend()) {
CurrentValue = I->Value;
CurrentPragmaLocation = I->PragmaLocation;
Stack.erase(std::prev(I.base()), Stack.end());
}
} else if (!Stack.empty()) {
// We do not have a label, just pop the last entry.
CurrentValue = Stack.back().Value;
CurrentPragmaLocation = Stack.back().PragmaLocation;
Stack.pop_back();
}
}
if (Action & PSK_Set) {
CurrentValue = Value;
CurrentPragmaLocation = PragmaLocation;
}
}
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<unsigned> FpPragmaStack;
FPOptionsOverride CurFPFeatureOverrides() {
FPOptionsOverride result;
if (!FpPragmaStack.hasValue()) {
result = FPOptionsOverride();
} else {
result = FPOptionsOverride(FpPragmaStack.CurrentValue);
}
return result;
}
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
return llvm::makeArrayRef(InventedParameterInfos.begin() +
InventedParameterInfosStart,
InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members and the locations of delete-expressions
/// for which we could not prove whether they mismatch with the
/// new-expression used in the field's initializer.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
ParseTypeFromStringCallback;
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
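// Illustrative usage sketch (assumes a Sema &S in scope, and that
// DelayedDiagnosticPool can be chained to the current pool): diagnostics
// such as deprecation warnings raised while the pool is pushed are
// collected instead of being emitted immediately.
//
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   DelayedDiagnosticsState State = S.DelayedDiagnostics.push(Pool);
//   // ... parse a declaration; availability/access diags land in Pool ...
//   S.DelayedDiagnostics.popWithoutEmitting(State);
//   // The caller then decides which pooled diagnostics to emit, if any.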
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
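// Illustrative usage sketch: temporarily enter another DeclContext (e.g.
// while instantiating one of its members) and restore everything on exit.
//
//   {
//     Sema::ContextRAII Saved(S, NewDC); // NewDC is a hypothetical context
//     // ... lookups and 'this' now behave as if we were inside NewDC ...
//   } // previous context, 'this' override, and scope indices restored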
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
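// Illustrative usage sketch (the function name is hypothetical): defining
// the body of an implicit special member.
//
//   void defineImplicitMember(Sema &S, CXXMethodDecl *MD,
//                             SourceLocation UseLoc) {
//     Sema::SynthesizedFunctionScope Scope(S, MD);
//     Scope.addContextNote(UseLoc); // record where the definition was needed
//     // ... build and attach the synthesized body to MD ...
//   } // function scope and evaluation context unwound here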
/// WeakUndeclaredIdentifiers - Identifiers contained in a
/// \#pragma weak before being declared. Rare. May alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine> (or, pre-C++20, \<experimental/coroutine>).
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// Will hold the 'respondsToSelector:' selector.
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields.
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
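// Illustrative mapping from source constructs to contexts (informal):
//
//   sizeof(e)              // e: Unevaluated
//   sizeof(T{a, b})        // a, b: UnevaluatedList
//   if constexpr (B) { }
//   else { e; }            // e in the untaken branch: DiscardedStatement
//   case K:                // K: ConstantEvaluated
//   f(x);                  // x: PotentiallyEvaluated
//   void g(int a = h());   // h(): PotentiallyEvaluatedIfUsed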
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
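// Illustrative sketch: the method and the result kind share one
// pointer-int pair.
//
//   SpecialMemberOverloadResult SMOR(MD); // Success unless MD is deleted
//   if (SMOR.getKind() == SpecialMemberOverloadResult::Success) {
//     CXXMethodDecl *Chosen = SMOR.getMethod(); // == MD here
//     (void)Chosen;
//   }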
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// arguments.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
OldOverrides = S.FpPragmaStack.CurrentValue;
}
~FPFeaturesStateRAII() {
S.CurFPFeatures = OldFPFeaturesState;
S.FpPragmaStack.CurrentValue = OldOverrides;
}
unsigned getOverrides() { return OldOverrides; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
unsigned OldOverrides;
};
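// Illustrative usage sketch: save and restore FP state around a compound
// statement whose body may contain FP pragmas.
//
//   {
//     Sema::FPFeaturesStateRAII SaveFP(S);
//     // ... parse the body; '#pragma STDC FP_CONTRACT' and friends may
//     //     modify S.CurFPFeatures and the pragma stack ...
//   } // S.CurFPFeatures and S.FpPragmaStack.CurrentValue restored here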
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
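// Illustrative usage sketch (the worker name is hypothetical):
//
//   runWithSufficientStackSpace(Loc, [&] {
//     instantiateDeeplyNestedTemplate(); // recursion-heavy work
//   });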
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
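// Illustrative usage sketch (the diagnostic ID is assumed to exist):
// arguments are streamed as with DiagnosticBuilder, and the diagnostic is
// emitted through Sema when the builder is destroyed.
//
//   Diag(Loc, diag::err_typecheck_invalid_operands)
//       << LHS->getType() << RHS->getType()
//       << LHS->getSourceRange() << RHS->getSourceRange();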
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and the function will output the number of parameter names, and whether
/// this is a single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name,
SourceLocation ArgLoc,
const IdentifierInfo *AttrName);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up
/// as they are parsed, so a dereference of a noderef pointer is not
/// necessarily an access: in `&*p`, where `p` is a noderef pointer, we
/// first parse the `*p` and must later observe that its address is taken.
/// This requires keeping a container of all pending expressions and
/// checking whether their addresses are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return D->isUnconditionallyVisible() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
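// Illustrative usage sketch (the diagnostic ID is assumed): any extra
// arguments are bound into a BoundTypeDiagnoser and streamed into the
// diagnostic ahead of the type itself.
//
//   if (RequireCompleteType(Loc, T, diag::err_incomplete_type))
//     return true; // T was incomplete; the diagnostic was already emitted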
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification ContextIndependentExpr(ExprResult E) {
NameClassification Result(NC_ContextIndependentExpr);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_ContextIndependentExpr);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
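///
/// \par Example
/// A minimal sketch of how a caller in the parser might consume the result
/// (illustrative only; 'Actions', 'SS', 'Name', 'NameLoc', and 'NextTok'
/// are assumed to follow the parser's conventions):
/// \code
///   Sema::NameClassification NC =
///       Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, NextTok);
///   switch (NC.getKind()) {
///   case Sema::NC_Type: {
///     ParsedType T = NC.getType(); // continue parsing a declaration
///     break;
///   }
///   case Sema::NC_NonType: {
///     NamedDecl *ND = NC.getNonTypeDecl(); // build a declaration reference
///     break;
///   }
///   default:
///     break;
///   }
/// \endcode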
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
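///
/// For example (illustrative):
/// \code
///   int compare = 0;
///   compare<int>(1, 2); // 'compare' plausibly intended as a template-name
/// \endcode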
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range);
bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key);
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
SourceLocation NameLoc,
TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param,
Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
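///
/// For example (an illustrative sketch assuming Objective-C ARC, where a
/// '__strong' member makes a C union non-trivial):
/// \code
///   union U { __strong id Obj; int I; };
///   void f(union U u); // diagnosed with NTCUC_FunctionParam
/// \endcode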
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
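///
/// For instance (an illustrative sketch):
/// \code
///   constexpr int size() { return 4; }
///   int arr[size()]; // the body of size() is needed mid-expression
/// \endcode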
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose when the parameters or return value of a function or
/// Objective-C method definition are passed by value and are larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note that the values of these enumerators
/// correspond to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
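/// For example (an illustrative sketch), with TAH_ConsiderTrivialABI the
/// special members of the following type may be treated as trivial even
/// though they are user-provided:
/// \code
///   struct [[clang::trivial_abi]] Handle {
///     Handle(const Handle &);
///     ~Handle();
///     void *Ptr;
///   };
/// \endcode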
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
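/// (Encoding note: special members use their own enumerator values; for
/// comparison kinds, SpecialMember is CXXInvalid and None is zero, so
/// comparisons are numbered consecutively after CXXInvalid.)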
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike C++, we actually parse the body and reject or error out on a
/// structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Enter a template parameter scope, after it's been associated with a
/// particular DeclContext. Causes lookup within the scope to chain through
/// enclosing contexts in the correct order.
void EnterTemplatedContext(Scope *S, DeclContext *DC);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
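///
/// For example (illustrative): an attribute applied via '#pragma clang
/// attribute' and then inferred for another platform has final priority
/// AP_PragmaClangAttribute + AP_InferredFromOtherPlatform = 1 + 2 = 3,
/// which loses to an explicitly written attribute, whose final priority
/// is AP_Explicit = 0.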
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Return the merged attribute if a new one was
/// added, or nullptr otherwise.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name, bool Override);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of notes all template and non-template overload
// candidates identified by the given expression.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
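///
/// For example (an illustrative sketch), given
/// \code
///   void f(int n)
///       __attribute__((diagnose_if(n < 0, "n must be non-negative",
///                                  "warning")));
/// \endcode
/// a call 'f(-1)' triggers the argument-dependent attribute at the call site.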
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
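///
/// A minimal sketch of a typical unqualified lookup (illustrative only;
/// 'SemaRef', 'Name', 'NameLoc', and 'S' are assumed to be in scope):
/// \code
///   LookupResult R(SemaRef, Name, NameLoc, Sema::LookupOrdinaryName);
///   if (SemaRef.LookupName(R, S) && R.isSingleResult()) {
///     NamedDecl *D = R.getFoundDecl();
///     // ... use D ...
///   }
/// \endcode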
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
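///
/// For example, for the user-defined literal '123_km', depending on which
/// declaration is found (illustrative; 'Distance' and '_km' are
/// hypothetical):
/// \code
///   Distance operator""_km(unsigned long long); // LOLR_Cooked: passed 123
///   Distance operator""_km(const char *);       // LOLR_Raw: passed "123"
///   template <char...>
///   Distance operator""_km(); // LOLR_Template: passed <'1', '2', '3'>
/// \endcode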
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
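// Illustrative declarations (hypothetical suffix '_w') for the outcomes
// above, as they would apply to the literal 123_w:
//
//   unsigned operator"" _w(unsigned long long);  // 'cooked' -> LOLR_Cooked
//   unsigned operator"" _w(const char *);        // 'raw'    -> LOLR_Raw
//   template <char...> unsigned operator"" _w(); //          -> LOLR_Template
//
// LOLR_StringTemplate corresponds to the string-literal operator template
// extension, which additionally receives the character type.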
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Status of the function emission, based on the CUDA/HIP/OpenMP host/device attrs.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
// Whether the callee should be ignored in the CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
Expr *E, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult CorrectDelayedTyposInExpr(
ExprResult ER, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid()
? ER
: CorrectDelayedTyposInExpr(ER.get(), InitDecl,
RecoverUncorrectedTypos, Filter);
}
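// A minimal usage sketch (hypothetical caller code, based only on the
// signatures above): the Filter lambda keeps rejecting rebuilt expressions,
// so different combinations of corrections are tried until one is acceptable.
//
//   ExprResult Res = S.CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, /*RecoverUncorrectedTypos=*/false,
//       [](Expr *Rebuilt) -> ExprResult {
//         return isa<CallExpr>(Rebuilt) ? ExprResult(Rebuilt) : ExprError();
//       });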
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
NullabilityKind nullability,
SourceLocation diagLoc,
bool allowArrayTypes,
bool overrideExisting);
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method's
/// declaration in the implementation exactly matches its declaration in the
/// interface.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar
/// which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one, but not the other, of a
/// user-declared setter and getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match,
/// returning true if they do and false otherwise.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and the
/// parameter CheckTheOther is set, it then checks the other kind. If no such
/// method, or only one method, is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
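// Hypothetical caller-side sketch, using only the helpers above: wrap a
// condition expression as a full expression before handing it to a statement
// action.
//
//   Sema::FullExprArg Cond = S.MakeFullExpr(CondExpr, IfLoc);
//   if (!Cond.get())
//     return StmtError(); // full-expression processing failed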
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
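// Illustrative use (hypothetical): enter a compound-statement scope for the
// duration of one parsing action; the destructor balances the start call.
//
//   {
//     Sema::CompoundScopeRAII CompoundScope(S, /*IsStmtExpr=*/false);
//     // ... build the statements of the compound statement ...
//   } // ActOnFinishOfCompoundStmt() runs here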
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
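// Illustrative use (hypothetical): disable() keeps the function scope alive
// on the success path, where it is popped by other means; error paths fall
// back to the destructor.
//
//   Sema::FunctionScopeRAII FnScope(S);
//   StmtResult Body = BuildBody(); // hypothetical helper
//   if (!Body.isInvalid())
//     FnScope.disable(); // success: the scope is popped elsewhere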
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
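// A minimal sketch (hypothetical caller) of the query-only mode described
// above: with BuildAndDiagnose == false, the call only tests whether the
// capture could succeed and reports the types it would use.
//
//   QualType CaptureType, DeclRefType;
//   bool CannotCapture = S.tryCaptureVariable(
//       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
//       /*FunctionScopeIndexToStopAt=*/nullptr);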
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildUniqueStableName(SourceLocation Loc, TypeSourceInfo *Operand);
ExprResult BuildUniqueStableName(SourceLocation Loc, Expr *E);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, ParsedType Ty);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
SourceLocation ColonLocFirst,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for iterator expression.
struct OMPIteratorData {
IdentifierInfo *DeclIdent = nullptr;
SourceLocation DeclIdentLoc;
ParsedType Type;
OMPIteratorExpr::IteratorRange Range;
SourceLocation AssignLoc;
SourceLocation ColonLoc;
SourceLocation SecColonLoc;
};
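// For orientation (hypothetical source): in an OpenMP iterator modifier such
// as
//
//   #pragma omp task depend(iterator(i = 0:n:2), in: a[i])
//
// one OMPIteratorData records 'i' in DeclIdent, the '=' location in
// AssignLoc, the 0:n:2 bounds in Range, and the two ':' locations in
// ColonLoc and SecColonLoc.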
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an AltiVec or OpenCL vector literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
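// Illustrative decomposition (hypothetical): for
//
//   __builtin_offsetof(T, a.b[2])
//
// the parser produces three components: {.a} and {.b} with isBrackets ==
// false and U.IdentInfo set, then {[2]} with isBrackets == true and U.E
// pointing at the index expression.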
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
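// Illustrative source for the checks below (Microsoft extension; hypothetical
// names):
//
//   __if_exists(N::f) { /* included when N::f exists -> IER_Exists */ }
//   template <typename T> void g() {
//     __if_exists(T::f) { } // IER_Dependent: varies per instantiation
//   }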
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
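// Illustrative source-level examples (informal) of the two usages:
//
//   auto c = (1 <=> 2);      // OperatorInExpression: the builtin '<=>'
//                            // yields std::strong_ordering.
//   struct S {
//     int i;
//     auto operator<=>(const S &) const = default;   // DefaultedOperator
//   };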
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
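// A minimal sketch of how the two initializer-list helpers relate
// (hypothetical caller; 'Ty' and 'Loc' are assumed to be in scope):
//
//   QualType Element;
//   if (isStdInitializerList(Ty, &Element)) {
//     // Ty is std::initializer_list<E>; Element now holds E.
//   } else {
//     // Instantiate std::initializer_list<int>, or emit an error.
//     QualType IL = BuildStdInitializerList(Context.IntTy, Loc);
//   }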
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then comes throw(collected exceptions).
// Finally, no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
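// A minimal usage sketch (assumed caller; 'SemaRef', 'Loc', and the member
// list are hypothetical): fold the exception specifications of each called
// subobject member into one computed specification.
//
//   ImplicitExceptionSpecification Spec(SemaRef);
//   for (const CXXMethodDecl *M : SubobjectMembers)
//     Spec.CalledDecl(Loc, M);
//   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();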
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
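// A minimal usage sketch (names are hypothetical): temporarily allow 'this'
// while checking code that is lexically outside a member function body, such
// as a late-parsed exception specification.
//
//   {
//     CXXThisScopeRAII ThisScope(SemaRef, RD,
//                                Qualifiers::fromCVRMask(Qualifiers::Const));
//     // ... check an expression that may mention 'this' ...
//   } // The previous 'this' availability is restored here.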
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if an error occurred, false otherwise.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
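// An illustrative source-level example (informal) of how the scopes map onto
// the two forms of a new-expression for a class with its own allocator:
//
//   struct T { static void *operator new(std::size_t); };
//   T *a = ::new T();  // AFS_Global: only ::operator new is considered.
//   T *b = new T();    // AFS_Both: T::operator new is preferred if viable.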
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing a nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
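// A minimal sketch (hypothetical identifiers): packaging a just-parsed 'N::'
// so it can be handed to ActOnCXXNestedNameSpecifier() below.
//
//   NestedNameSpecInfo IdInfo(NII /* IdentifierInfo* for 'N' */, NLoc,
//                             ColonColonLoc);
//   ActOnCXXNestedNameSpecifier(S, IdInfo, /*EnteringContext=*/false, SS);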
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, no error message is emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
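// Illustrative source-level examples (informal) of the init-captures these
// entry points analyze; DirectInit distinguishes the spellings.
//
//   auto l1 = [x = 42] { return x; };   // copy-init:   DirectInit == false
//   auto l2 = [x{42}]  { return x; };   // direct-init: DirectInit == true
//   int y = 0;
//   auto l3 = [&r = y] { return r; };   // ByRef == true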
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// function pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result of true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// Emit a diagnostic if D1 was not at least as constrained as D2, but would
/// have been if a pair of atomic constraints involved had been declared in a
/// concept rather than repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions is
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
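// An illustrative source-level example (informal; assumes <type_traits>):
// a constrained template whose conjuncts are checked as described above.
//
//   template <typename T>
//     requires std::is_integral_v<T> && (sizeof(T) > 1)
//   void f(T);
//   // Instantiating f<short> substitutes T = short into both conjuncts and
//   // records the verdict in a ConstraintSatisfaction.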
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful; emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful; emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
StringRef Diagnostic);
void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old,
SourceLocation New);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
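// Illustrative source-level examples (informal) of boxed expressions and the
// types they produce:
//
//   @(42)          // NSNumber *
//   @("abc")       // NSString *  (from a "const char *" literal)
//   @(boxableVar)  // NSValue *   (struct with __attribute__((objc_boxable)))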
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
llvm::Optional<SourceLocation> TemplateKW;
};
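// Usage sketch (editorial, based on the constructors above): a caller either
// forwards the location of an optional 'template' keyword, in which case a
// template name is required only when that location is valid, or passes the
// tag to require one unconditionally:
//
//   RequiredTemplateKind IfKeyword(TemplateKWLoc);        // conditional
//   RequiredTemplateKind Always(TemplateNameIsRequired);  // unconditional
//   assert(Always.isRequired());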
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
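// Editorial example of the C++1z construct referred to above: in
//
//   template<typename T> struct Wrap { Wrap(T); };
//   Wrap(const char *) -> Wrap<std::string>;
//
// the name 'Wrap' on the second line is a deduction-guide name.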
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
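// Editorial restatement of the example above as code: for
//
//   template<int X> struct A { };
//
// the identity argument for the parameter X is an expression template
// argument that simply references X, as if A<X> had been written inside the
// template itself.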
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
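// Editorial illustration of argument conversion: given
//
//   template<typename T, typename U = T *> struct S { };
//   S<int> s;
//
// checking the argument list of S<int> converts 'int' for T and appends the
// substituted default argument 'int *' for U to the converted list.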
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
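// Editorial example matching the description above:
//
//   template<typename T> void f() {
//     typename T::type x; // SS is 'T::', II is 'type'
//   }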
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
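// Editorial example of the diagnosed error:
//
//   template<typename... Ts> void g(Ts... ts);
//   template<typename... Ts> void f(Ts... ts) {
//     g(ts);    // error: unexpanded parameter pack 'ts' (UPPC_Expression)
//     g(ts...); // OK: the pack is expanded
//   }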
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
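// Editorial example: the ellipsis after a template argument turns it into a
// pack expansion whose pattern is the preceding argument:
//
//   template<typename... Ts> struct Tuple { };
//   template<typename... Ts> using Copy = Tuple<Ts...>; // 'Ts' + '...'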
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
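// Editorial sketch of the length-mismatch failure described above, using a
// hypothetical Pair template: two packs expanded within one pattern must
// produce the same number of elements.
//
//   template<typename A, typename B> struct Pair { };
//   template<typename... As, typename... Bs>
//   void zip(Pair<As, Bs>... ps); // error if As and Bs deduce to
//                                 // different lengths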
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
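// Editorial example: when transforming 'sizeof...(Args)', the pack size can
// often be computed without materializing the expansion:
//
//   template<typename... Args> constexpr unsigned Size = sizeof...(Args);
//   static_assert(Size<int, char> == 2, "two elements, nothing expanded");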
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
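// Editorial motivating case: deduction matches function types while ignoring
// properties such as noexcept and calling convention, e.g.
//
//   template<typename R> R call(R (*f)());
//   int g() noexcept;
//   int x = call(g); // deduces R = int; 'noexcept' is dropped for matching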
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
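// Editorial examples of two of the failure codes above:
//
//   template<typename T> void h(T, T);
//   h(1, 2.0);   // TDK_Inconsistent: T deduced as both 'int' and 'double'
//   h(1, 2, 3);  // TDK_TooManyArguments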
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
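// Editorial example:
//
//   auto x = 42;          // DAR_Succeeded, Result = int
//   auto y = { 1, 2.0 };  // DAR_Failed: conflicting element types for the
//                         // deduced std::initializer_list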
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
// We are checking the constraints associated with a constrained entity or
// the constraint expression of a concept. This includes the checks that
// atomic constraints have the type 'bool' and that they can be constant
// evaluated.
ConstraintsCheck,
// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
// We are normalizing a constraint expression.
ConstraintNormalization,
// We are substituting into the parameter mapping of an atomic constraint
// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// We are initializing a structured binding.
InitializingStructuredBinding,
/// We are marking a class as __dllexport.
MarkingClassDllexported,
/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionIndexRAII;
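// A minimal usage sketch for the RAII helper above (illustrative; the
// surrounding function and variable names are hypothetical):
//
//   void substNthPackElement(Sema &S, int I) {
//     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I);
//     // Substitutions here use the I'th element of each expanded pack.
//   } // The previous substitution index is restored on scope exit.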
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum number of
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
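// Typical usage pattern for InstantiatingTemplate (an illustrative sketch,
// not a definitive excerpt from the implementation):
//
//   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
//   if (Inst.isInvalid())
//     return; // Depth limit exceeded; an error was already diagnosed.
//   if (Inst.isAlreadyInstantiating())
//     return; // This specialization is already being instantiated.
//   // ... perform the instantiation; the destructor pops the context ...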
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-null, points to the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
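// A hedged usage sketch: trap diagnostics produced during speculative
// checking so they can be treated as a substitution failure rather than
// hard errors.
//
//   SFINAETrap Trap(SemaRef);
//   // ... perform deduction or other speculative analysis ...
//   if (Trap.hasErrorOccurred()) {
//     // Treat as a SFINAE failure; the trapped diagnostics stay suppressed.
//   }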
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
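// Illustrative sketch: wrap provisional analysis so that typo-correction
// and immediate-context diagnostics are suppressed for its duration.
//
//   {
//     TentativeAnalysisScope Tentative(SemaRef);
//     // ... check the validity of a construct without committing ...
//   } // The previous typo-correction setting is restored here.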
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
} else {
// Template instantiations in the PCH may be delayed until the TU.
S.PendingInstantiations.swap(SavedPendingInstantiations);
S.PendingInstantiations.insert(S.PendingInstantiations.end(),
SavedPendingInstantiations.begin(),
SavedPendingInstantiations.end());
}
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
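// Illustrative usage sketch (assumed, not quoted from a call site):
//
//   {
//     GlobalEagerInstantiationScope GlobalInstantiations(S, /*Enabled=*/true);
//     // ... work that may enqueue instantiations and vtable uses ...
//     GlobalInstantiations.perform(); // define vtables, run instantiations
//   } // The destructor swaps the saved pending state back in.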
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
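// Illustrative sketch, mirroring the global scope above but for members of
// local classes that must be instantiated in the enclosing function scope:
//
//   {
//     LocalEagerInstantiationScope LocalInstantiations(S);
//     // ... instantiate a function body ...
//     LocalInstantiations.perform(); // run the queued local instantiations
//   }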
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index. Indices
/// must be set in increasing order.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
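// A small sketch of the builder (illustrative; `EPI` is an assumed
// FunctionProtoType::ExtProtoInfo in scope). getPointerOrNull() yields
// nullptr when every entry is the default ExtParameterInfo:
//
//   ExtParameterInfoBuilder InfoBuilder;
//   InfoBuilder.set(1, FunctionProtoType::ExtParameterInfo()
//                          .withIsNoEscape(true));
//   EPI.ExtParameterInfos = InfoBuilder.getPointerOrNull(NumParams);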
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
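// An assumed call-site sketch for SubstExprs: substituting into a list of
// call arguments.
//
//   SmallVector<Expr *, 4> NewArgs;
//   if (SemaRef.SubstExprs(OldArgs, /*IsCall=*/true, TemplateArgs, NewArgs))
//     return ExprError(); // substitution failed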
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
ResultTypeCompatibilityKind
checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
const ObjCInterfaceDecl *CurrentClass);
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on well-formed \#pragma clang __debug dump II.
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
bool isPreciseFPEnabled() {
return !CurFPFeatures.getAllowFPReassociate() &&
!CurFPFeatures.getNoSignedZero() &&
!CurFPFeatures.getAllowReciprocal() &&
!CurFPFeatures.getAllowApproxFunc();
}
/// ActOnPragmaFloatControl - Called on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility...
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called to set rounding mode for floating point operations.
void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
/// Check that the expression co_await promise.final_suspend() is not
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = std::string(Ext);
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope.
FunctionDecl *
ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
Declarator &D);
/// Register \p FD as specialization of \p BaseFD in the current `omp
/// begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
FunctionDecl *FD, FunctionDecl *BaseFD);
public:
/// Can we exit an `omp begin/end declare variant` scope at the moment?
bool isInOpenMPDeclareVariantScope() const {
return !OMPDeclareVariantScopes.empty();
}
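// Illustrative sketch (assumed user code): the scopes above track nesting
// such as
//
//   #pragma omp begin declare variant match(device = {arch(x86_64)})
//   int foo() { return 1; }   // renamed with NameSuffix for this context
//   #pragma omp end declare variant
//
//   int foo() { return 0; }   // base version; calls may later be resolved
//                             // to the variant via ActOnOpenMPCall().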
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture the lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \p D should be captured by
/// reference.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of the nested OpenMP construct for which
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the 'requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of a target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for an OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred function calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
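// Illustrative sketch: DeclareTargetNestingLevel counts nesting such as
//
//   #pragma omp declare target       // level becomes 1
//   int DeviceGlobal;                // isInOpenMPDeclareTargetContext()
//   #pragma omp declare target       // nested: level becomes 2
//   void f();
//   #pragma omp end declare target   // level drops to 1
//   #pragma omp end declare target   // level drops to 0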
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks the '\#pragma omp declare variant' variant function against the
/// original function after parsing of the associated method/function.
/// \param DG The function declaration to which the declare variant directive
/// is applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None if the function and variant function are not compatible with
/// the pragma; otherwise, a pair of the original function and the variant ref
/// expression.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD The function declaration to which the declare variant directive
/// is applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
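// Illustrative mapping (editorial sketch): for
// '#pragma omp for schedule(monotonic: dynamic, 4)', M1 carries the
// 'monotonic' modifier, Kind the 'dynamic' schedule kind, and ChunkSize the
// expression '4'; any unused modifier slot stays unknown.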
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
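// Illustrative mapping (editorial sketch): for 'reduction(inscan, + : x)',
// Modifier designates the 'inscan' form, ReductionId names the '+' reduction
// operator, and VarList holds the reference to 'x'.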
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for the list of allocators.
struct UsesAllocatorsData {
/// Allocator.
Expr *Allocator = nullptr;
/// Allocator traits.
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
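// Illustrative mapping (editorial sketch): for
// '#pragma omp target uses_allocators(omp_default_mem_alloc, MyAlloc(MyTraits))'
// the second UsesAllocatorsData entry has Allocator == 'MyAlloc' and
// AllocatorTraits == 'MyTraits'; the first entry has no traits expression.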
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The result of the cast has the value kind \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
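// A typical (hypothetical) call site inside Sema, converting an operand to
// 'double' with an implicit cast node:
//
//   ExprResult Res = ImpCastExprToType(E, Context.DoubleTy, CK_FloatingCast);
//   if (Res.isInvalid())
//     return ExprError();
//   E = Res.get();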
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
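// Illustrative C example of the default argument promotions: in a call to a
// function without a prototype, 'char' promotes to 'int' and 'float'
// promotes to 'double'.
//
//   void f();        // no prototype
//   char c; float x;
//   f(c, x);         // arguments are passed as (int, double)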
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
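// Illustrative C++ sketch: passing a non-trivial class object through the
// ellipsis is classified by isValidVarArgType() and diagnosed here, e.g.
//
//   std::string S;
//   std::printf("%s\n", S);   // diagnosed: non-trivial type passed to a
//                             // variadic function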
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collect argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If the operands aren't both arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
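// Worked example of the usual arithmetic conversions:
//
//   int i; float f; unsigned long ul;
//   i + f;    // 'i' converts to 'float'; the result type is 'float'
//   i + ul;   // 'i' converts to 'unsigned long'; result 'unsigned long'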
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointer types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright; it is invalid to
/// represent it in the AST.
Incompatible
};
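// Illustrative C snippets and the AssignConvertType each typically yields
// (editorial sketch; some classifications depend on the target):
//
//   int *p; int n;
//   n = p;                    // PointerToInt (extension)
//   p = n;                    // IntToPointer (extension)
//   const char **cpp; char **pp;
//   cpp = pp;                 // IncompatibleNestedPointerQualifiers
//   unsigned *up; int *ip;
//   up = ip;                  // IncompatiblePointerSign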
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
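// Illustrative sketch of a flag enum and a mask value:
//
//   enum __attribute__((flag_enum)) Flags { A = 1, B = 2, C = 4 };
//   Flags f = (Flags)(A | C);   // 5 is a valid combination of flags
//   Flags g = (Flags)~B;        // complement: valid only when AllowMask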
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// Type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
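// For example, given 'struct B {}; struct D : B {};', comparing T1 = 'const B'
// with T2 = 'D' yields Ref_Compatible, with Conv reporting DerivedToBase and
// Qualification, so 'const B &r = d;' binds directly.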
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
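// Illustration, assuming the ext_vector_type extension:
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   float4 v = (float4)1.0; // 1.0 is first converted to float, then
//                           // splatted across all four lanes.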
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
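// Illustrative flow for an 'if constexpr' condition (hypothetical caller):
//   Sema::ConditionResult Cond = S.ActOnCondition(
//       S.getCurScope(), IfLoc, CondExpr, Sema::ConditionKind::ConstexprIf);
//   if (!Cond.isInvalid())
//     if (llvm::Optional<bool> Known = Cond.getKnownValue())
//       ; // only the selected arm needs to be instantiated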
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the converted expression; the result is invalid on error.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Checks that the expression can be converted to
/// bool; returns the converted expression, which is invalid on error.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns the converted
/// expression, which is invalid on failure.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns the bit-width expression, which is invalid on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Diagnostic builder for CUDA/OpenMP device errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
/// Check if the declaration is allowed to be used in expressions for the
/// offloading devices.
void checkDeviceDecl(const ValueDecl *D, SourceLocation Loc);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
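// For example, when compiling for the host, a __host__ __device__ caller
// invoking a __device__ callee is CFP_WrongSide, invoking a __host__ callee
// is CFP_SameSide, and a plain host-to-host call is CFP_Native.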
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas are implicitly host device functions unless they carry an
/// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure a kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation.
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteAfterFunctionEquals(Declarator &D);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, int ArgNum);
bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
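// For example, a function declared with __attribute__((format(printf, 1, 2)))
// is checked as FST_Printf, while __attribute__((format(strftime, 1, 0)))
// maps to FST_Strftime.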
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC's.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
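// For example, the type_tag_for_datatype attribute performs this registration
// declaratively; the declaration below ties the magic value 42 to 'int' for
// the 'mpi' argument kind, so calls annotated with argument_with_type_tag or
// pointer_with_type_tag can be checked:
//   static const int mpi_int
//       __attribute__((type_tag_for_datatype(mpi, int))) = 42;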
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the number of arguments being passed
/// to a function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view it as an extra argument.
return NumArgs > NumParams;
}
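// For example, completing just after the comma in 'f(1, ' against
// 'void f(int);' gives NumParams == 1, NumArgs == 1 and
// PartialOverloading == true, so the pending argument counts and f is
// flagged as receiving too many arguments.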
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks whether the expression is in the set of potentially
/// misaligned members and is being converted to some pointer type T with
/// lower or equal alignment requirements. If so, it removes it. This is
/// used when we do not want to diagnose such misaligned access (e.g. in
/// conversions to void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't know that this function will be codegen'ed
/// for the device yet, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it:
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
DeviceDiagBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
GB_unaryop__ainv_int16_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int16_uint8
// op(A') function: GB_tran__ainv_int16_uint8
// C type: int16_t
// A type: uint8_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
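// For a single entry p, GB_CAST_OP (p, p) therefore expands (roughly) to:
//      uint8_t aij = Ax [p] ;
//      int16_t x = (int16_t) aij ;
//      Cx [p] = -x ;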
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_int16_uint8
(
int16_t *restrict Cx,
const uint8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_int16_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__ne_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_int16)
// A*D function (colscale): GB (_AxD__ne_int16)
// D*A function (rowscale): GB (_DxB__ne_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_int16)
// C=scalar+B GB (_bind1st__ne_int16)
// C=scalar+B' GB (_bind1st_tran__ne_int16)
// C=A+scalar GB (_bind2nd__ne_int16)
// C=A'+scalar GB (_bind2nd_tran__ne_int16)
// C type: bool
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
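// For example, GB_BINOP (GB_CX (p), aij, bij, i, j) expands to
//      Cx [p] = (aij != bij) ;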
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_INT16 || GxB_NO_NE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
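// (Editorial note, hedged: NE is not in that list, so no dense ewise3
// accumulation kernel is generated for this operator; the stub below is
// named GB ((none)) and compiled out by the enclosing #if 0.)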
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__ne_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ne_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ne_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ne_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ne_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ne_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__ne_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ne_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__ne_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ne_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ne_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ne_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__ne_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__ne_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
gimple.h | /* Gimple IR definitions.
Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
Contributed by Aldy Hernandez <aldyh@redhat.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_GIMPLE_H
#define GCC_GIMPLE_H
#include "pointer-set.h"
#include "vec.h"
#include "ggc.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "tree-ssa-operands.h"
DEF_VEC_P(gimple);
DEF_VEC_ALLOC_P(gimple,heap);
DEF_VEC_ALLOC_P(gimple,gc);
typedef gimple *gimple_p;
DEF_VEC_P(gimple_p);
DEF_VEC_ALLOC_P(gimple_p,heap);
DEF_VEC_P(gimple_seq);
DEF_VEC_ALLOC_P(gimple_seq,gc);
DEF_VEC_ALLOC_P(gimple_seq,heap);
/* For each block, the PHI nodes that need to be rewritten are stored into
these vectors. */
typedef VEC(gimple, heap) *gimple_vec;
DEF_VEC_P (gimple_vec);
DEF_VEC_ALLOC_P (gimple_vec, heap);
enum gimple_code {
#define DEFGSCODE(SYM, STRING, STRUCT) SYM,
#include "gimple.def"
#undef DEFGSCODE
LAST_AND_UNUSED_GIMPLE_CODE
};
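/* Hedged illustration of the X-macro pattern above (editorial sketch;
   the real entries live in gimple.def): a line such as

       DEFGSCODE(GIMPLE_ASSIGN, "gimple_assign", GSS_WITH_MEM_OPS)

   expands to the enumerator GIMPLE_ASSIGN here, and gimple.c re-includes
   gimple.def with a different DEFGSCODE definition to build the parallel
   gimple_code_name[] table declared below:

       #define DEFGSCODE(SYM, NAME, GSS)  NAME,
       const char *const gimple_code_name[] = {
       #include "gimple.def"
       };
       #undef DEFGSCODE  */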
extern const char *const gimple_code_name[];
extern const unsigned char gimple_rhs_class_table[];
/* Error out if a gimple tuple is addressed incorrectly. */
#if defined ENABLE_GIMPLE_CHECKING
extern void gimple_check_failed (const_gimple, const char *, int, \
const char *, enum gimple_code, \
enum tree_code) ATTRIBUTE_NORETURN;
#define GIMPLE_CHECK(GS, CODE) \
do { \
const_gimple __gs = (GS); \
if (gimple_code (__gs) != (CODE)) \
gimple_check_failed (__gs, __FILE__, __LINE__, __FUNCTION__, \
(CODE), ERROR_MARK); \
} while (0)
#else /* not ENABLE_GIMPLE_CHECKING */
#define GIMPLE_CHECK(GS, CODE) (void)0
#endif
/* Class of GIMPLE expressions suitable for the RHS of assignments. See
get_gimple_rhs_class. */
enum gimple_rhs_class
{
GIMPLE_INVALID_RHS, /* The expression cannot be used on the RHS. */
GIMPLE_TERNARY_RHS, /* The expression is a ternary operation. */
GIMPLE_BINARY_RHS, /* The expression is a binary operation. */
GIMPLE_UNARY_RHS, /* The expression is a unary operation. */
GIMPLE_SINGLE_RHS /* The expression is a single object (an SSA
name, a _DECL, a _REF, etc.). */
};
/* Specific flags for individual GIMPLE statements. These flags are
always stored in gimple_statement_base.subcode and they may only be
defined for statement codes that do not use sub-codes.
Values for the masks can overlap as long as the overlapping values
are never used in the same statement class.
The maximum mask value that can be defined is 1 << 15 (i.e., each
statement code can hold up to 16 bitflags).
Keep this list sorted. */
enum gf_mask {
GF_ASM_INPUT = 1 << 0,
GF_ASM_VOLATILE = 1 << 1,
GF_CALL_CANNOT_INLINE = 1 << 0,
GF_CALL_FROM_THUNK = 1 << 1,
GF_CALL_RETURN_SLOT_OPT = 1 << 2,
GF_CALL_TAILCALL = 1 << 3,
GF_CALL_VA_ARG_PACK = 1 << 4,
GF_CALL_NOTHROW = 1 << 5,
GF_OMP_PARALLEL_COMBINED = 1 << 0,
/* True on a GIMPLE_OMP_RETURN statement if the return does not require
a thread synchronization via some sort of barrier. The exact barrier
that would otherwise be emitted is dependent on the OMP statement with
which this return is associated. */
GF_OMP_RETURN_NOWAIT = 1 << 0,
GF_OMP_SECTION_LAST = 1 << 0,
GF_PREDICT_TAKEN = 1 << 15
};
/* Currently, there's only one type of gimple debug stmt. Others are
envisioned, for example, to enable the generation of is_stmt notes
in line number information, to mark sequence points, etc. This
subcode is to be used to tell them apart. */
enum gimple_debug_subcode {
GIMPLE_DEBUG_BIND = 0
};
/* Masks for selecting a pass local flag (PLF) to work on. These
masks are used by gimple_set_plf and gimple_plf. */
enum plf_mask {
GF_PLF_1 = 1 << 0,
GF_PLF_2 = 1 << 1
};
/* A node in a gimple_seq_d. */
struct GTY((chain_next ("%h.next"), chain_prev ("%h.prev"))) gimple_seq_node_d {
gimple stmt;
struct gimple_seq_node_d *prev;
struct gimple_seq_node_d *next;
};
/* A double-linked sequence of gimple statements. */
struct GTY ((chain_next ("%h.next_free"))) gimple_seq_d {
/* First and last statements in the sequence. */
gimple_seq_node first;
gimple_seq_node last;
/* Sequences are created/destroyed frequently. To minimize
allocation activity, deallocated sequences are kept in a pool of
available sequences. This is the pointer to the next free
sequence in the pool. */
gimple_seq next_free;
};
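/* Hedged usage sketch (editorial; gimple_seq_alloc and gimple_seq_free
   are declared later in this file).  Sequences cycle through this pool
   rather than being reallocated from scratch:

       gimple_seq seq = gimple_seq_alloc ();   reuses a pooled sequence,
                                               if one is available
       ...
       gimple_seq_free (seq);                  returns SEQ to the pool
                                               via its next_free field   */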
/* Return the first node in GIMPLE sequence S. */
static inline gimple_seq_node
gimple_seq_first (const_gimple_seq s)
{
return s ? s->first : NULL;
}
/* Return the first statement in GIMPLE sequence S. */
static inline gimple
gimple_seq_first_stmt (const_gimple_seq s)
{
gimple_seq_node n = gimple_seq_first (s);
return (n) ? n->stmt : NULL;
}
/* Return the last node in GIMPLE sequence S. */
static inline gimple_seq_node
gimple_seq_last (const_gimple_seq s)
{
return s ? s->last : NULL;
}
/* Return the last statement in GIMPLE sequence S. */
static inline gimple
gimple_seq_last_stmt (const_gimple_seq s)
{
gimple_seq_node n = gimple_seq_last (s);
return (n) ? n->stmt : NULL;
}
/* Set the last node in GIMPLE sequence S to LAST. */
static inline void
gimple_seq_set_last (gimple_seq s, gimple_seq_node last)
{
s->last = last;
}
/* Set the first node in GIMPLE sequence S to FIRST. */
static inline void
gimple_seq_set_first (gimple_seq s, gimple_seq_node first)
{
s->first = first;
}
/* Return true if GIMPLE sequence S is empty. */
static inline bool
gimple_seq_empty_p (const_gimple_seq s)
{
return s == NULL || s->first == NULL;
}
void gimple_seq_add_stmt (gimple_seq *, gimple);
/* Link gimple statement GS to the end of the sequence *SEQ_P. If
*SEQ_P is NULL, a new sequence is allocated. This function is
similar to gimple_seq_add_stmt, but does not scan the operands.
During gimplification, we need to manipulate statement sequences
before the def/use vectors have been constructed. */
void gimplify_seq_add_stmt (gimple_seq *, gimple);
/* Allocate a new sequence and initialize its first element with STMT. */
static inline gimple_seq
gimple_seq_alloc_with_stmt (gimple stmt)
{
gimple_seq seq = NULL;
gimple_seq_add_stmt (&seq, stmt);
return seq;
}
/* Returns the sequence of statements in BB. */
static inline gimple_seq
bb_seq (const_basic_block bb)
{
return (!(bb->flags & BB_RTL) && bb->il.gimple) ? bb->il.gimple->seq : NULL;
}
/* Sets the sequence of statements in BB to SEQ. */
static inline void
set_bb_seq (basic_block bb, gimple_seq seq)
{
gcc_assert (!(bb->flags & BB_RTL));
bb->il.gimple->seq = seq;
}
/* Iterator object for GIMPLE statement sequences. */
typedef struct
{
/* Sequence node holding the current statement. */
gimple_seq_node ptr;
/* Sequence and basic block holding the statement. These fields
are necessary to handle edge cases such as when statement is
added to an empty basic block or when the last statement of a
block/sequence is removed. */
gimple_seq seq;
basic_block bb;
} gimple_stmt_iterator;
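/* Hedged usage sketch (editorial; the gsi_* accessors are defined later
   in this header).  The canonical walk over the statements of a basic
   block BB with this iterator is:

       gimple_stmt_iterator gsi;
       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
         {
           gimple stmt = gsi_stmt (gsi);
           ...
         }
*/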
/* Data structure definitions for GIMPLE tuples. NOTE: word markers
are for 64 bit hosts. */
struct GTY(()) gimple_statement_base {
/* [ WORD 1 ]
Main identifying code for a tuple. */
ENUM_BITFIELD(gimple_code) code : 8;
/* Nonzero if a warning should not be emitted on this tuple. */
unsigned int no_warning : 1;
/* Nonzero if this tuple has been visited. Passes are responsible
for clearing this bit before using it. */
unsigned int visited : 1;
/* Nonzero if this tuple represents a non-temporal move. */
unsigned int nontemporal_move : 1;
/* Pass local flags. These flags are free for any pass to use as
they see fit. Passes should not assume that these flags contain
any useful value when the pass starts. Any initial state that
the pass requires should be set on entry to the pass. See
gimple_set_plf and gimple_plf for usage. */
unsigned int plf : 2;
/* Nonzero if this statement has been modified and needs to have its
operands rescanned. */
unsigned modified : 1;
/* Nonzero if this statement contains volatile operands. */
unsigned has_volatile_ops : 1;
/* Padding to get subcode to 16 bit alignment. */
unsigned pad : 1;
/* The SUBCODE field can be used for tuple-specific flags for tuples
that do not require subcodes. Note that SUBCODE should be at
least as wide as tree codes, as several tuples store tree codes
in there. */
unsigned int subcode : 16;
/* UID of this statement. This is used by passes that want to
assign IDs to statements. It must be assigned and used by each
pass. By default it should be assumed to contain garbage. */
unsigned uid;
/* [ WORD 2 ]
Locus information for debug info. */
location_t location;
/* Number of operands in this tuple. */
unsigned num_ops;
/* [ WORD 3 ]
Basic block holding this statement. */
struct basic_block_def *bb;
/* [ WORD 4 ]
Lexical block holding this statement. */
tree block;
};
/* Base structure for tuples with operands. */
struct GTY(()) gimple_statement_with_ops_base
{
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5-6 ]
SSA operand vectors. NOTE: It should be possible to
amalgamate these vectors with the operand vector OP. However,
the SSA operand vectors are organized differently and contain
more information (like immediate use chaining). */
struct def_optype_d GTY((skip (""))) *def_ops;
struct use_optype_d GTY((skip (""))) *use_ops;
};
/* Statements that take register operands. */
struct GTY(()) gimple_statement_with_ops
{
/* [ WORD 1-6 ] */
struct gimple_statement_with_ops_base opbase;
/* [ WORD 7 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.opbase.gsbase.num_ops"))) op[1];
};
/* Base for statements that take both memory and register operands. */
struct GTY(()) gimple_statement_with_memory_ops_base
{
/* [ WORD 1-6 ] */
struct gimple_statement_with_ops_base opbase;
/* [ WORD 7-8 ]
Virtual operands for this statement. The GC will pick them
up via the ssa_names array. */
tree GTY((skip (""))) vdef;
tree GTY((skip (""))) vuse;
};
/* Statements that take both memory and register operands. */
struct GTY(()) gimple_statement_with_memory_ops
{
/* [ WORD 1-8 ] */
struct gimple_statement_with_memory_ops_base membase;
/* [ WORD 9 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
};
/* OpenMP statements (#pragma omp). */
struct GTY(()) gimple_statement_omp {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
gimple_seq body;
};
/* GIMPLE_BIND */
struct GTY(()) gimple_statement_bind {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Variables declared in this scope. */
tree vars;
/* [ WORD 6 ]
This is different from the BLOCK field in gimple_statement_base,
which is analogous to TREE_BLOCK (i.e., the lexical block holding
this statement). This field is the equivalent of BIND_EXPR_BLOCK
in tree land (i.e., the lexical scope defined by this bind). See
gimple-low.c. */
tree block;
/* [ WORD 7 ] */
gimple_seq body;
};
/* GIMPLE_CATCH */
struct GTY(()) gimple_statement_catch {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
tree types;
/* [ WORD 6 ] */
gimple_seq handler;
};
/* GIMPLE_EH_FILTER */
struct GTY(()) gimple_statement_eh_filter {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Filter types. */
tree types;
/* [ WORD 6 ]
Failure actions. */
gimple_seq failure;
};
/* GIMPLE_EH_MUST_NOT_THROW */
struct GTY(()) gimple_statement_eh_mnt {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] Abort function decl. */
tree fndecl;
};
/* GIMPLE_PHI */
struct GTY(()) gimple_statement_phi {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
unsigned capacity;
unsigned nargs;
/* [ WORD 6 ] */
tree result;
/* [ WORD 7 ] */
struct phi_arg_d GTY ((length ("%h.nargs"))) args[1];
};
/* GIMPLE_RESX, GIMPLE_EH_DISPATCH */
struct GTY(()) gimple_statement_eh_ctrl
{
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Exception region number. */
int region;
};
/* GIMPLE_TRY */
struct GTY(()) gimple_statement_try {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Expression to evaluate. */
gimple_seq eval;
/* [ WORD 6 ]
Cleanup expression. */
gimple_seq cleanup;
};
/* Kind of GIMPLE_TRY statements. */
enum gimple_try_flags
{
/* A try/catch. */
GIMPLE_TRY_CATCH = 1 << 0,
/* A try/finally. */
GIMPLE_TRY_FINALLY = 1 << 1,
GIMPLE_TRY_KIND = GIMPLE_TRY_CATCH | GIMPLE_TRY_FINALLY,
/* Analogous to TRY_CATCH_IS_CLEANUP. */
GIMPLE_TRY_CATCH_IS_CLEANUP = 1 << 2
};
/* GIMPLE_WITH_CLEANUP_EXPR */
struct GTY(()) gimple_statement_wce {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* Subcode: CLEANUP_EH_ONLY. True if the cleanup should only be
executed if an exception is thrown, not on normal exit of its
scope. This flag is analogous to the CLEANUP_EH_ONLY flag
in TARGET_EXPRs. */
/* [ WORD 5 ]
Cleanup expression. */
gimple_seq cleanup;
};
/* GIMPLE_ASM */
struct GTY(()) gimple_statement_asm
{
/* [ WORD 1-8 ] */
struct gimple_statement_with_memory_ops_base membase;
/* [ WORD 9 ]
__asm__ statement. */
const char *string;
/* [ WORD 10 ]
Number of inputs, outputs, clobbers, labels. */
unsigned char ni;
unsigned char no;
unsigned char nc;
unsigned char nl;
/* [ WORD 11 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
};
/* GIMPLE_OMP_CRITICAL */
struct GTY(()) gimple_statement_omp_critical {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ]
Critical section name. */
tree name;
};
struct GTY(()) gimple_omp_for_iter {
/* Condition code. */
enum tree_code cond;
/* Index variable. */
tree index;
/* Initial value. */
tree initial;
/* Final value. */
tree final;
/* Increment. */
tree incr;
};
/* GIMPLE_OMP_FOR */
struct GTY(()) gimple_statement_omp_for {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ] */
tree clauses;
/* [ WORD 7 ]
Number of elements in iter array. */
size_t collapse;
/* [ WORD 8 ] */
struct gimple_omp_for_iter * GTY((length ("%h.collapse"))) iter;
/* [ WORD 9 ]
Pre-body evaluated before the loop body begins. */
gimple_seq pre_body;
};
/* GIMPLE_OMP_PARALLEL */
struct GTY(()) gimple_statement_omp_parallel {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ]
Clauses. */
tree clauses;
/* [ WORD 7 ]
Child function holding the body of the parallel region. */
tree child_fn;
/* [ WORD 8 ]
Shared data argument. */
tree data_arg;
};
/* GIMPLE_OMP_TASK */
struct GTY(()) gimple_statement_omp_task {
/* [ WORD 1-8 ] */
struct gimple_statement_omp_parallel par;
/* [ WORD 9 ]
Child function holding firstprivate initialization if needed. */
tree copy_fn;
/* [ WORD 10-11 ]
Size and alignment in bytes of the argument data block. */
tree arg_size;
tree arg_align;
};
/* GIMPLE_OMP_SECTION */
/* Uses struct gimple_statement_omp. */
/* GIMPLE_OMP_SECTIONS */
struct GTY(()) gimple_statement_omp_sections {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ] */
tree clauses;
/* [ WORD 7 ]
The control variable used for deciding which of the sections to
execute. */
tree control;
};
/* GIMPLE_OMP_CONTINUE.
Note: This does not inherit from gimple_statement_omp, because we
do not need the body field. */
struct GTY(()) gimple_statement_omp_continue {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
tree control_def;
/* [ WORD 6 ] */
tree control_use;
};
/* GIMPLE_OMP_SINGLE */
struct GTY(()) gimple_statement_omp_single {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ] */
tree clauses;
};
/* GIMPLE_OMP_ATOMIC_LOAD.
Note: This is based on gimple_statement_base, not g_s_omp, because g_s_omp
contains a sequence, which we don't need here. */
struct GTY(()) gimple_statement_omp_atomic_load {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5-6 ] */
tree rhs, lhs;
};
/* GIMPLE_OMP_ATOMIC_STORE.
See note on GIMPLE_OMP_ATOMIC_LOAD. */
struct GTY(()) gimple_statement_omp_atomic_store {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
tree val;
};
#define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) SYM,
enum gimple_statement_structure_enum {
#include "gsstruct.def"
LAST_GSS_ENUM
};
#undef DEFGSSTRUCT
/* Define the overall contents of a gimple tuple. It may be any of the
structures declared above for various types of tuples. */
union GTY ((desc ("gimple_statement_structure (&%h)"))) gimple_statement_d {
struct gimple_statement_base GTY ((tag ("GSS_BASE"))) gsbase;
struct gimple_statement_with_ops GTY ((tag ("GSS_WITH_OPS"))) gsops;
struct gimple_statement_with_memory_ops_base GTY ((tag ("GSS_WITH_MEM_OPS_BASE"))) gsmembase;
struct gimple_statement_with_memory_ops GTY ((tag ("GSS_WITH_MEM_OPS"))) gsmem;
struct gimple_statement_omp GTY ((tag ("GSS_OMP"))) omp;
struct gimple_statement_bind GTY ((tag ("GSS_BIND"))) gimple_bind;
struct gimple_statement_catch GTY ((tag ("GSS_CATCH"))) gimple_catch;
struct gimple_statement_eh_filter GTY ((tag ("GSS_EH_FILTER"))) gimple_eh_filter;
struct gimple_statement_eh_mnt GTY ((tag ("GSS_EH_MNT"))) gimple_eh_mnt;
struct gimple_statement_phi GTY ((tag ("GSS_PHI"))) gimple_phi;
struct gimple_statement_eh_ctrl GTY ((tag ("GSS_EH_CTRL"))) gimple_eh_ctrl;
struct gimple_statement_try GTY ((tag ("GSS_TRY"))) gimple_try;
struct gimple_statement_wce GTY ((tag ("GSS_WCE"))) gimple_wce;
struct gimple_statement_asm GTY ((tag ("GSS_ASM"))) gimple_asm;
struct gimple_statement_omp_critical GTY ((tag ("GSS_OMP_CRITICAL"))) gimple_omp_critical;
struct gimple_statement_omp_for GTY ((tag ("GSS_OMP_FOR"))) gimple_omp_for;
struct gimple_statement_omp_parallel GTY ((tag ("GSS_OMP_PARALLEL"))) gimple_omp_parallel;
struct gimple_statement_omp_task GTY ((tag ("GSS_OMP_TASK"))) gimple_omp_task;
struct gimple_statement_omp_sections GTY ((tag ("GSS_OMP_SECTIONS"))) gimple_omp_sections;
struct gimple_statement_omp_single GTY ((tag ("GSS_OMP_SINGLE"))) gimple_omp_single;
struct gimple_statement_omp_continue GTY ((tag ("GSS_OMP_CONTINUE"))) gimple_omp_continue;
struct gimple_statement_omp_atomic_load GTY ((tag ("GSS_OMP_ATOMIC_LOAD"))) gimple_omp_atomic_load;
struct gimple_statement_omp_atomic_store GTY ((tag ("GSS_OMP_ATOMIC_STORE"))) gimple_omp_atomic_store;
};
/* In gimple.c. */
/* Offset in bytes to the location of the operand vector.
Zero if there is no operand vector for this tuple structure. */
extern size_t const gimple_ops_offset_[];
/* Map GIMPLE codes to GSS codes. */
extern enum gimple_statement_structure_enum const gss_for_code_[];
/* This variable holds the currently expanded gimple statement for purposes
of communicating the profile info to the builtin expanders. */
extern gimple currently_expanding_gimple_stmt;
gimple gimple_build_return (tree);
gimple gimple_build_assign_stat (tree, tree MEM_STAT_DECL);
#define gimple_build_assign(l,r) gimple_build_assign_stat (l, r MEM_STAT_INFO)
void extract_ops_from_tree_1 (tree, enum tree_code *, tree *, tree *, tree *);
gimple gimple_build_assign_with_ops_stat (enum tree_code, tree, tree,
tree, tree MEM_STAT_DECL);
#define gimple_build_assign_with_ops(c,o1,o2,o3) \
gimple_build_assign_with_ops_stat (c, o1, o2, o3, NULL_TREE MEM_STAT_INFO)
#define gimple_build_assign_with_ops3(c,o1,o2,o3,o4) \
gimple_build_assign_with_ops_stat (c, o1, o2, o3, o4 MEM_STAT_INFO)
gimple gimple_build_debug_bind_stat (tree, tree, gimple MEM_STAT_DECL);
#define gimple_build_debug_bind(var,val,stmt) \
gimple_build_debug_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)
gimple gimple_build_call_vec (tree, VEC(tree, heap) *);
gimple gimple_build_call (tree, unsigned, ...);
gimple gimple_build_call_from_tree (tree);
gimple gimplify_assign (tree, tree, gimple_seq *);
gimple gimple_build_cond (enum tree_code, tree, tree, tree, tree);
gimple gimple_build_label (tree label);
gimple gimple_build_goto (tree dest);
gimple gimple_build_nop (void);
gimple gimple_build_bind (tree, gimple_seq, tree);
gimple gimple_build_asm_vec (const char *, VEC(tree,gc) *, VEC(tree,gc) *,
VEC(tree,gc) *, VEC(tree,gc) *);
gimple gimple_build_catch (tree, gimple_seq);
gimple gimple_build_eh_filter (tree, gimple_seq);
gimple gimple_build_eh_must_not_throw (tree);
gimple gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags);
gimple gimple_build_wce (gimple_seq);
gimple gimple_build_resx (int);
gimple gimple_build_eh_dispatch (int);
gimple gimple_build_switch_nlabels (unsigned, tree, tree);
gimple gimple_build_switch (unsigned, tree, tree, ...);
gimple gimple_build_switch_vec (tree, tree, VEC(tree,heap) *);
gimple gimple_build_omp_parallel (gimple_seq, tree, tree, tree);
gimple gimple_build_omp_task (gimple_seq, tree, tree, tree, tree, tree, tree);
gimple gimple_build_omp_for (gimple_seq, tree, size_t, gimple_seq);
gimple gimple_build_omp_critical (gimple_seq, tree);
gimple gimple_build_omp_section (gimple_seq);
gimple gimple_build_omp_continue (tree, tree);
gimple gimple_build_omp_master (gimple_seq);
gimple gimple_build_omp_return (bool);
gimple gimple_build_omp_ordered (gimple_seq);
gimple gimple_build_omp_sections (gimple_seq, tree);
gimple gimple_build_omp_sections_switch (void);
gimple gimple_build_omp_single (gimple_seq, tree);
gimple gimple_build_cdt (tree, tree);
gimple gimple_build_omp_atomic_load (tree, tree);
gimple gimple_build_omp_atomic_store (tree);
gimple gimple_build_predict (enum br_predictor, enum prediction);
enum gimple_statement_structure_enum gss_for_assign (enum tree_code);
void sort_case_labels (VEC(tree,heap) *);
void gimple_set_body (tree, gimple_seq);
gimple_seq gimple_body (tree);
bool gimple_has_body_p (tree);
gimple_seq gimple_seq_alloc (void);
void gimple_seq_free (gimple_seq);
void gimple_seq_add_seq (gimple_seq *, gimple_seq);
gimple_seq gimple_seq_copy (gimple_seq);
int gimple_call_flags (const_gimple);
bool gimple_assign_copy_p (gimple);
bool gimple_assign_ssa_name_copy_p (gimple);
bool gimple_assign_single_p (gimple);
bool gimple_assign_unary_nop_p (gimple);
void gimple_set_bb (gimple, struct basic_block_def *);
void gimple_assign_set_rhs_from_tree (gimple_stmt_iterator *, tree);
void gimple_assign_set_rhs_with_ops_1 (gimple_stmt_iterator *, enum tree_code,
tree, tree, tree);
tree gimple_get_lhs (const_gimple);
void gimple_set_lhs (gimple, tree);
void gimple_replace_lhs (gimple, tree);
gimple gimple_copy (gimple);
bool is_gimple_operand (const_tree);
void gimple_set_modified (gimple, bool);
void gimple_cond_get_ops_from_tree (tree, enum tree_code *, tree *, tree *);
gimple gimple_build_cond_from_tree (tree, tree, tree);
void gimple_cond_set_condition_from_tree (gimple, tree);
bool gimple_has_side_effects (const_gimple);
bool gimple_rhs_has_side_effects (const_gimple);
bool gimple_could_trap_p (gimple);
bool gimple_assign_rhs_could_trap_p (gimple);
void gimple_regimplify_operands (gimple, gimple_stmt_iterator *);
bool empty_body_p (gimple_seq);
unsigned get_gimple_rhs_num_ops (enum tree_code);
#define gimple_alloc(c, n) gimple_alloc_stat (c, n MEM_STAT_INFO)
gimple gimple_alloc_stat (enum gimple_code, unsigned MEM_STAT_DECL);
const char *gimple_decl_printable_name (tree, int);
tree gimple_fold_obj_type_ref (tree, tree);
/* Returns true iff T is a valid GIMPLE statement. */
extern bool is_gimple_stmt (tree);
/* Returns true iff TYPE is a valid type for a scalar register variable. */
extern bool is_gimple_reg_type (tree);
/* Returns true iff T is a scalar register variable. */
extern bool is_gimple_reg (tree);
/* Returns true iff T is any sort of variable. */
extern bool is_gimple_variable (tree);
/* Returns true iff T is any sort of symbol. */
extern bool is_gimple_id (tree);
/* Returns true iff T is a variable or an INDIRECT_REF (of a variable). */
extern bool is_gimple_min_lval (tree);
/* Returns true iff T is something whose address can be taken. */
extern bool is_gimple_addressable (tree);
/* Returns true iff T is any valid GIMPLE lvalue. */
extern bool is_gimple_lvalue (tree);
/* Returns true iff T is a GIMPLE address. */
bool is_gimple_address (const_tree);
/* Returns true iff T is a GIMPLE invariant address. */
bool is_gimple_invariant_address (const_tree);
/* Returns true iff T is a GIMPLE invariant address at interprocedural
level. */
bool is_gimple_ip_invariant_address (const_tree);
/* Returns true iff T is a valid GIMPLE constant. */
bool is_gimple_constant (const_tree);
/* Returns true iff T is a GIMPLE restricted function invariant. */
extern bool is_gimple_min_invariant (const_tree);
/* Returns true iff T is a GIMPLE restricted interprocedural invariant. */
extern bool is_gimple_ip_invariant (const_tree);
/* Returns true iff T is a GIMPLE rvalue. */
extern bool is_gimple_val (tree);
/* Returns true iff T is a GIMPLE asm statement input. */
extern bool is_gimple_asm_val (tree);
/* Returns true iff T is a valid rhs for a MODIFY_EXPR where the LHS is a
GIMPLE temporary, a renamed user variable, or something else,
respectively. */
extern bool is_gimple_reg_rhs (tree);
extern bool is_gimple_mem_rhs (tree);
/* Returns true iff T is a valid if-statement condition. */
extern bool is_gimple_condexpr (tree);
/* Returns true iff T is a type conversion. */
extern bool is_gimple_cast (tree);
/* Returns true iff T is a variable that does not need to live in memory. */
extern bool is_gimple_non_addressable (tree t);
/* Returns true iff T is a valid call address expression. */
extern bool is_gimple_call_addr (tree);
/* If T makes a function call, returns the CALL_EXPR operand. */
extern tree get_call_expr_in (tree t);
extern void recalculate_side_effects (tree);
extern bool compare_field_offset (tree, tree);
extern tree gimple_register_type (tree);
extern void print_gimple_types_stats (void);
extern void free_gimple_type_tables (void);
extern tree gimple_unsigned_type (tree);
extern tree gimple_signed_type (tree);
extern alias_set_type gimple_get_alias_set (tree);
extern void count_uses_and_derefs (tree, gimple, unsigned *, unsigned *,
unsigned *);
extern bool walk_stmt_load_store_addr_ops (gimple, void *,
bool (*)(gimple, tree, void *),
bool (*)(gimple, tree, void *),
bool (*)(gimple, tree, void *));
extern bool walk_stmt_load_store_ops (gimple, void *,
bool (*)(gimple, tree, void *),
bool (*)(gimple, tree, void *));
extern bool gimple_ior_addresses_taken (bitmap, gimple);
/* In gimplify.c */
extern tree create_tmp_var_raw (tree, const char *);
extern tree create_tmp_var_name (const char *);
extern tree create_tmp_var (tree, const char *);
extern tree get_initialized_tmp_var (tree, gimple_seq *, gimple_seq *);
extern tree get_formal_tmp_var (tree, gimple_seq *);
extern void declare_vars (tree, gimple, bool);
extern void annotate_all_with_location (gimple_seq, location_t);
/* Validation of GIMPLE expressions. Note that these predicates only check
the basic form of the expression, they don't recurse to make sure that
underlying nodes are also of the right form. */
typedef bool (*gimple_predicate)(tree);
/* FIXME we should deduce this from the predicate. */
enum fallback {
fb_none = 0, /* Do not generate a temporary. */
fb_rvalue = 1, /* Generate an rvalue to hold the result of a
gimplified expression. */
fb_lvalue = 2, /* Generate an lvalue to hold the result of a
gimplified expression. */
fb_mayfail = 4, /* Gimplification may fail. Error issued
afterwards. */
fb_either = fb_rvalue | fb_lvalue
};
typedef int fallback_t;
enum gimplify_status {
GS_ERROR = -2, /* Something Bad Seen. */
GS_UNHANDLED = -1, /* A langhook result for "I dunno". */
GS_OK = 0, /* We did something, maybe more to do. */
GS_ALL_DONE = 1 /* The expression is fully gimplified. */
};
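/* Hedged usage sketch (editorial): a typical call site of gimplify_expr
   (declared below) forces *EXPR_P into a GIMPLE rvalue, appending any
   statements it creates to PRE_P, and propagates failure:

       enum gimplify_status gs
         = gimplify_expr (expr_p, pre_p, post_p, is_gimple_val, fb_rvalue);
       if (gs == GS_ERROR)
         return GS_ERROR;
*/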
struct gimplify_ctx
{
struct gimplify_ctx *prev_context;
VEC(gimple,heap) *bind_expr_stack;
tree temps;
gimple_seq conditional_cleanups;
tree exit_label;
tree return_temp;
VEC(tree,heap) *case_labels;
/* The formal temporary table. Should this be persistent? */
htab_t temp_htab;
int conditions;
bool save_stack;
bool into_ssa;
bool allow_rhs_cond_expr;
};
extern enum gimplify_status gimplify_expr (tree *, gimple_seq *, gimple_seq *,
bool (*) (tree), fallback_t);
extern void gimplify_type_sizes (tree, gimple_seq *);
extern void gimplify_one_sizepos (tree *, gimple_seq *);
extern bool gimplify_stmt (tree *, gimple_seq *);
extern gimple gimplify_body (tree *, tree, bool);
extern void push_gimplify_context (struct gimplify_ctx *);
extern void pop_gimplify_context (gimple);
extern void gimplify_and_add (tree, gimple_seq *);
/* Miscellaneous helpers. */
extern void gimple_add_tmp_var (tree);
extern gimple gimple_current_bind_expr (void);
extern VEC(gimple, heap) *gimple_bind_expr_stack (void);
extern tree voidify_wrapper_expr (tree, tree);
extern tree build_and_jump (tree *);
extern tree alloc_stmt_list (void);
extern void free_stmt_list (tree);
extern tree force_labels_r (tree *, int *, void *);
extern enum gimplify_status gimplify_va_arg_expr (tree *, gimple_seq *,
gimple_seq *);
struct gimplify_omp_ctx;
extern void omp_firstprivatize_variable (struct gimplify_omp_ctx *, tree);
extern tree gimple_boolify (tree);
extern gimple_predicate rhs_predicate_for (tree);
extern tree canonicalize_cond_expr_cond (tree);
/* In omp-low.c. */
extern tree omp_reduction_init (tree, tree);
/* In tree-nested.c. */
extern void lower_nested_functions (tree);
extern void insert_field_into_struct (tree, tree);
/* In gimplify.c. */
extern void gimplify_function_tree (tree);
/* In cfgexpand.c. */
extern tree gimple_assign_rhs_to_tree (gimple);
extern void add_reg_br_prob_note (rtx last, int probability);
/* In builtins.c */
extern bool validate_gimple_arglist (const_gimple, ...);
/* In tree-ssa.c */
extern bool tree_ssa_useless_type_conversion (tree);
extern tree tree_ssa_strip_useless_type_conversions (tree);
extern bool useless_type_conversion_p (tree, tree);
extern bool types_compatible_p (tree, tree);
/* Return the code for GIMPLE statement G. */
static inline enum gimple_code
gimple_code (const_gimple g)
{
return g->gsbase.code;
}
/* Return the GSS code used by a GIMPLE code. */
static inline enum gimple_statement_structure_enum
gss_for_code (enum gimple_code code)
{
#ifdef ENABLE_CHECKING
gcc_assert ((unsigned int)code < LAST_AND_UNUSED_GIMPLE_CODE);
#endif
return gss_for_code_[code];
}
/* Return which GSS code is used by GS. */
static inline enum gimple_statement_structure_enum
gimple_statement_structure (gimple gs)
{
return gss_for_code (gimple_code (gs));
}
/* Return true if statement G has sub-statements. This is only true for
High GIMPLE statements. */
static inline bool
gimple_has_substatements (gimple g)
{
switch (gimple_code (g))
{
case GIMPLE_BIND:
case GIMPLE_CATCH:
case GIMPLE_EH_FILTER:
case GIMPLE_TRY:
case GIMPLE_OMP_FOR:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_WITH_CLEANUP_EXPR:
return true;
default:
return false;
}
}
/* Return the basic block holding statement G. */
static inline struct basic_block_def *
gimple_bb (const_gimple g)
{
return g->gsbase.bb;
}
/* Return the lexical scope block holding statement G. */
static inline tree
gimple_block (const_gimple g)
{
return g->gsbase.block;
}
/* Set BLOCK to be the lexical scope block holding statement G. */
static inline void
gimple_set_block (gimple g, tree block)
{
g->gsbase.block = block;
}
/* Return location information for statement G. */
static inline location_t
gimple_location (const_gimple g)
{
return g->gsbase.location;
}
/* Return pointer to location information for statement G. */
static inline const location_t *
gimple_location_ptr (const_gimple g)
{
return &g->gsbase.location;
}
/* Set location information for statement G. */
static inline void
gimple_set_location (gimple g, location_t location)
{
g->gsbase.location = location;
}
/* Return true if G contains location information. */
static inline bool
gimple_has_location (const_gimple g)
{
return gimple_location (g) != UNKNOWN_LOCATION;
}
/* Return the file name of the location of STMT. */
static inline const char *
gimple_filename (const_gimple stmt)
{
return LOCATION_FILE (gimple_location (stmt));
}
/* Return the line number of the location of STMT. */
static inline int
gimple_lineno (const_gimple stmt)
{
return LOCATION_LINE (gimple_location (stmt));
}
/* Determine whether SEQ is a singleton. */
static inline bool
gimple_seq_singleton_p (gimple_seq seq)
{
return ((gimple_seq_first (seq) != NULL)
&& (gimple_seq_first (seq) == gimple_seq_last (seq)));
}
/* Return true if no warnings should be emitted for statement STMT. */
static inline bool
gimple_no_warning_p (const_gimple stmt)
{
return stmt->gsbase.no_warning;
}
/* Set the no_warning flag of STMT to NO_WARNING. */
static inline void
gimple_set_no_warning (gimple stmt, bool no_warning)
{
stmt->gsbase.no_warning = (unsigned) no_warning;
}
/* Set the visited status on statement STMT to VISITED_P. */
static inline void
gimple_set_visited (gimple stmt, bool visited_p)
{
stmt->gsbase.visited = (unsigned) visited_p;
}
/* Return the visited status for statement STMT. */
static inline bool
gimple_visited_p (gimple stmt)
{
return stmt->gsbase.visited;
}
/* Set pass local flag PLF on statement STMT to VAL_P. */
static inline void
gimple_set_plf (gimple stmt, enum plf_mask plf, bool val_p)
{
if (val_p)
stmt->gsbase.plf |= (unsigned int) plf;
else
stmt->gsbase.plf &= ~((unsigned int) plf);
}
/* Return the value of pass local flag PLF on statement STMT. */
static inline unsigned int
gimple_plf (gimple stmt, enum plf_mask plf)
{
return stmt->gsbase.plf & ((unsigned int) plf);
}
/* Set the UID of statement. */
static inline void
gimple_set_uid (gimple g, unsigned uid)
{
g->gsbase.uid = uid;
}
/* Return the UID of statement. */
static inline unsigned
gimple_uid (const_gimple g)
{
return g->gsbase.uid;
}
/* Return true if GIMPLE statement G has register or memory operands. */
static inline bool
gimple_has_ops (const_gimple g)
{
return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN;
}
/* Return true if GIMPLE statement G has memory operands. */
static inline bool
gimple_has_mem_ops (const_gimple g)
{
return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN;
}
/* Return the set of DEF operands for statement G. */
static inline struct def_optype_d *
gimple_def_ops (const_gimple g)
{
if (!gimple_has_ops (g))
return NULL;
return g->gsops.opbase.def_ops;
}
/* Set DEF to be the set of DEF operands for statement G. */
static inline void
gimple_set_def_ops (gimple g, struct def_optype_d *def)
{
gcc_assert (gimple_has_ops (g));
g->gsops.opbase.def_ops = def;
}
/* Return the set of USE operands for statement G. */
static inline struct use_optype_d *
gimple_use_ops (const_gimple g)
{
if (!gimple_has_ops (g))
return NULL;
return g->gsops.opbase.use_ops;
}
/* Set USE to be the set of USE operands for statement G. */
static inline void
gimple_set_use_ops (gimple g, struct use_optype_d *use)
{
gcc_assert (gimple_has_ops (g));
g->gsops.opbase.use_ops = use;
}
/* Return the set of VUSE operand for statement G. */
static inline use_operand_p
gimple_vuse_op (const_gimple g)
{
struct use_optype_d *ops;
if (!gimple_has_mem_ops (g))
return NULL_USE_OPERAND_P;
ops = g->gsops.opbase.use_ops;
if (ops
&& USE_OP_PTR (ops)->use == &g->gsmembase.vuse)
return USE_OP_PTR (ops);
return NULL_USE_OPERAND_P;
}
/* Return the set of VDEF operand for statement G. */
static inline def_operand_p
gimple_vdef_op (const_gimple g)
{
struct def_optype_d *ops;
if (!gimple_has_mem_ops (g))
return NULL_DEF_OPERAND_P;
ops = g->gsops.opbase.def_ops;
if (ops
&& DEF_OP_PTR (ops) == &g->gsmembase.vdef)
return DEF_OP_PTR (ops);
return NULL_DEF_OPERAND_P;
}
/* Return the single VUSE operand of the statement G. */
static inline tree
gimple_vuse (const_gimple g)
{
if (!gimple_has_mem_ops (g))
return NULL_TREE;
return g->gsmembase.vuse;
}
/* Return the single VDEF operand of the statement G. */
static inline tree
gimple_vdef (const_gimple g)
{
if (!gimple_has_mem_ops (g))
return NULL_TREE;
return g->gsmembase.vdef;
}
/* Return the single VUSE operand of the statement G. */
static inline tree *
gimple_vuse_ptr (gimple g)
{
if (!gimple_has_mem_ops (g))
return NULL;
return &g->gsmembase.vuse;
}
/* Return the single VDEF operand of the statement G. */
static inline tree *
gimple_vdef_ptr (gimple g)
{
if (!gimple_has_mem_ops (g))
return NULL;
return &g->gsmembase.vdef;
}
/* Set the single VUSE operand of the statement G. */
static inline void
gimple_set_vuse (gimple g, tree vuse)
{
gcc_assert (gimple_has_mem_ops (g));
g->gsmembase.vuse = vuse;
}
/* Set the single VDEF operand of the statement G. */
static inline void
gimple_set_vdef (gimple g, tree vdef)
{
gcc_assert (gimple_has_mem_ops (g));
g->gsmembase.vdef = vdef;
}
/* Return true if statement G has operands and the modified field has
been set. */
static inline bool
gimple_modified_p (const_gimple g)
{
return (gimple_has_ops (g)) ? (bool) g->gsbase.modified : false;
}
/* Return the tree code for the expression computed by STMT. This is
only valid for GIMPLE_COND, GIMPLE_CALL and GIMPLE_ASSIGN. For
GIMPLE_CALL, return CALL_EXPR as the expression code for
consistency. This is useful when the caller needs to deal with the
three kinds of computation that GIMPLE supports. */
static inline enum tree_code
gimple_expr_code (const_gimple stmt)
{
enum gimple_code code = gimple_code (stmt);
if (code == GIMPLE_ASSIGN || code == GIMPLE_COND)
return (enum tree_code) stmt->gsbase.subcode;
else if (code == GIMPLE_CALL)
return CALL_EXPR;
else
gcc_unreachable ();
}
/* Mark statement S as modified, and update it. */
static inline void
update_stmt (gimple s)
{
if (gimple_has_ops (s))
{
gimple_set_modified (s, true);
update_stmt_operands (s);
}
}
/* Update statement S if it has been optimized. */
static inline void
update_stmt_if_modified (gimple s)
{
if (gimple_modified_p (s))
update_stmt_operands (s);
}
/* Return true if statement STMT contains volatile operands. */
static inline bool
gimple_has_volatile_ops (const_gimple stmt)
{
if (gimple_has_mem_ops (stmt))
return stmt->gsbase.has_volatile_ops;
else
return false;
}
/* Set the HAS_VOLATILE_OPS flag to VOLATILEP. */
static inline void
gimple_set_has_volatile_ops (gimple stmt, bool volatilep)
{
if (gimple_has_mem_ops (stmt))
stmt->gsbase.has_volatile_ops = (unsigned) volatilep;
}
/* Return true if statement STMT may access memory. */
static inline bool
gimple_references_memory_p (gimple stmt)
{
return gimple_has_mem_ops (stmt) && gimple_vuse (stmt);
}
/* Return the subcode for OMP statement S. */
static inline unsigned
gimple_omp_subcode (const_gimple s)
{
gcc_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD
&& gimple_code (s) <= GIMPLE_OMP_SINGLE);
return s->gsbase.subcode;
}
/* Set the subcode for OMP statement S to SUBCODE. */
static inline void
gimple_omp_set_subcode (gimple s, unsigned int subcode)
{
/* We only have 16 bits for the subcode. Assert that we are not
overflowing it. */
gcc_assert (subcode < (1 << 16));
s->gsbase.subcode = subcode;
}
/* Set the nowait flag on OMP_RETURN statement S. */
static inline void
gimple_omp_return_set_nowait (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_OMP_RETURN);
s->gsbase.subcode |= GF_OMP_RETURN_NOWAIT;
}
/* Return true if OMP return statement G has the GF_OMP_RETURN_NOWAIT
flag set. */
static inline bool
gimple_omp_return_nowait_p (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_RETURN);
return (gimple_omp_subcode (g) & GF_OMP_RETURN_NOWAIT) != 0;
}
/* Return true if OMP section statement G has the GF_OMP_SECTION_LAST
flag set. */
static inline bool
gimple_omp_section_last_p (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
return (gimple_omp_subcode (g) & GF_OMP_SECTION_LAST) != 0;
}
/* Set the GF_OMP_SECTION_LAST flag on G. */
static inline void
gimple_omp_section_set_last (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
g->gsbase.subcode |= GF_OMP_SECTION_LAST;
}
/* Return true if OMP parallel statement G has the
GF_OMP_PARALLEL_COMBINED flag set. */
static inline bool
gimple_omp_parallel_combined_p (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
return (gimple_omp_subcode (g) & GF_OMP_PARALLEL_COMBINED) != 0;
}
/* Set the GF_OMP_PARALLEL_COMBINED field in G depending on the boolean
value of COMBINED_P. */
static inline void
gimple_omp_parallel_set_combined_p (gimple g, bool combined_p)
{
GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
if (combined_p)
g->gsbase.subcode |= GF_OMP_PARALLEL_COMBINED;
else
g->gsbase.subcode &= ~GF_OMP_PARALLEL_COMBINED;
}
/* Return the number of operands for statement GS. */
static inline unsigned
gimple_num_ops (const_gimple gs)
{
return gs->gsbase.num_ops;
}
/* Set the number of operands for statement GS. */
static inline void
gimple_set_num_ops (gimple gs, unsigned num_ops)
{
gs->gsbase.num_ops = num_ops;
}
/* Return the array of operands for statement GS. */
static inline tree *
gimple_ops (gimple gs)
{
size_t off;
/* All the tuples have their operand vector at the very bottom
of the structure. Note that those structures that do not
have an operand vector have a zero offset. */
off = gimple_ops_offset_[gimple_statement_structure (gs)];
gcc_assert (off != 0);
return (tree *) ((char *) gs + off);
}
/* Return operand I for statement GS. */
static inline tree
gimple_op (const_gimple gs, unsigned i)
{
if (gimple_has_ops (gs))
{
#ifdef ENABLE_CHECKING
gcc_assert (i < gimple_num_ops (gs));
#endif
return gimple_ops (CONST_CAST_GIMPLE (gs))[i];
}
else
return NULL_TREE;
}
/* Return a pointer to operand I for statement GS. */
static inline tree *
gimple_op_ptr (const_gimple gs, unsigned i)
{
if (gimple_has_ops (gs))
{
#ifdef ENABLE_CHECKING
gcc_assert (i < gimple_num_ops (gs));
#endif
return gimple_ops (CONST_CAST_GIMPLE (gs)) + i;
}
else
return NULL;
}
/* Set operand I of statement GS to OP. */
static inline void
gimple_set_op (gimple gs, unsigned i, tree op)
{
gcc_assert (gimple_has_ops (gs) && i < gimple_num_ops (gs));
/* Note. It may be tempting to assert that OP matches
is_gimple_operand, but that would be wrong. Different tuples
accept slightly different sets of tree operands. Each caller
should perform its own validation. */
gimple_ops (gs)[i] = op;
}
/* Return true if GS is a GIMPLE_ASSIGN. */
static inline bool
is_gimple_assign (const_gimple gs)
{
return gimple_code (gs) == GIMPLE_ASSIGN;
}
/* Determine if expression CODE is one of the valid expressions that can
be used on the RHS of GIMPLE assignments. */
static inline enum gimple_rhs_class
get_gimple_rhs_class (enum tree_code code)
{
return (enum gimple_rhs_class) gimple_rhs_class_table[(int) code];
}
/* Return the LHS of assignment statement GS. */
static inline tree
gimple_assign_lhs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gimple_op (gs, 0);
}
/* Return a pointer to the LHS of assignment statement GS. */
static inline tree *
gimple_assign_lhs_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of assignment statement GS. */
static inline void
gimple_assign_set_lhs (gimple gs, tree lhs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
gimple_set_op (gs, 0, lhs);
if (lhs && TREE_CODE (lhs) == SSA_NAME)
SSA_NAME_DEF_STMT (lhs) = gs;
}
/* Return the first operand on the RHS of assignment statement GS. */
static inline tree
gimple_assign_rhs1 (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gimple_op (gs, 1);
}
/* Return a pointer to the first operand on the RHS of assignment
statement GS. */
static inline tree *
gimple_assign_rhs1_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gimple_op_ptr (gs, 1);
}
/* Set RHS to be the first operand on the RHS of assignment statement GS. */
static inline void
gimple_assign_set_rhs1 (gimple gs, tree rhs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
gimple_set_op (gs, 1, rhs);
}
/* Return the second operand on the RHS of assignment statement GS.
If GS does not have two operands, NULL is returned instead. */
static inline tree
gimple_assign_rhs2 (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
if (gimple_num_ops (gs) >= 3)
return gimple_op (gs, 2);
else
return NULL_TREE;
}
/* Return a pointer to the second operand on the RHS of assignment
statement GS. */
static inline tree *
gimple_assign_rhs2_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gimple_op_ptr (gs, 2);
}
/* Set RHS to be the second operand on the RHS of assignment statement GS. */
static inline void
gimple_assign_set_rhs2 (gimple gs, tree rhs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
gimple_set_op (gs, 2, rhs);
}
/* Return the third operand on the RHS of assignment statement GS.
If GS does not have three operands, NULL is returned instead. */
static inline tree
gimple_assign_rhs3 (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
if (gimple_num_ops (gs) >= 4)
return gimple_op (gs, 3);
else
return NULL_TREE;
}
/* Return a pointer to the third operand on the RHS of assignment
statement GS. */
static inline tree *
gimple_assign_rhs3_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gimple_op_ptr (gs, 3);
}
/* Set RHS to be the third operand on the RHS of assignment statement GS. */
static inline void
gimple_assign_set_rhs3 (gimple gs, tree rhs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
gimple_set_op (gs, 3, rhs);
}
/* A wrapper around gimple_assign_set_rhs_with_ops_1, for callers which expect
to see only a maximum of two operands. */
static inline void
gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code,
tree op1, tree op2)
{
gimple_assign_set_rhs_with_ops_1 (gsi, code, op1, op2, NULL);
}
/* A wrapper around extract_ops_from_tree_1, for callers which expect
to see only a maximum of two operands. */
static inline void
extract_ops_from_tree (tree expr, enum tree_code *code, tree *op0,
tree *op1)
{
tree op2;
extract_ops_from_tree_1 (expr, code, op0, op1, &op2);
gcc_assert (op2 == NULL_TREE);
}
/* Returns true if GS is a nontemporal move. */
static inline bool
gimple_assign_nontemporal_move_p (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gs->gsbase.nontemporal_move;
}
/* Sets nontemporal move flag of GS to NONTEMPORAL. */
static inline void
gimple_assign_set_nontemporal_move (gimple gs, bool nontemporal)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
gs->gsbase.nontemporal_move = nontemporal;
}
/* Return the code of the expression computed on the rhs of assignment
statement GS. In case that the RHS is a single object, returns the
tree code of the object. */
static inline enum tree_code
gimple_assign_rhs_code (const_gimple gs)
{
enum tree_code code;
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
code = gimple_expr_code (gs);
if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
code = TREE_CODE (gimple_assign_rhs1 (gs));
return code;
}
/* Set CODE to be the code for the expression computed on the RHS of
assignment S. */
static inline void
gimple_assign_set_rhs_code (gimple s, enum tree_code code)
{
GIMPLE_CHECK (s, GIMPLE_ASSIGN);
s->gsbase.subcode = code;
}
/* Return the gimple rhs class of the code of the expression computed on
the rhs of assignment statement GS.
This will never return GIMPLE_INVALID_RHS. */
static inline enum gimple_rhs_class
gimple_assign_rhs_class (const_gimple gs)
{
return get_gimple_rhs_class (gimple_assign_rhs_code (gs));
}
/* Return true if S is a type-cast assignment. */
static inline bool
gimple_assign_cast_p (gimple s)
{
if (is_gimple_assign (s))
{
enum tree_code sc = gimple_assign_rhs_code (s);
return CONVERT_EXPR_CODE_P (sc)
|| sc == VIEW_CONVERT_EXPR
|| sc == FIX_TRUNC_EXPR;
}
return false;
}
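/* Illustrative sketch (hypothetical helper): if STMT is a type-cast
   assignment in the sense of gimple_assign_cast_p, return the operand
   being converted, otherwise NULL_TREE.  */
static inline tree
example_cast_source (gimple stmt)
{
  return gimple_assign_cast_p (stmt) ? gimple_assign_rhs1 (stmt) : NULL_TREE;
}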
/* Return true if GS is a GIMPLE_CALL. */
static inline bool
is_gimple_call (const_gimple gs)
{
return gimple_code (gs) == GIMPLE_CALL;
}
/* Return the LHS of call statement GS. */
static inline tree
gimple_call_lhs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op (gs, 0);
}
/* Return a pointer to the LHS of call statement GS. */
static inline tree *
gimple_call_lhs_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of call statement GS. */
static inline void
gimple_call_set_lhs (gimple gs, tree lhs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
gimple_set_op (gs, 0, lhs);
if (lhs && TREE_CODE (lhs) == SSA_NAME)
SSA_NAME_DEF_STMT (lhs) = gs;
}
/* Return the tree node representing the function called by call
statement GS. */
static inline tree
gimple_call_fn (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op (gs, 1);
}
/* Return a pointer to the tree node representing the function called by call
statement GS. */
static inline tree *
gimple_call_fn_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op_ptr (gs, 1);
}
/* Set FN to be the function called by call statement GS. */
static inline void
gimple_call_set_fn (gimple gs, tree fn)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
gimple_set_op (gs, 1, fn);
}
/* Set FNDECL to be the function called by call statement GS. */
static inline void
gimple_call_set_fndecl (gimple gs, tree decl)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
gimple_set_op (gs, 1, build_fold_addr_expr_loc (gimple_location (gs), decl));
}
/* If a given GIMPLE_CALL's callee is a FUNCTION_DECL, return it.
Otherwise return NULL. This function is analogous to
get_callee_fndecl in tree land. */
static inline tree
gimple_call_fndecl (const_gimple gs)
{
tree addr = gimple_call_fn (gs);
if (TREE_CODE (addr) == ADDR_EXPR)
return TREE_OPERAND (addr, 0);
return NULL_TREE;
}
/* Return the type returned by call statement GS. */
static inline tree
gimple_call_return_type (const_gimple gs)
{
tree fn = gimple_call_fn (gs);
tree type = TREE_TYPE (fn);
/* See through the pointer. */
type = TREE_TYPE (type);
/* The return type of the function is the TREE_TYPE of its
   FUNCTION_TYPE.  */
return TREE_TYPE (type);
}
/* Return the static chain for call statement GS. */
static inline tree
gimple_call_chain (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op (gs, 2);
}
/* Return a pointer to the static chain for call statement GS. */
static inline tree *
gimple_call_chain_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op_ptr (gs, 2);
}
/* Set CHAIN to be the static chain for call statement GS. */
static inline void
gimple_call_set_chain (gimple gs, tree chain)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
gimple_set_op (gs, 2, chain);
}
/* Return the number of arguments used by call statement GS. */
static inline unsigned
gimple_call_num_args (const_gimple gs)
{
unsigned num_ops;
GIMPLE_CHECK (gs, GIMPLE_CALL);
num_ops = gimple_num_ops (gs);
return num_ops - 3;
}
/* Return the argument at position INDEX for call statement GS. */
static inline tree
gimple_call_arg (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op (gs, index + 3);
}
/* Return a pointer to the argument at position INDEX for call
statement GS. */
static inline tree *
gimple_call_arg_ptr (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op_ptr (gs, index + 3);
}
/* Set ARG to be the argument at position INDEX for call statement GS. */
static inline void
gimple_call_set_arg (gimple gs, unsigned index, tree arg)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
gimple_set_op (gs, index + 3, arg);
}
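/* Illustrative sketch (hypothetical helper): count the pointer-typed
   arguments of call statement STMT with the argument accessors above.  */
static inline unsigned
example_count_pointer_args (gimple stmt)
{
  unsigned i, n = 0;
  for (i = 0; i < gimple_call_num_args (stmt); i++)
    if (POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (stmt, i))))
      n++;
  return n;
}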
/* If TAIL_P is true, mark call statement S as being a tail call
(i.e., a call just before the exit of a function). These calls are
candidate for tail call optimization. */
static inline void
gimple_call_set_tail (gimple s, bool tail_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (tail_p)
s->gsbase.subcode |= GF_CALL_TAILCALL;
else
s->gsbase.subcode &= ~GF_CALL_TAILCALL;
}
/* Return true if GIMPLE_CALL S is marked as a tail call. */
static inline bool
gimple_call_tail_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (s->gsbase.subcode & GF_CALL_TAILCALL) != 0;
}
/* If CANNOT_INLINE_P is true, mark GIMPLE_CALL S as a call that cannot
   be inlined.  */
static inline void
gimple_call_set_cannot_inline (gimple s, bool cannot_inline_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (cannot_inline_p)
    s->gsbase.subcode |= GF_CALL_CANNOT_INLINE;
  else
    s->gsbase.subcode &= ~GF_CALL_CANNOT_INLINE;
}
/* Return true if GIMPLE_CALL S cannot be inlined. */
static inline bool
gimple_call_cannot_inline_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (s->gsbase.subcode & GF_CALL_CANNOT_INLINE) != 0;
}
/* If RETURN_SLOT_OPT_P is true mark GIMPLE_CALL S as valid for return
slot optimization. This transformation uses the target of the call
expansion as the return slot for calls that return in memory. */
static inline void
gimple_call_set_return_slot_opt (gimple s, bool return_slot_opt_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (return_slot_opt_p)
s->gsbase.subcode |= GF_CALL_RETURN_SLOT_OPT;
else
s->gsbase.subcode &= ~GF_CALL_RETURN_SLOT_OPT;
}
/* Return true if S is marked for return slot optimization. */
static inline bool
gimple_call_return_slot_opt_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (s->gsbase.subcode & GF_CALL_RETURN_SLOT_OPT) != 0;
}
/* If FROM_THUNK_P is true, mark GIMPLE_CALL S as being the jump from a
thunk to the thunked-to function. */
static inline void
gimple_call_set_from_thunk (gimple s, bool from_thunk_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (from_thunk_p)
s->gsbase.subcode |= GF_CALL_FROM_THUNK;
else
s->gsbase.subcode &= ~GF_CALL_FROM_THUNK;
}
/* Return true if GIMPLE_CALL S is a jump from a thunk. */
static inline bool
gimple_call_from_thunk_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (s->gsbase.subcode & GF_CALL_FROM_THUNK) != 0;
}
/* If PASS_ARG_PACK_P is true, GIMPLE_CALL S is a stdarg call that needs the
argument pack in its argument list. */
static inline void
gimple_call_set_va_arg_pack (gimple s, bool pass_arg_pack_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (pass_arg_pack_p)
s->gsbase.subcode |= GF_CALL_VA_ARG_PACK;
else
s->gsbase.subcode &= ~GF_CALL_VA_ARG_PACK;
}
/* Return true if GIMPLE_CALL S is a stdarg call that needs the
argument pack in its argument list. */
static inline bool
gimple_call_va_arg_pack_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (s->gsbase.subcode & GF_CALL_VA_ARG_PACK) != 0;
}
/* Return true if S is a noreturn call. */
static inline bool
gimple_call_noreturn_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (gimple_call_flags (s) & ECF_NORETURN) != 0;
}
/* If NOTHROW_P is true, GIMPLE_CALL S is a call that is known to not throw
even if the called function can throw in other cases. */
static inline void
gimple_call_set_nothrow (gimple s, bool nothrow_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (nothrow_p)
s->gsbase.subcode |= GF_CALL_NOTHROW;
else
s->gsbase.subcode &= ~GF_CALL_NOTHROW;
}
/* Return true if S is a nothrow call. */
static inline bool
gimple_call_nothrow_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (gimple_call_flags (s) & ECF_NOTHROW) != 0;
}
/* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL. */
static inline void
gimple_call_copy_flags (gimple dest_call, gimple orig_call)
{
GIMPLE_CHECK (dest_call, GIMPLE_CALL);
GIMPLE_CHECK (orig_call, GIMPLE_CALL);
dest_call->gsbase.subcode = orig_call->gsbase.subcode;
}
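/* Illustrative sketch (hypothetical helper): true if STMT is a call
   that has been marked as a tail call and whose callee is a known
   declaration, combining the predicates above.  */
static inline bool
example_tail_call_to_decl_p (gimple stmt)
{
  return is_gimple_call (stmt)
	 && gimple_call_tail_p (stmt)
	 && gimple_call_fndecl (stmt) != NULL_TREE;
}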
/* Returns true if this is a GIMPLE_ASSIGN or a GIMPLE_CALL with a
non-NULL lhs. */
static inline bool
gimple_has_lhs (gimple stmt)
{
return (is_gimple_assign (stmt)
|| (is_gimple_call (stmt)
&& gimple_call_lhs (stmt) != NULL_TREE));
}
/* Return the code of the predicate computed by conditional statement GS. */
static inline enum tree_code
gimple_cond_code (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return (enum tree_code) gs->gsbase.subcode;
}
/* Set CODE to be the predicate code for the conditional statement GS. */
static inline void
gimple_cond_set_code (gimple gs, enum tree_code code)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
gs->gsbase.subcode = code;
}
/* Return the LHS of the predicate computed by conditional statement GS. */
static inline tree
gimple_cond_lhs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op (gs, 0);
}
/* Return the pointer to the LHS of the predicate computed by conditional
statement GS. */
static inline tree *
gimple_cond_lhs_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of the predicate computed by
conditional statement GS. */
static inline void
gimple_cond_set_lhs (gimple gs, tree lhs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
gimple_set_op (gs, 0, lhs);
}
/* Return the RHS operand of the predicate computed by conditional GS. */
static inline tree
gimple_cond_rhs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op (gs, 1);
}
/* Return the pointer to the RHS operand of the predicate computed by
conditional GS. */
static inline tree *
gimple_cond_rhs_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op_ptr (gs, 1);
}
/* Set RHS to be the RHS operand of the predicate computed by
conditional statement GS. */
static inline void
gimple_cond_set_rhs (gimple gs, tree rhs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
gimple_set_op (gs, 1, rhs);
}
/* Return the label used by conditional statement GS when its
predicate evaluates to true. */
static inline tree
gimple_cond_true_label (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op (gs, 2);
}
/* Set LABEL to be the label used by conditional statement GS when its
predicate evaluates to true. */
static inline void
gimple_cond_set_true_label (gimple gs, tree label)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
gimple_set_op (gs, 2, label);
}
/* Set LABEL to be the label used by conditional statement GS when its
predicate evaluates to false. */
static inline void
gimple_cond_set_false_label (gimple gs, tree label)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
gimple_set_op (gs, 3, label);
}
/* Return the label used by conditional statement GS when its
predicate evaluates to false. */
static inline tree
gimple_cond_false_label (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op (gs, 3);
}
/* Set the conditional statement GS to be of the form 'if (1 == 0)'. */
static inline void
gimple_cond_make_false (gimple gs)
{
gimple_cond_set_lhs (gs, boolean_true_node);
gimple_cond_set_rhs (gs, boolean_false_node);
gs->gsbase.subcode = EQ_EXPR;
}
/* Set the conditional statement GS to be of the form 'if (1 == 1)'. */
static inline void
gimple_cond_make_true (gimple gs)
{
gimple_cond_set_lhs (gs, boolean_true_node);
gimple_cond_set_rhs (gs, boolean_true_node);
gs->gsbase.subcode = EQ_EXPR;
}
/* Check if conditional statement GS is of the form 'if (1 == 1)',
   'if (0 == 0)', 'if (1 != 0)' or 'if (0 != 1)'.  */
static inline bool
gimple_cond_true_p (const_gimple gs)
{
tree lhs = gimple_cond_lhs (gs);
tree rhs = gimple_cond_rhs (gs);
enum tree_code code = gimple_cond_code (gs);
if (lhs != boolean_true_node && lhs != boolean_false_node)
return false;
if (rhs != boolean_true_node && rhs != boolean_false_node)
return false;
if (code == NE_EXPR && lhs != rhs)
return true;
if (code == EQ_EXPR && lhs == rhs)
return true;
return false;
}
/* Check if conditional statement GS is of the form 'if (1 != 1)',
'if (0 != 0)', 'if (1 == 0)' or 'if (0 == 1)' */
static inline bool
gimple_cond_false_p (const_gimple gs)
{
tree lhs = gimple_cond_lhs (gs);
tree rhs = gimple_cond_rhs (gs);
enum tree_code code = gimple_cond_code (gs);
if (lhs != boolean_true_node && lhs != boolean_false_node)
return false;
if (rhs != boolean_true_node && rhs != boolean_false_node)
return false;
if (code == NE_EXPR && lhs == rhs)
return true;
if (code == EQ_EXPR && lhs != rhs)
return true;
return false;
}
/* Check if conditional statement GS is of the form 'if (var != 0)' or
   'if (var == 1)'.  */
static inline bool
gimple_cond_single_var_p (gimple gs)
{
if (gimple_cond_code (gs) == NE_EXPR
&& gimple_cond_rhs (gs) == boolean_false_node)
return true;
if (gimple_cond_code (gs) == EQ_EXPR
&& gimple_cond_rhs (gs) == boolean_true_node)
return true;
return false;
}
/* Set the code, LHS and RHS of GIMPLE_COND STMT from CODE, LHS and RHS. */
static inline void
gimple_cond_set_condition (gimple stmt, enum tree_code code, tree lhs, tree rhs)
{
gimple_cond_set_code (stmt, code);
gimple_cond_set_lhs (stmt, lhs);
gimple_cond_set_rhs (stmt, rhs);
}
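/* Illustrative sketch (hypothetical helper): re-express a compile-time
   constant condition in the canonical '1 == 1' / '1 == 0' form produced
   by gimple_cond_make_true and gimple_cond_make_false above.  */
static inline void
example_cond_canonicalize_constant (gimple stmt)
{
  if (gimple_cond_true_p (stmt))
    gimple_cond_make_true (stmt);
  else if (gimple_cond_false_p (stmt))
    gimple_cond_make_false (stmt);
}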
/* Return the LABEL_DECL node used by GIMPLE_LABEL statement GS. */
static inline tree
gimple_label_label (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_LABEL);
return gimple_op (gs, 0);
}
/* Set LABEL to be the LABEL_DECL node used by GIMPLE_LABEL statement
GS. */
static inline void
gimple_label_set_label (gimple gs, tree label)
{
GIMPLE_CHECK (gs, GIMPLE_LABEL);
gimple_set_op (gs, 0, label);
}
/* Return the destination of the unconditional jump GS. */
static inline tree
gimple_goto_dest (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_GOTO);
return gimple_op (gs, 0);
}
/* Set DEST to be the destination of the unconditional jump GS. */
static inline void
gimple_goto_set_dest (gimple gs, tree dest)
{
GIMPLE_CHECK (gs, GIMPLE_GOTO);
gimple_set_op (gs, 0, dest);
}
/* Return the variables declared in the GIMPLE_BIND statement GS. */
static inline tree
gimple_bind_vars (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
return gs->gimple_bind.vars;
}
/* Set VARS to be the set of variables declared in the GIMPLE_BIND
statement GS. */
static inline void
gimple_bind_set_vars (gimple gs, tree vars)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gs->gimple_bind.vars = vars;
}
/* Append VARS to the set of variables declared in the GIMPLE_BIND
statement GS. */
static inline void
gimple_bind_append_vars (gimple gs, tree vars)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gs->gimple_bind.vars = chainon (gs->gimple_bind.vars, vars);
}
/* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS. */
static inline gimple_seq
gimple_bind_body (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
return gs->gimple_bind.body;
}
/* Set SEQ to be the GIMPLE sequence contained in the GIMPLE_BIND
statement GS. */
static inline void
gimple_bind_set_body (gimple gs, gimple_seq seq)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gs->gimple_bind.body = seq;
}
/* Append a statement to the end of a GIMPLE_BIND's body. */
static inline void
gimple_bind_add_stmt (gimple gs, gimple stmt)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gimple_seq_add_stmt (&gs->gimple_bind.body, stmt);
}
/* Append a sequence of statements to the end of a GIMPLE_BIND's body. */
static inline void
gimple_bind_add_seq (gimple gs, gimple_seq seq)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gimple_seq_add_seq (&gs->gimple_bind.body, seq);
}
/* Return the TREE_BLOCK node associated with GIMPLE_BIND statement
GS. This is analogous to the BIND_EXPR_BLOCK field in trees. */
static inline tree
gimple_bind_block (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
return gs->gimple_bind.block;
}
/* Set BLOCK to be the TREE_BLOCK node associated with GIMPLE_BIND
statement GS. */
static inline void
gimple_bind_set_block (gimple gs, tree block)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gcc_assert (block == NULL_TREE || TREE_CODE (block) == BLOCK);
gs->gimple_bind.block = block;
}
/* Return the number of input operands for GIMPLE_ASM GS. */
static inline unsigned
gimple_asm_ninputs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.ni;
}
/* Return the number of output operands for GIMPLE_ASM GS. */
static inline unsigned
gimple_asm_noutputs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.no;
}
/* Return the number of clobber operands for GIMPLE_ASM GS. */
static inline unsigned
gimple_asm_nclobbers (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.nc;
}
/* Return the number of label operands for GIMPLE_ASM GS. */
static inline unsigned
gimple_asm_nlabels (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.nl;
}
/* Return input operand INDEX of GIMPLE_ASM GS. */
static inline tree
gimple_asm_input_op (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.ni);
return gimple_op (gs, index);
}
/* Return a pointer to input operand INDEX of GIMPLE_ASM GS. */
static inline tree *
gimple_asm_input_op_ptr (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.ni);
return gimple_op_ptr (gs, index);
}
/* Set IN_OP to be input operand INDEX in GIMPLE_ASM GS. */
static inline void
gimple_asm_set_input_op (gimple gs, unsigned index, tree in_op)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.ni);
gcc_assert (TREE_CODE (in_op) == TREE_LIST);
gimple_set_op (gs, index, in_op);
}
/* Return output operand INDEX of GIMPLE_ASM GS. */
static inline tree
gimple_asm_output_op (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.no);
return gimple_op (gs, index + gs->gimple_asm.ni);
}
/* Return a pointer to output operand INDEX of GIMPLE_ASM GS. */
static inline tree *
gimple_asm_output_op_ptr (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.no);
return gimple_op_ptr (gs, index + gs->gimple_asm.ni);
}
/* Set OUT_OP to be output operand INDEX in GIMPLE_ASM GS. */
static inline void
gimple_asm_set_output_op (gimple gs, unsigned index, tree out_op)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.no);
gcc_assert (TREE_CODE (out_op) == TREE_LIST);
gimple_set_op (gs, index + gs->gimple_asm.ni, out_op);
}
/* Return clobber operand INDEX of GIMPLE_ASM GS. */
static inline tree
gimple_asm_clobber_op (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.nc);
return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no);
}
/* Set CLOBBER_OP to be clobber operand INDEX in GIMPLE_ASM GS. */
static inline void
gimple_asm_set_clobber_op (gimple gs, unsigned index, tree clobber_op)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.nc);
gcc_assert (TREE_CODE (clobber_op) == TREE_LIST);
gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no, clobber_op);
}
/* Return label operand INDEX of GIMPLE_ASM GS. */
static inline tree
gimple_asm_label_op (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.nl);
return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc);
}
/* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM GS. */
static inline void
gimple_asm_set_label_op (gimple gs, unsigned index, tree label_op)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.nl);
gcc_assert (TREE_CODE (label_op) == TREE_LIST);
gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc, label_op);
}
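/* Illustrative sketch (hypothetical helper): the total number of operand
   slots implied by the GIMPLE_ASM counters above, assuming the operands
   are exactly the inputs, outputs, clobbers and labels.  */
static inline unsigned
example_asm_total_ops (const_gimple gs)
{
  return gimple_asm_ninputs (gs) + gimple_asm_noutputs (gs)
	 + gimple_asm_nclobbers (gs) + gimple_asm_nlabels (gs);
}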
/* Return the string representing the assembly instruction in
GIMPLE_ASM GS. */
static inline const char *
gimple_asm_string (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.string;
}
/* Return true if GS is an asm statement marked volatile. */
static inline bool
gimple_asm_volatile_p (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return (gs->gsbase.subcode & GF_ASM_VOLATILE) != 0;
}
/* If VOLATILE_P is true, mark asm statement GS as volatile. */
static inline void
gimple_asm_set_volatile (gimple gs, bool volatile_p)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
if (volatile_p)
gs->gsbase.subcode |= GF_ASM_VOLATILE;
else
gs->gsbase.subcode &= ~GF_ASM_VOLATILE;
}
/* If INPUT_P is true, mark asm GS as an ASM_INPUT. */
static inline void
gimple_asm_set_input (gimple gs, bool input_p)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
if (input_p)
gs->gsbase.subcode |= GF_ASM_INPUT;
else
gs->gsbase.subcode &= ~GF_ASM_INPUT;
}
/* Return true if asm GS is an ASM_INPUT. */
static inline bool
gimple_asm_input_p (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return (gs->gsbase.subcode & GF_ASM_INPUT) != 0;
}
/* Return the types handled by GIMPLE_CATCH statement GS. */
static inline tree
gimple_catch_types (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
return gs->gimple_catch.types;
}
/* Return a pointer to the types handled by GIMPLE_CATCH statement GS. */
static inline tree *
gimple_catch_types_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
return &gs->gimple_catch.types;
}
/* Return the GIMPLE sequence representing the body of the handler of
GIMPLE_CATCH statement GS. */
static inline gimple_seq
gimple_catch_handler (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
return gs->gimple_catch.handler;
}
/* Return a pointer to the GIMPLE sequence representing the body of
the handler of GIMPLE_CATCH statement GS. */
static inline gimple_seq *
gimple_catch_handler_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
return &gs->gimple_catch.handler;
}
/* Set T to be the set of types handled by GIMPLE_CATCH GS. */
static inline void
gimple_catch_set_types (gimple gs, tree t)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
gs->gimple_catch.types = t;
}
/* Set HANDLER to be the body of GIMPLE_CATCH GS. */
static inline void
gimple_catch_set_handler (gimple gs, gimple_seq handler)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
gs->gimple_catch.handler = handler;
}
/* Return the types handled by GIMPLE_EH_FILTER statement GS. */
static inline tree
gimple_eh_filter_types (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
return gs->gimple_eh_filter.types;
}
/* Return a pointer to the types handled by GIMPLE_EH_FILTER statement
GS. */
static inline tree *
gimple_eh_filter_types_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
return &gs->gimple_eh_filter.types;
}
/* Return the sequence of statements to execute when the GIMPLE_EH_FILTER
   statement GS fails.  */
static inline gimple_seq
gimple_eh_filter_failure (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
return gs->gimple_eh_filter.failure;
}
/* Set TYPES to be the set of types handled by GIMPLE_EH_FILTER GS. */
static inline void
gimple_eh_filter_set_types (gimple gs, tree types)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
gs->gimple_eh_filter.types = types;
}
/* Set FAILURE to be the sequence of statements to execute on failure
for GIMPLE_EH_FILTER GS. */
static inline void
gimple_eh_filter_set_failure (gimple gs, gimple_seq failure)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
gs->gimple_eh_filter.failure = failure;
}
/* Get the function decl to be called by the MUST_NOT_THROW region. */
static inline tree
gimple_eh_must_not_throw_fndecl (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
return gs->gimple_eh_mnt.fndecl;
}
/* Set the function decl to be called by GS to DECL. */
static inline void
gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl)
{
GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
gs->gimple_eh_mnt.fndecl = decl;
}
/* GIMPLE_TRY accessors. */
/* Return the kind of try block represented by GIMPLE_TRY GS. This is
either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY. */
static inline enum gimple_try_flags
gimple_try_kind (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
return (enum gimple_try_flags) (gs->gsbase.subcode & GIMPLE_TRY_KIND);
}
/* Set the kind of try block represented by GIMPLE_TRY GS. */
static inline void
gimple_try_set_kind (gimple gs, enum gimple_try_flags kind)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
gcc_assert (kind == GIMPLE_TRY_CATCH || kind == GIMPLE_TRY_FINALLY);
if (gimple_try_kind (gs) != kind)
gs->gsbase.subcode = (unsigned int) kind;
}
/* Return the GIMPLE_TRY_CATCH_IS_CLEANUP flag. */
static inline bool
gimple_try_catch_is_cleanup (const_gimple gs)
{
gcc_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH);
return (gs->gsbase.subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0;
}
/* Return the sequence of statements used as the body for GIMPLE_TRY GS. */
static inline gimple_seq
gimple_try_eval (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
return gs->gimple_try.eval;
}
/* Return the sequence of statements used as the cleanup body for
GIMPLE_TRY GS. */
static inline gimple_seq
gimple_try_cleanup (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
return gs->gimple_try.cleanup;
}
/* Set the GIMPLE_TRY_CATCH_IS_CLEANUP flag. */
static inline void
gimple_try_set_catch_is_cleanup (gimple g, bool catch_is_cleanup)
{
gcc_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH);
if (catch_is_cleanup)
g->gsbase.subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP;
else
g->gsbase.subcode &= ~GIMPLE_TRY_CATCH_IS_CLEANUP;
}
/* Set EVAL to be the sequence of statements to use as the body for
GIMPLE_TRY GS. */
static inline void
gimple_try_set_eval (gimple gs, gimple_seq eval)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
gs->gimple_try.eval = eval;
}
/* Set CLEANUP to be the sequence of statements to use as the cleanup
body for GIMPLE_TRY GS. */
static inline void
gimple_try_set_cleanup (gimple gs, gimple_seq cleanup)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
gs->gimple_try.cleanup = cleanup;
}
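/* Illustrative sketch (hypothetical helper): true if GS is a GIMPLE_TRY
   representing a try/finally construct, per gimple_try_kind above.  */
static inline bool
example_try_finally_p (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_TRY
	 && gimple_try_kind (gs) == GIMPLE_TRY_FINALLY;
}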
/* Return the cleanup sequence for cleanup statement GS. */
static inline gimple_seq
gimple_wce_cleanup (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
return gs->gimple_wce.cleanup;
}
/* Set CLEANUP to be the cleanup sequence for GS. */
static inline void
gimple_wce_set_cleanup (gimple gs, gimple_seq cleanup)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
gs->gimple_wce.cleanup = cleanup;
}
/* Return the CLEANUP_EH_ONLY flag for a WCE tuple. */
static inline bool
gimple_wce_cleanup_eh_only (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
return gs->gsbase.subcode != 0;
}
/* Set the CLEANUP_EH_ONLY flag for a WCE tuple. */
static inline void
gimple_wce_set_cleanup_eh_only (gimple gs, bool eh_only_p)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
gs->gsbase.subcode = (unsigned int) eh_only_p;
}
/* Return the maximum number of arguments supported by GIMPLE_PHI GS. */
static inline unsigned
gimple_phi_capacity (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return gs->gimple_phi.capacity;
}
/* Return the number of arguments in GIMPLE_PHI GS. This must always
be exactly the number of incoming edges for the basic block holding
GS. */
static inline unsigned
gimple_phi_num_args (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return gs->gimple_phi.nargs;
}
/* Return the SSA name created by GIMPLE_PHI GS. */
static inline tree
gimple_phi_result (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return gs->gimple_phi.result;
}
/* Return a pointer to the SSA name created by GIMPLE_PHI GS. */
static inline tree *
gimple_phi_result_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return &gs->gimple_phi.result;
}
/* Set RESULT to be the SSA name created by GIMPLE_PHI GS. */
static inline void
gimple_phi_set_result (gimple gs, tree result)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
gs->gimple_phi.result = result;
}
/* Return the PHI argument corresponding to incoming edge INDEX for
GIMPLE_PHI GS. */
static inline struct phi_arg_d *
gimple_phi_arg (gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
gcc_assert (index < gs->gimple_phi.capacity);
return &(gs->gimple_phi.args[index]);
}
/* Set PHIARG to be the argument corresponding to incoming edge INDEX
for GIMPLE_PHI GS. */
static inline void
gimple_phi_set_arg (gimple gs, unsigned index, struct phi_arg_d * phiarg)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
gcc_assert (index < gs->gimple_phi.nargs);
memcpy (gs->gimple_phi.args + index, phiarg, sizeof (struct phi_arg_d));
}
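/* Illustrative sketch (hypothetical helper): count how many arguments of
   GIMPLE_PHI GS carry the value VAL.  Assumes the DEF field of struct
   phi_arg_d holds the argument value, as elsewhere in GCC.  */
static inline unsigned
example_phi_count_arg_value (gimple gs, tree val)
{
  unsigned i, n = 0;
  for (i = 0; i < gimple_phi_num_args (gs); i++)
    if (gimple_phi_arg (gs, i)->def == val)
      n++;
  return n;
}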
/* Return the region number for GIMPLE_RESX GS. */
static inline int
gimple_resx_region (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_RESX);
return gs->gimple_eh_ctrl.region;
}
/* Set REGION to be the region number for GIMPLE_RESX GS. */
static inline void
gimple_resx_set_region (gimple gs, int region)
{
GIMPLE_CHECK (gs, GIMPLE_RESX);
gs->gimple_eh_ctrl.region = region;
}
/* Return the region number for GIMPLE_EH_DISPATCH GS. */
static inline int
gimple_eh_dispatch_region (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH);
return gs->gimple_eh_ctrl.region;
}
/* Set REGION to be the region number for GIMPLE_EH_DISPATCH GS. */
static inline void
gimple_eh_dispatch_set_region (gimple gs, int region)
{
GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH);
gs->gimple_eh_ctrl.region = region;
}
/* Return the number of labels associated with the switch statement GS. */
static inline unsigned
gimple_switch_num_labels (const_gimple gs)
{
unsigned num_ops;
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
num_ops = gimple_num_ops (gs);
gcc_assert (num_ops > 1);
return num_ops - 1;
}
/* Set NLABELS to be the number of labels for the switch statement GS. */
static inline void
gimple_switch_set_num_labels (gimple g, unsigned nlabels)
{
GIMPLE_CHECK (g, GIMPLE_SWITCH);
gimple_set_num_ops (g, nlabels + 1);
}
/* Return the index variable used by the switch statement GS. */
static inline tree
gimple_switch_index (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
return gimple_op (gs, 0);
}
/* Return a pointer to the index variable for the switch statement GS. */
static inline tree *
gimple_switch_index_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
return gimple_op_ptr (gs, 0);
}
/* Set INDEX to be the index variable for switch statement GS. */
static inline void
gimple_switch_set_index (gimple gs, tree index)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
gcc_assert (SSA_VAR_P (index) || CONSTANT_CLASS_P (index));
gimple_set_op (gs, 0, index);
}
/* Return the label numbered INDEX of switch statement GS.  Index 0 is
   the default label; the case labels follow.  */
static inline tree
gimple_switch_label (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
gcc_assert (gimple_num_ops (gs) > index + 1);
return gimple_op (gs, index + 1);
}
/* Set the label number INDEX to LABEL. 0 is always the default label. */
static inline void
gimple_switch_set_label (gimple gs, unsigned index, tree label)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
gcc_assert (gimple_num_ops (gs) > index + 1);
gcc_assert (label == NULL_TREE || TREE_CODE (label) == CASE_LABEL_EXPR);
gimple_set_op (gs, index + 1, label);
}
/* Return the default label for a switch statement. */
static inline tree
gimple_switch_default_label (const_gimple gs)
{
return gimple_switch_label (gs, 0);
}
/* Set the default label for a switch statement. */
static inline void
gimple_switch_set_default_label (gimple gs, tree label)
{
gimple_switch_set_label (gs, 0, label);
}
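/* Illustrative sketch (hypothetical helper): return the first non-default
   case label of switch statement GS whose CASE_LOW equals VAL, or
   NULL_TREE.  CASE_LOW and tree_int_cst_equal are the usual tree-level
   facilities; index 0 is skipped because it is the default label.  */
static inline tree
example_switch_find_case (const_gimple gs, tree val)
{
  unsigned i;
  for (i = 1; i < gimple_switch_num_labels (gs); i++)
    {
      tree elt = gimple_switch_label (gs, i);
      if (elt != NULL_TREE
	  && CASE_LOW (elt) != NULL_TREE
	  && tree_int_cst_equal (CASE_LOW (elt), val))
	return elt;
    }
  return NULL_TREE;
}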
/* Return true if GS is a GIMPLE_DEBUG statement. */
static inline bool
is_gimple_debug (const_gimple gs)
{
return gimple_code (gs) == GIMPLE_DEBUG;
}
/* Return true if S is a GIMPLE_DEBUG BIND statement. */
static inline bool
gimple_debug_bind_p (const_gimple s)
{
if (is_gimple_debug (s))
return s->gsbase.subcode == GIMPLE_DEBUG_BIND;
return false;
}
/* Return the variable bound in a GIMPLE_DEBUG bind statement. */
static inline tree
gimple_debug_bind_get_var (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
return gimple_op (dbg, 0);
}
/* Return the value bound to the variable in a GIMPLE_DEBUG bind
statement. */
static inline tree
gimple_debug_bind_get_value (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
return gimple_op (dbg, 1);
}
/* Return a pointer to the value bound to the variable in a
GIMPLE_DEBUG bind statement. */
static inline tree *
gimple_debug_bind_get_value_ptr (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
return gimple_op_ptr (dbg, 1);
}
/* Set the variable bound in a GIMPLE_DEBUG bind statement. */
static inline void
gimple_debug_bind_set_var (gimple dbg, tree var)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
gimple_set_op (dbg, 0, var);
}
/* Set the value bound to the variable in a GIMPLE_DEBUG bind
statement. */
static inline void
gimple_debug_bind_set_value (gimple dbg, tree value)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
gimple_set_op (dbg, 1, value);
}
/* The second operand of a GIMPLE_DEBUG_BIND, when the value was
optimized away. */
#define GIMPLE_DEBUG_BIND_NOVALUE NULL_TREE /* error_mark_node */
/* Remove the value bound to the variable in a GIMPLE_DEBUG bind
statement. */
static inline void
gimple_debug_bind_reset_value (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
gimple_set_op (dbg, 1, GIMPLE_DEBUG_BIND_NOVALUE);
}
/* Return true if the GIMPLE_DEBUG bind statement is bound to a
value. */
static inline bool
gimple_debug_bind_has_value_p (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
return gimple_op (dbg, 1) != GIMPLE_DEBUG_BIND_NOVALUE;
}
#undef GIMPLE_DEBUG_BIND_NOVALUE
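/* Illustrative sketch (hypothetical helper): true if STMT is a debug bind
   whose variable is still bound to a value, i.e. the value has not been
   removed by gimple_debug_bind_reset_value above.  */
static inline bool
example_live_debug_bind_p (gimple stmt)
{
  return gimple_debug_bind_p (stmt)
	 && gimple_debug_bind_has_value_p (stmt);
}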
/* Return the body for the OMP statement GS. */
static inline gimple_seq
gimple_omp_body (gimple gs)
{
return gs->omp.body;
}
/* Set BODY to be the body for the OMP statement GS. */
static inline void
gimple_omp_set_body (gimple gs, gimple_seq body)
{
gs->omp.body = body;
}
/* Return the name associated with OMP_CRITICAL statement GS. */
static inline tree
gimple_omp_critical_name (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
return gs->gimple_omp_critical.name;
}
/* Return a pointer to the name associated with OMP critical statement GS. */
static inline tree *
gimple_omp_critical_name_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
return &gs->gimple_omp_critical.name;
}
/* Set NAME to be the name associated with OMP critical statement GS. */
static inline void
gimple_omp_critical_set_name (gimple gs, tree name)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
gs->gimple_omp_critical.name = name;
}
/* Return the clauses associated with OMP_FOR GS. */
static inline tree
gimple_omp_for_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return gs->gimple_omp_for.clauses;
}
/* Return a pointer to the clauses associated with OMP_FOR GS. */
static inline tree *
gimple_omp_for_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return &gs->gimple_omp_for.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_FOR GS. */
static inline void
gimple_omp_for_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gs->gimple_omp_for.clauses = clauses;
}
/* Get the collapse count of OMP_FOR GS. */
static inline size_t
gimple_omp_for_collapse (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return gs->gimple_omp_for.collapse;
}
/* Return the index variable for OMP_FOR GS. */
static inline tree
gimple_omp_for_index (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].index;
}
/* Return a pointer to the index variable for OMP_FOR GS. */
static inline tree *
gimple_omp_for_index_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].index;
}
/* Set INDEX to be the index variable for OMP_FOR GS. */
static inline void
gimple_omp_for_set_index (gimple gs, size_t i, tree index)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].index = index;
}
/* Return the initial value for OMP_FOR GS. */
static inline tree
gimple_omp_for_initial (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].initial;
}
/* Return a pointer to the initial value for OMP_FOR GS. */
static inline tree *
gimple_omp_for_initial_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].initial;
}
/* Set INITIAL to be the initial value for OMP_FOR GS. */
static inline void
gimple_omp_for_set_initial (gimple gs, size_t i, tree initial)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].initial = initial;
}
/* Return the final value for OMP_FOR GS. */
static inline tree
gimple_omp_for_final (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].final;
}
/* Return a pointer to the final value for OMP_FOR GS. */
static inline tree *
gimple_omp_for_final_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].final;
}
/* Set FINAL to be the final value for OMP_FOR GS. */
static inline void
gimple_omp_for_set_final (gimple gs, size_t i, tree final)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].final = final;
}
/* Return the increment value for OMP_FOR GS. */
static inline tree
gimple_omp_for_incr (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].incr;
}
/* Return a pointer to the increment value for OMP_FOR GS. */
static inline tree *
gimple_omp_for_incr_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].incr;
}
/* Set INCR to be the increment value for OMP_FOR GS. */
static inline void
gimple_omp_for_set_incr (gimple gs, size_t i, tree incr)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].incr = incr;
}
/* Return the sequence of statements to execute before the OMP_FOR
statement GS starts. */
static inline gimple_seq
gimple_omp_for_pre_body (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return gs->gimple_omp_for.pre_body;
}
/* Set PRE_BODY to be the sequence of statements to execute before the
OMP_FOR statement GS starts. */
static inline void
gimple_omp_for_set_pre_body (gimple gs, gimple_seq pre_body)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gs->gimple_omp_for.pre_body = pre_body;
}
/* Return the clauses associated with OMP_PARALLEL GS. */
static inline tree
gimple_omp_parallel_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_PARALLEL GS. */
static inline tree *
gimple_omp_parallel_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL
GS. */
static inline void
gimple_omp_parallel_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_PARALLEL GS. */
static inline tree
gimple_omp_parallel_child_fn (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_PARALLEL GS. */
static inline tree *
gimple_omp_parallel_child_fn_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_PARALLEL GS. */
static inline void
gimple_omp_parallel_set_child_fn (gimple gs, tree child_fn)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_PARALLEL GS. */
static inline tree
gimple_omp_parallel_data_arg (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_PARALLEL GS. */
static inline tree *
gimple_omp_parallel_data_arg_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_PARALLEL GS. */
static inline void
gimple_omp_parallel_set_data_arg (gimple gs, tree data_arg)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
gs->gimple_omp_parallel.data_arg = data_arg;
}
/* Return the clauses associated with OMP_TASK GS. */
static inline tree
gimple_omp_task_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_TASK GS. */
static inline tree *
gimple_omp_task_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_TASK
GS. */
static inline void
gimple_omp_task_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_TASK GS. */
static inline tree
gimple_omp_task_child_fn (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_TASK GS. */
static inline tree *
gimple_omp_task_child_fn_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_TASK GS. */
static inline void
gimple_omp_task_set_child_fn (gimple gs, tree child_fn)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_TASK GS. */
static inline tree
gimple_omp_task_data_arg (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_TASK GS. */
static inline tree *
gimple_omp_task_data_arg_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_TASK GS. */
static inline void
gimple_omp_task_set_data_arg (gimple gs, tree data_arg)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.data_arg = data_arg;
}
/* Return the clauses associated with OMP_PARALLEL or OMP_TASK GS. */
static inline tree
gimple_omp_taskreg_clauses (const_gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_PARALLEL or
   OMP_TASK GS.  */
static inline tree *
gimple_omp_taskreg_clauses_ptr (gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL
   or OMP_TASK GS.  */
static inline void
gimple_omp_taskreg_set_clauses (gimple gs, tree clauses)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_PARALLEL or
   OMP_TASK GS.  */
static inline tree
gimple_omp_taskreg_child_fn (const_gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
   OMP_PARALLEL or OMP_TASK GS.  */
static inline tree *
gimple_omp_taskreg_child_fn_ptr (gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_PARALLEL or OMP_TASK GS. */
static inline void
gimple_omp_taskreg_set_child_fn (gimple gs, tree child_fn)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
   from the parent to the children threads in OMP_PARALLEL or OMP_TASK GS.  */
static inline tree
gimple_omp_taskreg_data_arg (const_gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_PARALLEL or OMP_TASK GS. */
static inline tree *
gimple_omp_taskreg_data_arg_ptr (gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_PARALLEL or OMP_TASK GS. */
static inline void
gimple_omp_taskreg_set_data_arg (gimple gs, tree data_arg)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.data_arg = data_arg;
}
/* Return the copy function used by OMP_TASK GS to initialize its data
   block.  */
static inline tree
gimple_omp_task_copy_fn (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_task.copy_fn;
}
/* Return a pointer to the copy function of OMP_TASK GS. */
static inline tree *
gimple_omp_task_copy_fn_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_task.copy_fn;
}
/* Set COPY_FN to be the copy function for OMP_TASK GS. */
static inline void
gimple_omp_task_set_copy_fn (gimple gs, tree copy_fn)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_task.copy_fn = copy_fn;
}
/* Return the size in bytes of the data block of OMP_TASK GS. */
static inline tree
gimple_omp_task_arg_size (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_task.arg_size;
}
/* Return a pointer to the data block size for OMP_TASK GS. */
static inline tree *
gimple_omp_task_arg_size_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_task.arg_size;
}
/* Set ARG_SIZE to be the data block size for OMP_TASK GS. */
static inline void
gimple_omp_task_set_arg_size (gimple gs, tree arg_size)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_task.arg_size = arg_size;
}
/* Return the alignment in bytes of the data block of OMP_TASK GS. */
static inline tree
gimple_omp_task_arg_align (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_task.arg_align;
}
/* Return a pointer to the data block align for OMP_TASK GS. */
static inline tree *
gimple_omp_task_arg_align_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_task.arg_align;
}
/* Set ARG_ALIGN to be the data block alignment for OMP_TASK GS. */
static inline void
gimple_omp_task_set_arg_align (gimple gs, tree arg_align)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_task.arg_align = arg_align;
}
/* Return the clauses associated with OMP_SINGLE GS. */
static inline tree
gimple_omp_single_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
return gs->gimple_omp_single.clauses;
}
/* Return a pointer to the clauses associated with OMP_SINGLE GS. */
static inline tree *
gimple_omp_single_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
return &gs->gimple_omp_single.clauses;
}
/* Set CLAUSES to be the clauses associated with OMP_SINGLE GS. */
static inline void
gimple_omp_single_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
gs->gimple_omp_single.clauses = clauses;
}
/* Return the clauses associated with OMP_SECTIONS GS. */
static inline tree
gimple_omp_sections_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return gs->gimple_omp_sections.clauses;
}
/* Return a pointer to the clauses associated with OMP_SECTIONS GS. */
static inline tree *
gimple_omp_sections_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return &gs->gimple_omp_sections.clauses;
}
/* Set CLAUSES to be the set of clauses associated with OMP_SECTIONS
GS. */
static inline void
gimple_omp_sections_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
gs->gimple_omp_sections.clauses = clauses;
}
/* Return the control variable associated with the GIMPLE_OMP_SECTIONS
in GS. */
static inline tree
gimple_omp_sections_control (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return gs->gimple_omp_sections.control;
}
/* Return a pointer to the control variable associated with the
   GIMPLE_OMP_SECTIONS in GS.  */
static inline tree *
gimple_omp_sections_control_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return &gs->gimple_omp_sections.control;
}
/* Set CONTROL to be the control variable associated with the
   GIMPLE_OMP_SECTIONS in GS.  */
static inline void
gimple_omp_sections_set_control (gimple gs, tree control)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
gs->gimple_omp_sections.control = control;
}
/* Set COND to be the condition code for OMP_FOR GS. */
static inline void
gimple_omp_for_set_cond (gimple gs, size_t i, enum tree_code cond)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (TREE_CODE_CLASS (cond) == tcc_comparison);
gcc_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].cond = cond;
}
/* Return the condition code associated with OMP_FOR GS. */
static inline enum tree_code
gimple_omp_for_cond (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].cond;
}
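/* Illustrative sketch (hypothetical helper): true if every collapsed
   dimension of OMP_FOR GS iterates with a strict 'less than' condition.  */
static inline bool
example_omp_for_all_lt_p (gimple gs)
{
  size_t i;
  for (i = 0; i < gimple_omp_for_collapse (gs); i++)
    if (gimple_omp_for_cond (gs, i) != LT_EXPR)
      return false;
  return true;
}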
/* Set the value being stored in an atomic store. */
static inline void
gimple_omp_atomic_store_set_val (gimple g, tree val)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
g->gimple_omp_atomic_store.val = val;
}
/* Return the value being stored in an atomic store. */
static inline tree
gimple_omp_atomic_store_val (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
return g->gimple_omp_atomic_store.val;
}
/* Return a pointer to the value being stored in an atomic store. */
static inline tree *
gimple_omp_atomic_store_val_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
return &g->gimple_omp_atomic_store.val;
}
/* Set the LHS of an atomic load. */
static inline void
gimple_omp_atomic_load_set_lhs (gimple g, tree lhs)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
g->gimple_omp_atomic_load.lhs = lhs;
}
/* Get the LHS of an atomic load. */
static inline tree
gimple_omp_atomic_load_lhs (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return g->gimple_omp_atomic_load.lhs;
}
/* Return a pointer to the LHS of an atomic load. */
static inline tree *
gimple_omp_atomic_load_lhs_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return &g->gimple_omp_atomic_load.lhs;
}
/* Set the RHS of an atomic load. */
static inline void
gimple_omp_atomic_load_set_rhs (gimple g, tree rhs)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
g->gimple_omp_atomic_load.rhs = rhs;
}
/* Get the RHS of an atomic load. */
static inline tree
gimple_omp_atomic_load_rhs (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return g->gimple_omp_atomic_load.rhs;
}
/* Return a pointer to the RHS of an atomic load. */
static inline tree *
gimple_omp_atomic_load_rhs_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return &g->gimple_omp_atomic_load.rhs;
}
/* Get the definition of the control variable in a GIMPLE_OMP_CONTINUE. */
static inline tree
gimple_omp_continue_control_def (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return g->gimple_omp_continue.control_def;
}
/* The same as above, but return the address. */
static inline tree *
gimple_omp_continue_control_def_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return &g->gimple_omp_continue.control_def;
}
/* Set the definition of the control variable in a GIMPLE_OMP_CONTINUE. */
static inline void
gimple_omp_continue_set_control_def (gimple g, tree def)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
g->gimple_omp_continue.control_def = def;
}
/* Get the use of the control variable in a GIMPLE_OMP_CONTINUE. */
static inline tree
gimple_omp_continue_control_use (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return g->gimple_omp_continue.control_use;
}
/* The same as above, but return the address. */
static inline tree *
gimple_omp_continue_control_use_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return &g->gimple_omp_continue.control_use;
}
/* Set the use of the control variable in a GIMPLE_OMP_CONTINUE. */
static inline void
gimple_omp_continue_set_control_use (gimple g, tree use)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
g->gimple_omp_continue.control_use = use;
}
/* Return a pointer to the return value for GIMPLE_RETURN GS. */
static inline tree *
gimple_return_retval_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
return gimple_op_ptr (gs, 0);
}
/* Return the return value for GIMPLE_RETURN GS. */
static inline tree
gimple_return_retval (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
return gimple_op (gs, 0);
}
/* Set RETVAL to be the return value for GIMPLE_RETURN GS. */
static inline void
gimple_return_set_retval (gimple gs, tree retval)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
gimple_set_op (gs, 0, retval);
}
/* Returns true when the gimple statement STMT is any of the OpenMP types. */
#define CASE_GIMPLE_OMP \
case GIMPLE_OMP_PARALLEL: \
case GIMPLE_OMP_TASK: \
case GIMPLE_OMP_FOR: \
case GIMPLE_OMP_SECTIONS: \
case GIMPLE_OMP_SECTIONS_SWITCH: \
case GIMPLE_OMP_SINGLE: \
case GIMPLE_OMP_SECTION: \
case GIMPLE_OMP_MASTER: \
case GIMPLE_OMP_ORDERED: \
case GIMPLE_OMP_CRITICAL: \
case GIMPLE_OMP_RETURN: \
case GIMPLE_OMP_ATOMIC_LOAD: \
case GIMPLE_OMP_ATOMIC_STORE: \
case GIMPLE_OMP_CONTINUE
static inline bool
is_gimple_omp (const_gimple stmt)
{
switch (gimple_code (stmt))
{
CASE_GIMPLE_OMP:
return true;
default:
return false;
}
}
/* Returns TRUE if statement G is a GIMPLE_NOP. */
static inline bool
gimple_nop_p (const_gimple g)
{
return gimple_code (g) == GIMPLE_NOP;
}
/* Return true if GS is a GIMPLE_RESX. */
static inline bool
is_gimple_resx (const_gimple gs)
{
return gimple_code (gs) == GIMPLE_RESX;
}
/* Return the predictor of GIMPLE_PREDICT statement GS. */
static inline enum br_predictor
gimple_predict_predictor (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
return (enum br_predictor) (gs->gsbase.subcode & ~GF_PREDICT_TAKEN);
}
/* Set the predictor of GIMPLE_PREDICT statement GS to PREDICT. */
static inline void
gimple_predict_set_predictor (gimple gs, enum br_predictor predictor)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
gs->gsbase.subcode = (gs->gsbase.subcode & GF_PREDICT_TAKEN)
| (unsigned) predictor;
}
/* Return the outcome of GIMPLE_PREDICT statement GS. */
static inline enum prediction
gimple_predict_outcome (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
return (gs->gsbase.subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN;
}
/* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME. */
static inline void
gimple_predict_set_outcome (gimple gs, enum prediction outcome)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
if (outcome == TAKEN)
gs->gsbase.subcode |= GF_PREDICT_TAKEN;
else
gs->gsbase.subcode &= ~GF_PREDICT_TAKEN;
}
/* Return the type of the main expression computed by STMT. Return
void_type_node if the statement computes nothing. */
static inline tree
gimple_expr_type (const_gimple stmt)
{
enum gimple_code code = gimple_code (stmt);
if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL)
{
tree type;
/* In general we want to pass out a type that can be substituted
for both the RHS and the LHS types if there is a possibly
useless conversion involved. That means returning the
original RHS type as far as we can reconstruct it. */
if (code == GIMPLE_CALL)
type = gimple_call_return_type (stmt);
else
switch (gimple_assign_rhs_code (stmt))
{
case POINTER_PLUS_EXPR:
type = TREE_TYPE (gimple_assign_rhs1 (stmt));
break;
default:
/* As fallback use the type of the LHS. */
type = TREE_TYPE (gimple_get_lhs (stmt));
break;
}
return type;
}
else if (code == GIMPLE_COND)
return boolean_type_node;
else
return void_type_node;
}
/* Return a new iterator pointing to GIMPLE_SEQ's first statement. */
static inline gimple_stmt_iterator
gsi_start (gimple_seq seq)
{
gimple_stmt_iterator i;
i.ptr = gimple_seq_first (seq);
i.seq = seq;
i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL;
return i;
}
/* Return a new iterator pointing to the first statement in basic block BB. */
static inline gimple_stmt_iterator
gsi_start_bb (basic_block bb)
{
gimple_stmt_iterator i;
gimple_seq seq;
seq = bb_seq (bb);
i.ptr = gimple_seq_first (seq);
i.seq = seq;
i.bb = bb;
return i;
}
/* Return a new iterator initially pointing to GIMPLE_SEQ's last statement. */
static inline gimple_stmt_iterator
gsi_last (gimple_seq seq)
{
gimple_stmt_iterator i;
i.ptr = gimple_seq_last (seq);
i.seq = seq;
i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL;
return i;
}
/* Return a new iterator pointing to the last statement in basic block BB. */
static inline gimple_stmt_iterator
gsi_last_bb (basic_block bb)
{
gimple_stmt_iterator i;
gimple_seq seq;
seq = bb_seq (bb);
i.ptr = gimple_seq_last (seq);
i.seq = seq;
i.bb = bb;
return i;
}
/* Return true if I is at the end of its sequence. */
static inline bool
gsi_end_p (gimple_stmt_iterator i)
{
return i.ptr == NULL;
}
/* Return true if I is one statement before the end of its sequence. */
static inline bool
gsi_one_before_end_p (gimple_stmt_iterator i)
{
return i.ptr != NULL && i.ptr->next == NULL;
}
/* Advance the iterator to the next gimple statement. */
static inline void
gsi_next (gimple_stmt_iterator *i)
{
i->ptr = i->ptr->next;
}
/* Advance the iterator to the previous gimple statement. */
static inline void
gsi_prev (gimple_stmt_iterator *i)
{
i->ptr = i->ptr->prev;
}
/* Return the current stmt. */
static inline gimple
gsi_stmt (gimple_stmt_iterator i)
{
return i.ptr->stmt;
}
/* Return a block statement iterator that points to the first non-label
statement in block BB. */
static inline gimple_stmt_iterator
gsi_after_labels (basic_block bb)
{
gimple_stmt_iterator gsi = gsi_start_bb (bb);
while (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL)
gsi_next (&gsi);
return gsi;
}
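/* Example (editor's sketch, not part of the original header): the
   canonical idiom for visiting every statement of a basic block with
   the iterators above.  The helper name is hypothetical, and
   is_gimple_debug () is assumed to be declared earlier in this header. */
static inline unsigned
gsi_example_count_stmts (basic_block bb)
{
  unsigned count = 0;
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    if (!is_gimple_debug (gsi_stmt (gsi)))
      count++;	/* Count only non-debug statements.  */
  return count;
}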
/* Advance the iterator to the next non-debug gimple statement. */
static inline void
gsi_next_nondebug (gimple_stmt_iterator *i)
{
do
{
gsi_next (i);
}
while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)));
}
/* Advance the iterator to the previous non-debug gimple statement. */
static inline void
gsi_prev_nondebug (gimple_stmt_iterator *i)
{
do
{
gsi_prev (i);
}
while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)));
}
/* Return a new iterator pointing to the first non-debug statement in
basic block BB. */
static inline gimple_stmt_iterator
gsi_start_nondebug_bb (basic_block bb)
{
gimple_stmt_iterator i = gsi_start_bb (bb);
if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i)))
gsi_next_nondebug (&i);
return i;
}
/* Return a new iterator pointing to the last non-debug statement in
basic block BB. */
static inline gimple_stmt_iterator
gsi_last_nondebug_bb (basic_block bb)
{
gimple_stmt_iterator i = gsi_last_bb (bb);
if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i)))
gsi_prev_nondebug (&i);
return i;
}
/* Return a pointer to the current stmt.
NOTE: You may want to use gsi_replace on the iterator itself,
as this performs additional bookkeeping that will not be done
if you simply assign through a pointer returned by gsi_stmt_ptr. */
static inline gimple *
gsi_stmt_ptr (gimple_stmt_iterator *i)
{
return &i->ptr->stmt;
}
/* Return the basic block associated with this iterator. */
static inline basic_block
gsi_bb (gimple_stmt_iterator i)
{
return i.bb;
}
/* Return the sequence associated with this iterator. */
static inline gimple_seq
gsi_seq (gimple_stmt_iterator i)
{
return i.seq;
}
enum gsi_iterator_update
{
GSI_NEW_STMT, /* Only valid when single statement is added, move
iterator to it. */
GSI_SAME_STMT, /* Leave the iterator at the same statement. */
GSI_CONTINUE_LINKING /* Move iterator to whatever position is suitable
for linking other statements in the same
direction. */
};
/* In gimple-iterator.c */
gimple_stmt_iterator gsi_start_phis (basic_block);
gimple_seq gsi_split_seq_after (gimple_stmt_iterator);
gimple_seq gsi_split_seq_before (gimple_stmt_iterator *);
void gsi_replace (gimple_stmt_iterator *, gimple, bool);
void gsi_insert_before (gimple_stmt_iterator *, gimple,
enum gsi_iterator_update);
void gsi_insert_before_without_update (gimple_stmt_iterator *, gimple,
enum gsi_iterator_update);
void gsi_insert_seq_before (gimple_stmt_iterator *, gimple_seq,
enum gsi_iterator_update);
void gsi_insert_seq_before_without_update (gimple_stmt_iterator *, gimple_seq,
enum gsi_iterator_update);
void gsi_insert_after (gimple_stmt_iterator *, gimple,
enum gsi_iterator_update);
void gsi_insert_after_without_update (gimple_stmt_iterator *, gimple,
enum gsi_iterator_update);
void gsi_insert_seq_after (gimple_stmt_iterator *, gimple_seq,
enum gsi_iterator_update);
void gsi_insert_seq_after_without_update (gimple_stmt_iterator *, gimple_seq,
enum gsi_iterator_update);
void gsi_remove (gimple_stmt_iterator *, bool);
gimple_stmt_iterator gsi_for_stmt (gimple);
void gsi_move_after (gimple_stmt_iterator *, gimple_stmt_iterator *);
void gsi_move_before (gimple_stmt_iterator *, gimple_stmt_iterator *);
void gsi_move_to_bb_end (gimple_stmt_iterator *, struct basic_block_def *);
void gsi_insert_on_edge (edge, gimple);
void gsi_insert_seq_on_edge (edge, gimple_seq);
basic_block gsi_insert_on_edge_immediate (edge, gimple);
basic_block gsi_insert_seq_on_edge_immediate (edge, gimple_seq);
void gsi_commit_one_edge_insert (edge, basic_block *);
void gsi_commit_edge_inserts (void);
gimple gimple_call_copy_skip_args (gimple, bitmap);
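/* Usage sketch (editor's addition, hypothetical helper): insert a
   GIMPLE_NOP before the statement GSI points at, leaving the iterator
   where it was.  Assumes the gimple_build_nop () constructor declared
   elsewhere in this header. */
static inline void
gsi_example_insert_nop_before (gimple_stmt_iterator *gsi)
{
  gimple nop = gimple_build_nop ();
  /* GSI_SAME_STMT: the iterator still points at the original statement.  */
  gsi_insert_before (gsi, nop, GSI_SAME_STMT);
}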
/* Convenience routines to walk all statements of a gimple function.
Note that this is useful exclusively before the code is converted
into SSA form. Once the program is in SSA form, the standard
operand interface should be used to analyze/modify statements. */
struct walk_stmt_info
{
/* Points to the current statement being walked. */
gimple_stmt_iterator gsi;
/* Additional data that the callback functions may want to carry
through the recursion. */
void *info;
/* Pointer map used to mark visited tree nodes when calling
walk_tree on each operand. If set to NULL, duplicate tree nodes
will be visited more than once. */
struct pointer_set_t *pset;
/* Indicates whether the operand being examined may be replaced
with something that matches is_gimple_val (if true) or something
slightly more complicated (if false). "Something" technically
means the common subset of is_gimple_lvalue and is_gimple_rhs,
but we never try to form anything more complicated than that, so
we don't bother checking.
Also note that CALLBACK should update this flag while walking the
sub-expressions of a statement. For instance, when walking the
statement 'foo (&var)', the flag VAL_ONLY will initially be set
to true, however, when walking &var, the operand of that
ADDR_EXPR does not need to be a GIMPLE value. */
bool val_only;
/* True if we are currently walking the LHS of an assignment. */
bool is_lhs;
/* Optional. Set to true by the callback functions if they made any
changes. */
bool changed;
/* True if we're interested in location information. */
bool want_locations;
/* Operand returned by the callbacks. This is set when calling
walk_gimple_seq. If the walk_stmt_fn or walk_tree_fn callback
returns non-NULL, this field will contain the tree returned by
the last callback. */
tree callback_result;
};
/* Callback for walk_gimple_stmt. Called for every statement found
during traversal. The first argument points to the statement to
walk. The second argument is a flag that the callback sets to
'true' if the callback handled all the operands and
sub-statements of the statement (the default value of this flag is
'false'). The third argument is an anonymous pointer to data
to be used by the callback. */
typedef tree (*walk_stmt_fn) (gimple_stmt_iterator *, bool *,
struct walk_stmt_info *);
gimple walk_gimple_seq (gimple_seq, walk_stmt_fn, walk_tree_fn,
struct walk_stmt_info *);
tree walk_gimple_stmt (gimple_stmt_iterator *, walk_stmt_fn, walk_tree_fn,
struct walk_stmt_info *);
tree walk_gimple_op (gimple, walk_tree_fn, struct walk_stmt_info *);
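/* Sketch of a walk_stmt_fn callback (editor's addition, hypothetical
   helper): count call statements while walking a sequence.  Pass it to
   walk_gimple_seq () with WI->INFO pointing at an unsigned counter;
   is_gimple_call () is assumed to be declared earlier in this header. */
static tree
example_count_calls (gimple_stmt_iterator *gsi, bool *handled_ops,
		     struct walk_stmt_info *wi)
{
  if (is_gimple_call (gsi_stmt (*gsi)))
    ++*(unsigned *) wi->info;	/* WI->INFO carries the caller's counter.  */
  *handled_ops = false;		/* Let the walker visit operands too.  */
  return NULL_TREE;		/* Returning NULL continues the walk.  */
}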
#ifdef GATHER_STATISTICS
/* Enum and arrays used for allocation stats. Keep in sync with
gimple.c:gimple_alloc_kind_names. */
enum gimple_alloc_kind
{
gimple_alloc_kind_assign, /* Assignments. */
gimple_alloc_kind_phi, /* PHI nodes. */
gimple_alloc_kind_cond, /* Conditionals. */
gimple_alloc_kind_seq, /* Sequences. */
gimple_alloc_kind_rest, /* Everything else. */
gimple_alloc_kind_all
};
extern int gimple_alloc_counts[];
extern int gimple_alloc_sizes[];
/* Return the allocation kind for a given stmt CODE. */
static inline enum gimple_alloc_kind
gimple_alloc_kind (enum gimple_code code)
{
switch (code)
{
case GIMPLE_ASSIGN:
return gimple_alloc_kind_assign;
case GIMPLE_PHI:
return gimple_alloc_kind_phi;
case GIMPLE_COND:
return gimple_alloc_kind_cond;
default:
return gimple_alloc_kind_rest;
}
}
#endif /* GATHER_STATISTICS */
extern void dump_gimple_statistics (void);
extern tree maybe_fold_and_comparisons (enum tree_code, tree, tree,
enum tree_code, tree, tree);
extern tree maybe_fold_or_comparisons (enum tree_code, tree, tree,
enum tree_code, tree, tree);
#endif /* GCC_GIMPLE_H */
|
operations.c | //-----------------------------------------------------------------------
//Copyright 2019 Centrum Wiskunde & Informatica, Amsterdam
//
//Author: Daniel M. Pelt
//Contact: D.M.Pelt@cwi.nl
//Website: http://dmpelt.github.io/msdnet/
//License: MIT
//
//This file is part of MSDNet, a Python implementation of the
//Mixed-Scale Dense Convolutional Neural Network.
//-----------------------------------------------------------------------
#include <math.h>
#ifdef _MSC_VER
#define DECLDIR __declspec(dllexport)
// No-op on the MSVC build; the thread count is left at the OpenMP default.
DECLDIR void set_threads(const unsigned int nthrd){}
#else
#define DECLDIR
#include <omp.h>
// OpenMP set number of threads
DECLDIR void set_threads(const unsigned int nthrd){
omp_set_num_threads(nthrd);
}
#endif
// Flattened array operations
DECLDIR void relu(float * const data, const unsigned long n){
long i;
#pragma omp parallel for private(i)
for(i=0; i<n; i++){
if(data[i]<0){
data[i]=0;
}
}
}
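// Usage sketch (editor's addition, not part of MSDNet): driving the
// flattened-array API from C. The guard macro and main() are illustrative.
#ifdef OPERATIONS_EXAMPLE_MAIN
#include <stdio.h>
int main(void){
    float data[5] = {-2.0f, -0.5f, 0.0f, 0.5f, 2.0f};
    set_threads(2); // request two OpenMP threads (no-op on the MSVC build)
    relu(data, 5);  // negatives are clamped to zero in place
    for(int i=0; i<5; i++) printf("%g\n", data[i]);
    return 0;
}
#endif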
DECLDIR void leakyrelu(float * const data, const unsigned long n, const float w){
long i;
#pragma omp parallel for private(i)
for(i=0; i<n; i++){
if(data[i]<0){
data[i]*=w;
}
}
}
// Backward-pass masks: relu2/leakyrelu2 take the forward input `inp` and a
// pre-filled gradient buffer `out`, zeroing (or scaling by w) the entries
// where the activation was clamped; entries with inp>0 are left untouched.
DECLDIR void relu2(const float * const inp, float * const out, const unsigned long n){
long i;
#pragma omp parallel for private(i)
for(i=0; i<n; i++){
if(inp[i]<=0){
out[i]=0;
}
}
}
DECLDIR void leakyrelu2(const float * const inp, float * const out, const unsigned long n, const float w){
long i;
#pragma omp parallel for private(i)
for(i=0; i<n; i++){
if(inp[i]<=0){
out[i]*=w;
}
}
}
DECLDIR void combine(const float * const inp, float * const out, const unsigned long n, const float w){
long i;
#pragma omp parallel for private(i)
for(i=0; i<n; i++){
out[i]+=w*inp[i];
}
}
DECLDIR float sum(const float * const inp, const unsigned long n){
long double sum=0;
long i;
#pragma omp parallel for reduction(+:sum) private(i)
for(i=0; i<n; i++){
sum+=inp[i];
}
return (float)sum;
}
// Sum of `inp` over all `nim` images, restricted to pixels where the mask is
// positive; `inp` holds nim images of n pixels stored back to back.
DECLDIR float masksum(const float * const inp, const float * const msk, const unsigned long n, const unsigned long nim){
long double sum=0;
long i, j;
#pragma omp parallel for reduction(+:sum) private(i, j)
for(i=0; i<n; i++){
if (msk[i] > 0){
for(j=0; j<nim; j++){
sum+=inp[j*n+i];
}
}
}
return (float)sum;
}
// Population standard deviation of `inp` around the precomputed mean `mn`.
DECLDIR float std(const float * const inp, const float mn, const unsigned long n){
long double sum=0;
long i;
#pragma omp parallel for reduction(+:sum) private(i)
for(i=0; i<n; i++){
sum+=(inp[i]-mn)*(inp[i]-mn);
}
return (float)sqrt(sum/n);
}
// Dot product of `a` and `b`, accumulated in long double precision.
DECLDIR float multsum(const float * const a, const float * const b, const unsigned long n){
long double sum=0;
long i;
#pragma omp parallel for reduction(+:sum) private(i)
for(i=0; i<n; i++){
sum+=a[i]*b[i];
}
return (float)sum;
}
// Channel-wise softmax over `nim` images stored back to back: for each of
// the n pixels, shift by the per-pixel maximum for numerical stability,
// exponentiate, and normalize over the channels.
DECLDIR void softmax(float * const im, const unsigned long n, const unsigned int nim){
#pragma omp parallel
{
float mx,sm;
long i;
unsigned int j;
#pragma omp for
for(i=0; i<n;i++){
mx=im[i];
for(j=1; j<nim; j++){
if(im[j*n+i]>mx){
mx = im[j*n+i];
}
}
sm=0;
for(j=0; j<nim; j++){
im[j*n+i] = expf(im[j*n+i]-mx);
sm += im[j*n+i];
}
for(j=0; j<nim; j++){
im[j*n+i] /= sm;
}
}
}
}
DECLDIR void diff(float * const out, const float * const a, const float * const b, const unsigned long n){
long i;
#pragma omp parallel for private(i)
for(i=0; i<n; i++){
out[i] = a[i] - b[i];
}
}
DECLDIR void squarediff(float * const out, const float * const a, const float * const b, const unsigned long n){
long i;
#pragma omp parallel for private(i)
for(i=0; i<n; i++){
out[i] = (a[i] - b[i])*(a[i] - b[i]);
}
}
// Backward pass through softmax: apply the full softmax Jacobian to the
// incoming error at each pixel, out_j = act_j*(err_j - sum_k act_k*err_k),
// written below as the diagonal term minus the off-diagonal sums.
DECLDIR void softmaxderiv(float * const out, const float * const err, const float * const act, const unsigned long n, const unsigned int nim){
#pragma omp parallel
{
float tmp;
long i;
unsigned int j, k;
#pragma omp for
for(i=0; i<n; i++){
for(j=0; j<nim; j++){
tmp = err[j*n+i]*act[j*n+i]*(1-act[j*n+i]);
for(k=0; k<j; k++) tmp -= act[j*n+i] * act[k*n+i] * err[k*n+i];
for(k=j+1; k<nim; k++) tmp -= act[j*n+i] * act[k*n+i] * err[k*n+i];
out[j*n+i] = tmp;
}
}
}
}
// Per-pixel cross-entropy terms: -log(p) where the target is positive; other
// entries of `out` are left unchanged.
DECLDIR void crossentropylog(float * const out, const float * const im, const float * const tar, const unsigned long n){
long i;
#pragma omp parallel for private(i)
for(i=0; i<n; i++){
if (tar[i]>0) out[i] = - logf(im[i]);
}
}
// Derivative of the cross-entropy terms: -1/p where the target is positive.
DECLDIR void crossentropyderiv(float * const out, const float * const im, const float * const tar, const unsigned long n){
long i;
#pragma omp parallel for private(i)
for(i=0; i<n; i++){
if (tar[i]>0) out[i] = - 1/im[i];
}
}
DECLDIR long double squaresum(const float * const a, const unsigned long n){
long double sum=0;
long i;
#pragma omp parallel for reduction(+:sum) private(i)
for(i=0; i<n; i++){
sum+=a[i]*a[i];
}
return sum;
}
// 2D operations
// 3x3 convolution with filter f (row-major, f[0..8]) accumulated into `out`.
// Boundary handling is precomputed by the caller: shx[2*i]/shx[2*i+1] hold
// the row indices used above/below row i, and shy[2*j]/shy[2*j+1] the column
// indices left/right of column j.
DECLDIR void conv2d(const float * const inp, float * const out, const float * const f, const unsigned int nx, const unsigned int ny, const int * const shx, const int * const shy){
#pragma omp parallel
{
long double tmp;
const float *rl, *rc, *rr;
int i;
#pragma omp for
for(i=0; i<nx; i++){
rl = inp + (shx[2*i])*ny;
rc = inp + i*ny;
rr = inp + (shx[2*i+1])*ny;
for(unsigned int j=0; j<ny; j++){
tmp=0;
tmp += rl[shy[2*j]]*f[0];
tmp += rl[j]*f[1];
tmp += rl[shy[2*j+1]]*f[2];
tmp += rc[shy[2*j]]*f[3];
tmp += rc[j]*f[4];
tmp += rc[shy[2*j+1]]*f[5];
tmp += rr[shy[2*j]]*f[6];
tmp += rr[j]*f[7];
tmp += rr[shy[2*j+1]]*f[8];
out[i*ny+j] += tmp;
}
}
}
}
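// Sketch (editor's addition): one way to build the shift tables conv2d
// expects. MSDNet's Python side supplies these in practice; reflective
// boundaries are just one plausible choice, and the helper name is
// hypothetical. Assumes n >= 2.
static void make_shifts_reflect(int * const sh, const unsigned int n){
    for(unsigned int i=0; i<n; i++){
        sh[2*i] = (i == 0) ? 1 : (int)i - 1;             // neighbour before, reflected at the edge
        sh[2*i+1] = (i == n - 1) ? (int)n - 2 : (int)i + 1; // neighbour after, reflected at the edge
    }
}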
// Gradient of a single filter tap: correlate the (shx,shy)-shifted input
// with the back-propagated delta and return the scalar sum.
DECLDIR float gradientmap2d(const float * const inp, const float * const delta, const unsigned int nx, const unsigned int ny, const int * const shx, const int * const shy){
long double sum=0;
#pragma omp parallel
{
const float *ix, *dx;
int i;
#pragma omp for reduction(+:sum)
for(i=0; i<nx; i++){
ix = inp + shx[i]*ny;
dx = delta + i*ny;
for(unsigned int j=0; j<ny; j++){
sum += ix[shy[j]]*dx[j];
}
}
}
return (float)sum;
}
|
tinyexr.h | /*
Copyright (c) 2014 - 2019, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
#ifndef TINYEXR_H_
#define TINYEXR_H_
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // assume stdint.h is available (C99)
#ifdef __cplusplus
extern "C" {
#endif
// Use the embedded miniz to decode ZIP-compressed pixel data. Linking with
// zlib is required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0) // No threaded loading by default.
// Threaded loading requires C++11 <thread> and <atomic>.
#endif
#ifndef TINYEXR_USE_OPENMP
#ifdef _OPENMP
#define TINYEXR_USE_OPENMP (1)
#else
#define TINYEXR_USE_OPENMP (0)
#endif
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
#define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
typedef struct _EXRVersion {
int version; // this must be 2
int tiled; // tile format image
int long_name; // long name attribute
int non_image; // deep image(EXR 2.0)
int multipart; // multi-part(EXR 2.0)
} EXRVersion;
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t*
int size;
int pad0;
} EXRAttribute;
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} EXRChannelInfo;
typedef struct _EXRTile {
int offset_x;
int offset_y;
int level_x;
int level_y;
int width; // actual width in a tile.
int height; // actual height in a tile.
unsigned char **images; // image[channels][pixels]
} EXRTile;
typedef struct _EXRHeader {
float pixel_aspect_ratio;
int line_order;
int data_window[4];
int display_window[4];
float screen_window_center[2];
float screen_window_width;
int chunk_count;
// Properties for tiled format(`tiledesc`).
int tiled;
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
int long_name;
int non_image;
int multipart;
unsigned int header_len;
// Custom attributes (excludes required attributes, e.g. `channels`,
// `compression`, etc.)
int num_custom_attributes;
EXRAttribute *custom_attributes; // array of EXRAttribute. size =
// `num_custom_attributes`.
EXRChannelInfo *channels; // [num_channels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel. This is overwritten with `requested_pixel_types` when
// loading.
int num_channels;
int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int *requested_pixel_types; // Filled initially by
// ParseEXRHeaderFrom(Memory|File), then users
// can edit it(only valid for HALF pixel type
// channel)
} EXRHeader;
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers;
} EXRMultiPartHeader;
typedef struct _EXRImage {
EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
// from tiles manually. NULL if scanline format.
unsigned char **images; // image[channels][pixels]. NULL if tiled format.
int width;
int height;
int num_channels;
// Properties for tile format.
int num_tiles;
} EXRImage;
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images;
} EXRMultiPartImage;
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0;
} DeepImage;
// @deprecated { For backward compatibility. Not recommended to use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
// Loads single-frame OpenEXR image by specifying a layer name. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
// When the specified layer name is not found in the EXR file, the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`.
extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layer_name, const char **err);
//
// Get layer infos from EXR file.
//
// @param[out] layer_names List of layer names. Application must free memory after using this.
// @param[out] num_layers The number of layers
// @param[out] err Error string (will be filled when the function returns an error code). Free it using FreeEXRErrorMessage after use.
//
// @return TINYEXR_SUCCESS upon success.
//
extern int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err);
// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// Checks whether the given file is an EXR file (by inspecting only the header).
// @return TINYEXR_SUCCESS for an EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is non-zero.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename, const char **err);
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Frees error message
extern void FreeEXRErrorMessage(const char *msg);
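// Usage sketch (editor's addition, hypothetical helper): writing an RGBA
// float buffer as a half-float EXR with the one-shot SaveEXR() API. The
// output filename is illustrative.
static int tinyexr_example_save(const float *rgba, int w, int h) {
  const char *err = NULL;
  int ret = SaveEXR(rgba, w, h, 4 /* RGBA components */,
                    1 /* save as fp16 (HALF) */, "out.exr", &err);
  if (ret != TINYEXR_SUCCESS && err)
    FreeEXRErrorMessage(err); // error strings must be released by the caller
  return ret;
}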
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// If an error message is returned, the application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from memory and initialize `EXRHeader`.
// If an error message is returned, the application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// If an error message is returned, the application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from memory and initialize `EXRHeader*`
// array.
// If an error message is returned, the application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// The application must set up `header` with `ParseEXRHeaderFromFile` before
// calling this function.
// The application can free the EXRImage using `FreeEXRImage`.
// Returns negative value and may set error string in `err` when there's an
// error
// If an error message is returned, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
// Loads single-part OpenEXR image from memory.
// The application must set up `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// The application can free the EXRImage using `FreeEXRImage`.
// Returns negative value and may set error string in `err` when there's an
// error
// If an error message is returned, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// The application must set up the headers with `ParseEXRMultipartHeaderFromFile`
// before calling this function.
// The application can free the EXRImages using `FreeEXRImage`.
// Returns negative value and may set error string in `err` when there's an
// error
// If an error message is returned, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from memory.
// The application must set up the `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// The application can free the EXRImages using `FreeEXRImage`.
// Returns negative value and may set error string in `err` when there's an
// error
// If an error message is returned, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// If an error message is returned, the application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to memory.
// The image is compressed using the EXRHeader.compression_type value.
// Returns the number of bytes on success.
// Returns zero and sets an error string in `err` when there's an
// error.
// If an error message is returned, the application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// The application must free the memory of the DeepImage members (image, offset_table).
// Returns negative value and may set error string in `err` when there's an
// error
// If an error message is returned, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// The application must free the memory of the DeepImage members (image, offset_table).
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// If an error message is returned, the application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
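// Usage sketch (editor's addition, hypothetical helper): loading an RGBA
// float image with LoadEXR() and releasing the buffers. The filename is
// supplied by the caller; error handling follows the contract above.
#include <stdlib.h> // for free() on the returned pixel buffer
static int tinyexr_example_load(const char *filename) {
  float *rgba = NULL;
  int width = 0, height = 0;
  const char *err = NULL;
  int ret = LoadEXR(&rgba, &width, &height, filename, &err);
  if (ret != TINYEXR_SUCCESS) {
    if (err) FreeEXRErrorMessage(err); // error strings must be released
    return ret;
  }
  // rgba now holds width * height RGBA float pixels.
  free(rgba); // ownership of the pixel buffer passes to the application
  return TINYEXR_SUCCESS;
}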
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
// #include <iostream> // debug
#include <limits>
#include <string>
#include <vector>
#if __cplusplus > 199711L
// C++11
#include <cstdint>
#if TINYEXR_USE_THREAD
#include <atomic>
#include <thread>
#endif
#endif // __cplusplus > 199711L
#if TINYEXR_USE_OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#include "zfp.h"
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
#if TINYEXR_USE_MINIZ
namespace miniz {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"
#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif
#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#if __has_warning("-Wtautological-constant-compare")
#pragma clang diagnostic ignored "-Wtautological-constant-compare"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks kahmyong.moon@hp.com) which could cause file-locate operations to
miss files. This bug
would only have occurred in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repo.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
failed
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <bruced@valvesoftware.com> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a Core i7 (actual throughput varies
depending on the type of data, and x64 vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's were designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
- Archive file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
const char *pArchive_name,
const void *pBuf, size_t buf_size, const void *pComment, mz_uint16
comment_size, mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to
preserve into a new archive using the
mz_zip_writer_add_from_zip_reader() function (which copies the
compressed file data as-is). When you're done, delete the old archive and
rename the newly written archive, and
you're done. This is safe but requires a bunch of temporary disk space or
heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
//#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP
// archive API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h'" when compiling with
// tcc on Linux
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
// Compression strategies.
enum {
MZ_DEFAULT_STRATEGY = 0,
MZ_FILTERED = 1,
MZ_HUFFMAN_ONLY = 2,
MZ_RLE = 3,
MZ_FIXED = 4
};
// Method
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
size_t size);
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
MZ_NO_FLUSH = 0,
MZ_PARTIAL_FLUSH = 1,
MZ_SYNC_FLUSH = 2,
MZ_FULL_FLUSH = 3,
MZ_FINISH = 4,
MZ_BLOCK = 5
};
// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
MZ_OK = 0,
MZ_STREAM_END = 1,
MZ_NEED_DICT = 2,
MZ_ERRNO = -1,
MZ_STREAM_ERROR = -2,
MZ_DATA_ERROR = -3,
MZ_MEM_ERROR = -4,
MZ_BUF_ERROR = -5,
MZ_VERSION_ERROR = -6,
MZ_PARAM_ERROR = -10000
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
MZ_NO_COMPRESSION = 0,
MZ_BEST_SPEED = 1,
MZ_BEST_COMPRESSION = 9,
MZ_UBER_COMPRESSION = 10,
MZ_DEFAULT_LEVEL = 6,
MZ_DEFAULT_COMPRESSION = -1
};
// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct.
typedef struct mz_stream_s {
const unsigned char *next_in; // pointer to next byte to read
unsigned int avail_in; // number of bytes available at next_in
mz_ulong total_in; // total number of bytes consumed so far
unsigned char *next_out; // pointer to next byte to write
unsigned int avail_out; // number of bytes that can be written to next_out
mz_ulong total_out; // total number of bytes produced so far
char *msg; // error msg (unused)
struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
mz_alloc_func
zalloc; // optional heap allocation function (defaults to malloc)
mz_free_func zfree; // optional heap free function (defaults to free)
void *opaque; // heap alloc function user pointer
int data_type; // data_type (unused)
mz_ulong adler; // adler32 of the source or uncompressed data
mz_ulong reserved; // not used
} mz_stream;
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
// pStream must point to an initialized mz_stream struct.
// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
// level 1 enables a specially optimized compression function that's been
// optimized purely for performance, not ratio.
// (This special func. is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflateInit(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
// mz_deflateEnd() deinitializes a compressor:
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
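// Single-call usage sketch (illustrative only): size the destination with
// mz_compressBound(), then compress in one shot. On MZ_OK, dst_len holds the
// actual compressed size.
#if 0
static int example_compress_blob(const unsigned char *src, mz_ulong src_len) {
  mz_ulong dst_len = mz_compressBound(src_len);
  unsigned char *dst = (unsigned char *)malloc((size_t)dst_len);
  int status = dst ? mz_compress(dst, &dst_len, src, src_len) : MZ_MEM_ERROR;
  free(dst);
  return status;
}
#endif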
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
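// Whole-buffer decompression sketch (illustrative only). The caller is
// assumed to know (or bound) the decompressed size, so MZ_FINISH on the first
// call takes the faster single-call path described above.
#if 0
static int example_inflate_all(const unsigned char *in, size_t in_len,
                               unsigned char *out, size_t out_cap) {
  mz_stream s;
  int status;
  memset(&s, 0, sizeof(s));
  if (mz_inflateInit(&s) != MZ_OK) return MZ_STREAM_ERROR;
  s.next_in = in;
  s.avail_in = (mz_uint32)in_len;
  s.next_out = out;
  s.avail_out = (mz_uint32)out_cap;
  status = mz_inflate(&s, MZ_FINISH);
  mz_inflateEnd(&s);
  return (status == MZ_STREAM_END) ? MZ_OK : status;
}
#endif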
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
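// Drop-in sketch (illustrative only): with the compatibility names above,
// code written against zlib's API compiles unchanged against miniz.
#if 0
static int example_zlib_style(Bytef *dst, uLongf *dst_len, const Bytef *src,
                              uLong src_len) {
  return compress2(dst, dst_len, src, src_len, Z_BEST_COMPRESSION);
}
#endif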
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
enum {
MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};
typedef struct {
mz_uint32 m_file_index;
mz_uint32 m_central_dir_ofs;
mz_uint16 m_version_made_by;
mz_uint16 m_version_needed;
mz_uint16 m_bit_flag;
mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
time_t m_time;
#endif
mz_uint32 m_crc32;
mz_uint64 m_comp_size;
mz_uint64 m_uncomp_size;
mz_uint16 m_internal_attr;
mz_uint32 m_external_attr;
mz_uint64 m_local_header_ofs;
mz_uint32 m_comment_size;
char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n);
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
typedef enum {
MZ_ZIP_MODE_INVALID = 0,
MZ_ZIP_MODE_READING = 1,
MZ_ZIP_MODE_WRITING = 2,
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;
typedef struct mz_zip_archive_tag {
mz_uint64 m_archive_size;
mz_uint64 m_central_directory_file_ofs;
mz_uint m_total_files;
mz_zip_mode m_zip_mode;
mz_uint m_file_offset_alignment;
mz_alloc_func m_pAlloc;
mz_free_func m_pFree;
mz_realloc_func m_pRealloc;
void *m_pAlloc_opaque;
mz_file_read_func m_pRead;
mz_file_write_func m_pWrite;
void *m_pIO_opaque;
mz_zip_internal_state *m_pState;
} mz_zip_archive;
typedef enum {
MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename; if filename_buf_size is 0,
// this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size);
// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags);
// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags);
// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
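// Archive-enumeration sketch (illustrative only; requires MINIZ_NO_STDIO to
// be undefined, and assumes the default heap allocators so mz_free() matches
// the extractor's malloc).
#if 0
static mz_bool example_dump_zip(const char *zip_path) {
  mz_zip_archive zip;
  mz_uint i, n;
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_reader_init_file(&zip, zip_path, 0)) return MZ_FALSE;
  n = mz_zip_reader_get_num_files(&zip);
  for (i = 0; i < n; i++) {
    mz_zip_archive_file_stat st;
    size_t size;
    void *p;
    if (!mz_zip_reader_file_stat(&zip, i, &st)) break;
    if (mz_zip_reader_is_file_a_directory(&zip, i)) continue;
    p = mz_zip_reader_extract_to_heap(&zip, i, &size, 0);
    if (p) mz_free(p);  // ... use st.m_filename and the extracted bytes ...
  }
  mz_zip_reader_end(&zip);
  return MZ_TRUE;
}
#endif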
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash and an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
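// Archive-creation sketch (illustrative only). Writes one in-memory file and
// finalizes; skipping mz_zip_writer_finalize_archive() would leave an invalid
// archive on disk. "data.bin" is a placeholder entry name.
#if 0
static mz_bool example_write_zip(const char *zip_path, const void *buf,
                                 size_t buf_size) {
  mz_zip_archive zip;
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_writer_init_file(&zip, zip_path, 0)) return MZ_FALSE;
  if (!mz_zip_writer_add_mem(&zip, "data.bin", buf, buf_size,
                             MZ_DEFAULT_COMPRESSION) ||
      !mz_zip_writer_finalize_archive(&zip)) {
    mz_zip_writer_end(&zip);
    return MZ_FALSE;
  }
  return mz_zip_writer_end(&zip);
}
#endif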
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags);
// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
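// One-shot helper sketch (illustrative only): pull a single entry out of an
// on-disk archive without managing an mz_zip_archive yourself. "archive.zip"
// and "data.bin" are placeholder names.
#if 0
static void example_extract_one(void) {
  size_t size;
  void *p = mz_zip_extract_archive_file_to_heap("archive.zip", "data.bin",
                                                &size, 0);
  if (p) mz_free(p);  // ... use p[0..size) before freeing ...
}
#endif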
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
TINFL_FLAG_HAS_MORE_INPUT = 2,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
TINFL_FLAG_COMPUTE_ADLER32 = 8
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which could be larger
// than src_buf_len on incompressible data.
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status.
typedef enum {
TINFL_STATUS_BAD_PARAM = -3,
TINFL_STATUS_ADLER32_MISMATCH = -2,
TINFL_STATUS_FAILED = -1,
TINFL_STATUS_DONE = 0,
TINFL_STATUS_NEEDS_MORE_INPUT = 1,
TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;
// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
do { \
(r)->m_state = 0; \
} \
MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build any
// desired higher level decompression API. In the limit, it can be called once
// per byte of input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags);
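// Low-level streaming sketch (illustrative only): one way to drive
// tinfl_decompress() with a wrap-around TINFL_LZ_DICT_SIZE dictionary when
// all input is available up front; emit() is a hypothetical user callback.
#if 0
static tinfl_status example_tinfl_stream(const mz_uint8 *in, size_t in_len,
                                         void (*emit)(const void *, size_t)) {
  tinfl_decompressor decomp;
  static mz_uint8 dict[TINFL_LZ_DICT_SIZE];  // static: too big for the stack
  size_t dict_ofs = 0;
  tinfl_status status;
  tinfl_init(&decomp);
  do {
    size_t in_bytes = in_len, out_bytes = TINFL_LZ_DICT_SIZE - dict_ofs;
    status = tinfl_decompress(&decomp, in, &in_bytes, dict, dict + dict_ofs,
                              &out_bytes, TINFL_FLAG_PARSE_ZLIB_HEADER);
    in += in_bytes;
    in_len -= in_bytes;
    emit(dict + dict_ofs, out_bytes);
    dict_ofs = (dict_ofs + out_bytes) & (TINFL_LZ_DICT_SIZE - 1);
  } while (status == TINFL_STATUS_HAS_MORE_OUTPUT);
  return status;  // TINFL_STATUS_DONE on success
}
#endif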
// Internal/private bits follow.
enum {
TINFL_MAX_HUFF_TABLES = 3,
TINFL_MAX_HUFF_SYMBOLS_0 = 288,
TINFL_MAX_HUFF_SYMBOLS_1 = 32,
TINFL_MAX_HUFF_SYMBOLS_2 = 19,
TINFL_FAST_LOOKUP_BITS = 10,
TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
typedef struct {
mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
struct tinfl_decompressor_tag {
mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
m_check_adler32, m_dist, m_counter, m_num_extra,
m_table_sizes[TINFL_MAX_HUFF_TABLES];
tinfl_bit_buf_t m_bit_buf;
size_t m_dist_from_out_buf_start;
tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
mz_uint8 m_raw_header[4],
m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
TDEFL_HUFFMAN_ONLY = 0,
TDEFL_DEFAULT_MAX_PROBES = 128,
TDEFL_MAX_PROBES_MASK = 0xFFF
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
TDEFL_WRITE_ZLIB_HEADER = 0x01000,
TDEFL_COMPUTE_ADLER32 = 0x02000,
TDEFL_GREEDY_PARSING_FLAG = 0x04000,
TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
TDEFL_RLE_MATCHES = 0x10000,
TDEFL_FILTER_MATCHES = 0x20000,
TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. Higher probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on incompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc., or MZ_DEFAULT_LEVEL as a reasonable default.
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out);
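// PNG-writing sketch (illustrative only). Encodes a tightly packed RGB image
// into a heap block holding a complete .png file; the caller frees it with
// mz_free().
#if 0
static void *example_encode_png(const void *rgb, int w, int h,
                                size_t *png_len) {
  return tdefl_write_image_to_png_file_in_memory_ex(rgb, w, h, 3, png_len,
                                                    MZ_DEFAULT_LEVEL, MZ_FALSE);
}
#endif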
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called with TDEFL_OUT_BUF_SIZE bytes at
// a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
enum {
TDEFL_MAX_HUFF_TABLES = 3,
TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
TDEFL_LZ_DICT_SIZE = 32768,
TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
TDEFL_MIN_MATCH_LEN = 3,
TDEFL_MAX_MATCH_LEN = 258
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 12,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 15,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
TDEFL_STATUS_BAD_PARAM = -2,
TDEFL_STATUS_PUT_BUF_FAILED = -1,
TDEFL_STATUS_OKAY = 0,
TDEFL_STATUS_DONE = 1
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
TDEFL_NO_FLUSH = 0,
TDEFL_SYNC_FLUSH = 2,
TDEFL_FULL_FLUSH = 3,
TDEFL_FINISH = 4
} tdefl_flush;
// tdefl's compression state structure.
typedef struct {
tdefl_put_buf_func_ptr m_pPut_buf_func;
void *m_pPut_buf_user;
mz_uint m_flags, m_max_probes[2];
int m_greedy_parsing;
mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
m_bit_buffer;
mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
m_wants_to_finish;
tdefl_status m_prev_return_status;
const void *m_pIn_buf;
void *m_pOut_buf;
size_t *m_pIn_buf_size, *m_pOut_buf_size;
tdefl_flush m_flush;
const mz_uint8 *m_pSrc;
size_t m_src_buf_left, m_out_buf_ofs;
mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl APIs do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pPut_buf_func is NULL, the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush);
// tdefl_compress_buffer() is only usable when tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush);
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
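// Callback-driven compression sketch (illustrative only). With a non-NULL
// put-buf callback, tdefl_compress_buffer() consumes the whole input and the
// callback receives the compressed bytes; example_sink() is a hypothetical
// user function.
#if 0
static mz_bool example_sink(const void *buf, int len, void *user) {
  (void)buf, (void)len, (void)user;  // ... write buf[0..len) somewhere ...
  return MZ_TRUE;
}
static mz_bool example_tdefl_callback(const void *in, size_t in_len) {
  mz_bool ok = MZ_FALSE;
  tdefl_compressor *d = (tdefl_compressor *)malloc(sizeof(tdefl_compressor));
  if (!d) return MZ_FALSE;
  if (tdefl_init(d, example_sink, NULL,
                 TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES) ==
      TDEFL_STATUS_OKAY)
    ok = (tdefl_compress_buffer(d, in, in_len, TDEFL_FINISH) ==
          TDEFL_STATUS_DONE);
  free(d);
  return ok;
}
#endif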
// tdefl_create_comp_flags_from_zip_params() can't be used if
// MINIZ_NO_ZLIB_APIS is defined, because it uses some of zlib's macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
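// Flag-mapping sketch (illustrative only): zlib-style (level, window_bits,
// strategy) parameters translate to tdefl flags; a negative window_bits
// requests raw deflate (no zlib header or adler-32 trailer).
#if 0
static mz_uint example_raw_deflate_flags(void) {
  return tdefl_create_comp_flags_from_zip_params(9, -MZ_DEFAULT_WINDOW_BITS,
                                                 MZ_DEFAULT_STRATEGY);
}
#endif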
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
#define MZ_ASSERT(x) TEXR_ASSERT(x)
#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
size_t block_len = buf_len % 5552;
if (!ptr) return MZ_ADLER32_INIT;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
return (s2 << 16) + s1;
}
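// Worked check (illustrative): Adler-32 keeps two running sums mod 65521 --
// s1 = 1 + the sum of the bytes, s2 = the sum of the successive s1 values --
// packed as (s2 << 16) | s1. For the three bytes "abc": s1 = 1+97+98+99 = 295
// and s2 = 98+196+295 = 589, so the result is (589 << 16) | 295 == 0x024D0127.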
// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
static const mz_uint32 s_crc32[16] = {
0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
mz_uint32 crcu32 = (mz_uint32)crc;
if (!ptr) return MZ_CRC32_INIT;
crcu32 = ~crcu32;
while (buf_len--) {
mz_uint8 b = *ptr++;
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
}
return ~crcu32;
}
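// Incremental usage sketch (illustrative only): both mz_crc32() and
// mz_adler32() thread the running value through successive calls, so data can
// be checksummed in pieces.
#if 0
static mz_ulong example_crc_two_parts(const mz_uint8 *a, size_t a_len,
                                      const mz_uint8 *b, size_t b_len) {
  mz_ulong crc = MZ_CRC32_INIT;  // same as mz_crc32(0, NULL, 0)
  crc = mz_crc32(crc, a, a_len);
  crc = mz_crc32(crc, b, b_len);
  return crc;
}
#endif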
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
(void)opaque, (void)items, (void)size;
return MZ_MALLOC(items * size);
}
static void def_free_func(void *opaque, void *address) {
(void)opaque, (void)address;
MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
// (void)opaque, (void)address, (void)items, (void)size;
// return MZ_REALLOC(address, items * size);
//}
const char *mz_version(void) { return MZ_VERSION; }
int mz_deflateInit(mz_streamp pStream, int level) {
return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
MZ_DEFAULT_STRATEGY);
}
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy) {
tdefl_compressor *pComp;
mz_uint comp_flags =
TDEFL_COMPUTE_ADLER32 |
tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
if (!pStream) return MZ_STREAM_ERROR;
if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS)))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = MZ_ADLER32_INIT;
pStream->msg = NULL;
pStream->reserved = 0;
pStream->total_in = 0;
pStream->total_out = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
sizeof(tdefl_compressor));
if (!pComp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pComp;
if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
mz_deflateEnd(pStream);
return MZ_PARAM_ERROR;
}
return MZ_OK;
}
int mz_deflateReset(mz_streamp pStream) {
if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
(!pStream->zfree))
return MZ_STREAM_ERROR;
pStream->total_in = pStream->total_out = 0;
tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
((tdefl_compressor *)pStream->state)->m_flags);
return MZ_OK;
}
int mz_deflate(mz_streamp pStream, int flush) {
size_t in_bytes, out_bytes;
mz_ulong orig_total_in, orig_total_out;
int mz_status = MZ_OK;
if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
(!pStream->next_out))
return MZ_STREAM_ERROR;
if (!pStream->avail_out) return MZ_BUF_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
TDEFL_STATUS_DONE)
return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
orig_total_in = pStream->total_in;
orig_total_out = pStream->total_out;
for (;;) {
tdefl_status defl_status;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
pStream->next_in, &in_bytes, pStream->next_out,
&out_bytes, (tdefl_flush)flush);
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (defl_status < 0) {
mz_status = MZ_STREAM_ERROR;
break;
} else if (defl_status == TDEFL_STATUS_DONE) {
mz_status = MZ_STREAM_END;
break;
} else if (!pStream->avail_out)
break;
else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
if ((flush) || (pStream->total_in != orig_total_in) ||
(pStream->total_out != orig_total_out))
break;
return MZ_BUF_ERROR; // Can't make forward progress without some input.
}
}
return mz_status;
}
int mz_deflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
(void)pStream;
// This is really over conservative. (And lame, but it's actually pretty
// tricky to compute a true upper bound given the way tdefl's blocking works.)
return MZ_MAX(128 + (source_len * 110) / 100,
128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level) {
int status;
mz_stream stream;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_deflateInit(&stream, level);
if (status != MZ_OK) return status;
status = mz_deflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_deflateEnd(&stream);
return (status == MZ_OK) ? MZ_BUF_ERROR : status;
}
*pDest_len = stream.total_out;
return mz_deflateEnd(&stream);
}
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
return mz_compress2(pDest, pDest_len, pSource, source_len,
MZ_DEFAULT_COMPRESSION);
}
mz_ulong mz_compressBound(mz_ulong source_len) {
return mz_deflateBound(NULL, source_len);
}
typedef struct {
tinfl_decompressor m_decomp;
mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
int m_window_bits;
mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
tinfl_status m_last_status;
} inflate_state;
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
inflate_state *pDecomp;
if (!pStream) return MZ_STREAM_ERROR;
if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = 0;
pStream->msg = NULL;
pStream->total_in = 0;
pStream->total_out = 0;
pStream->reserved = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
sizeof(inflate_state));
if (!pDecomp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pDecomp;
tinfl_init(&pDecomp->m_decomp);
pDecomp->m_dict_ofs = 0;
pDecomp->m_dict_avail = 0;
pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
pDecomp->m_first_call = 1;
pDecomp->m_has_flushed = 0;
pDecomp->m_window_bits = window_bits;
return MZ_OK;
}
int mz_inflateInit(mz_streamp pStream) {
return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
int mz_inflate(mz_streamp pStream, int flush) {
inflate_state *pState;
mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
size_t in_bytes, out_bytes, orig_avail_in;
tinfl_status status;
if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
return MZ_STREAM_ERROR;
pState = (inflate_state *)pStream->state;
if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
orig_avail_in = pStream->avail_in;
first_call = pState->m_first_call;
pState->m_first_call = 0;
if (pState->m_last_status < 0) return MZ_DATA_ERROR;
if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
pState->m_has_flushed |= (flush == MZ_FINISH);
if ((flush == MZ_FINISH) && (first_call)) {
// MZ_FINISH on the first call implies that the input and output buffers are
// large enough to hold the entire compressed/decompressed file.
decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
pStream->next_out, pStream->next_out, &out_bytes,
decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (status < 0)
return MZ_DATA_ERROR;
else if (status != TINFL_STATUS_DONE) {
pState->m_last_status = TINFL_STATUS_FAILED;
return MZ_BUF_ERROR;
}
return MZ_STREAM_END;
}
// If flush != MZ_FINISH, we must assume there's more input.
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
if (pState->m_dict_avail) {
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
return ((pState->m_last_status == TINFL_STATUS_DONE) &&
(!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
for (;;) {
in_bytes = pStream->avail_in;
out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
status = tinfl_decompress(
&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pState->m_dict_avail = (mz_uint)out_bytes;
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
if (status < 0)
return MZ_DATA_ERROR; // Stream is corrupted (there could be some
// uncompressed data left in the output dictionary -
// oh well).
else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
// without supplying more input or by setting flush
// to MZ_FINISH.
else if (flush == MZ_FINISH) {
// The output buffer MUST be large enough to hold the remaining uncompressed
// data when flush==MZ_FINISH.
if (status == TINFL_STATUS_DONE)
return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
// status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
// at least 1 more byte on the way. If there's no more room left in the
// output buffer then something is wrong.
else if (!pStream->avail_out)
return MZ_BUF_ERROR;
} else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
(!pStream->avail_out) || (pState->m_dict_avail))
break;
}
return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
int mz_inflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
mz_stream stream;
int status;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_inflateInit(&stream);
if (status != MZ_OK) return status;
status = mz_inflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_inflateEnd(&stream);
return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
: status;
}
*pDest_len = stream.total_out;
return mz_inflateEnd(&stream);
}
const char *mz_error(int err) {
static struct {
int m_err;
const char *m_pDesc;
} s_error_descs[] = {{MZ_OK, ""},
{MZ_STREAM_END, "stream end"},
{MZ_NEED_DICT, "need dictionary"},
{MZ_ERRNO, "file error"},
{MZ_STREAM_ERROR, "stream error"},
{MZ_DATA_ERROR, "data error"},
{MZ_MEM_ERROR, "out of memory"},
{MZ_BUF_ERROR, "buf error"},
{MZ_VERSION_ERROR, "version error"},
{MZ_PARAM_ERROR, "parameter error"}};
mz_uint i;
for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from all
// compression API's)
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
#define TINFL_CR_BEGIN \
switch (r->m_state) { \
case 0:
#define TINFL_CR_RETURN(state_index, result) \
do { \
status = result; \
r->m_state = state_index; \
goto common_exit; \
case state_index:; \
} \
MZ_MACRO_END
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
do { \
for (;;) { \
TINFL_CR_RETURN(state_index, result); \
} \
} \
MZ_MACRO_END
#define TINFL_CR_FINISH }
// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never reads ahead more than it needs to. Currently
// TINFL_GET_BYTE() pads the end of the stream with 0's in this scenario.
#define TINFL_GET_BYTE(state_index, c) \
do { \
if (pIn_buf_cur >= pIn_buf_end) { \
for (;;) { \
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
if (pIn_buf_cur < pIn_buf_end) { \
c = *pIn_buf_cur++; \
break; \
} \
} else { \
c = 0; \
break; \
} \
} \
} else \
c = *pIn_buf_cur++; \
} \
MZ_MACRO_END
#define TINFL_NEED_BITS(state_index, n) \
do { \
mz_uint c; \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
#define TINFL_GET_BITS(state_index, b, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
b = bit_buf & ((1 << (n)) - 1); \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream to decode the next Huffman
// code (and absolutely no more). It works by trying to fully decode a Huffman
// code using whatever bits are currently present in the bit buffer. If this
// fails, it reads another byte, and tries again until it succeeds or until the
// bit buffer contains >=15 bits (deflate's max. Huffman code size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
do { \
temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
if (temp >= 0) { \
code_len = temp >> 9; \
if ((code_len) && (num_bits >= code_len)) break; \
} else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while ((temp < 0) && (num_bits >= (code_len + 1))); \
if (temp >= 0) break; \
} \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the decompressor
// to never read beyond the final byte of the deflate stream. (In other words,
// when this macro wants to read another byte from the input, it REALLY needs
// another byte in order to fully decode the next Huffman code.) Handling this
// properly is particularly important on raw deflate (non-zlib) streams, which
// aren't followed by a byte-aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
do { \
int temp; \
mz_uint code_len, c; \
if (num_bits < 15) { \
if ((pIn_buf_end - pIn_buf_cur) < 2) { \
TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
} else { \
bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
(((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
pIn_buf_cur += 2; \
num_bits += 16; \
} \
} \
if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
0) \
code_len = temp >> 9, temp &= 511; \
else { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while (temp < 0); \
} \
sym = temp; \
bit_buf >>= code_len; \
num_bits -= code_len; \
} \
MZ_MACRO_END
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags) {
static const int s_length_base[31] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
4, 4, 5, 5, 5, 5, 0, 0, 0};
static const int s_dist_base[32] = {
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0};
static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
static const mz_uint8 s_length_dezigzag[19] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static const int s_min_table_sizes[3] = {257, 1, 4};
tinfl_status status = TINFL_STATUS_FAILED;
mz_uint32 num_bits, dist, counter, num_extra;
tinfl_bit_buf_t bit_buf;
const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end =
pIn_buf_next + *pIn_buf_size;
mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end =
pOut_buf_next + *pOut_buf_size;
size_t out_buf_size_mask =
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
? (size_t)-1
: ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
dist_from_out_buf_start;
// Ensure the output buffer's size is a power of 2, unless the output buffer
// is large enough to hold the entire output file (in which case it doesn't
// matter).
if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
(pOut_buf_next < pOut_buf_start)) {
*pIn_buf_size = *pOut_buf_size = 0;
return TINFL_STATUS_BAD_PARAM;
}
num_bits = r->m_num_bits;
bit_buf = r->m_bit_buf;
dist = r->m_dist;
counter = r->m_counter;
num_extra = r->m_num_extra;
dist_from_out_buf_start = r->m_dist_from_out_buf_start;
TINFL_CR_BEGIN
bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
r->m_z_adler32 = r->m_check_adler32 = 1;
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_GET_BYTE(1, r->m_zhdr0);
TINFL_GET_BYTE(2, r->m_zhdr1);
counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
(r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
((out_buf_size_mask + 1) <
(size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
if (counter) {
TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
}
}
do {
TINFL_GET_BITS(3, r->m_final, 3);
r->m_type = r->m_final >> 1;
if (r->m_type == 0) {
TINFL_SKIP_BITS(5, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
if (num_bits)
TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
else
TINFL_GET_BYTE(7, r->m_raw_header[counter]);
}
if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
(mz_uint)(0xFFFF ^
(r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
}
while ((counter) && (num_bits)) {
TINFL_GET_BITS(51, dist, 8);
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)dist;
counter--;
}
while (counter) {
size_t n;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
}
while (pIn_buf_cur >= pIn_buf_end) {
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
} else {
TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
}
}
n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
(size_t)(pIn_buf_end - pIn_buf_cur)),
counter);
TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
pIn_buf_cur += n;
pOut_buf_cur += n;
counter -= (mz_uint)n;
}
} else if (r->m_type == 3) {
TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
} else {
if (r->m_type == 1) {
mz_uint8 *p = r->m_tables[0].m_code_size;
mz_uint i;
r->m_table_sizes[0] = 288;
r->m_table_sizes[1] = 32;
TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
} else {
for (counter = 0; counter < 3; counter++) {
TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
r->m_table_sizes[counter] += s_min_table_sizes[counter];
}
MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
mz_uint s;
TINFL_GET_BITS(14, s, 3);
r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
}
r->m_table_sizes[2] = 19;
}
for (; (int)r->m_type >= 0; r->m_type--) {
int tree_next, tree_cur;
tinfl_huff_table *pTable;
mz_uint i, j, used_syms, total, sym_index, next_code[17],
total_syms[16];
pTable = &r->m_tables[r->m_type];
MZ_CLEAR_OBJ(total_syms);
MZ_CLEAR_OBJ(pTable->m_look_up);
MZ_CLEAR_OBJ(pTable->m_tree);
for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
total_syms[pTable->m_code_size[i]]++;
used_syms = 0, total = 0;
next_code[0] = next_code[1] = 0;
for (i = 1; i <= 15; ++i) {
used_syms += total_syms[i];
next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
}
if ((65536 != total) && (used_syms > 1)) {
TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
}
for (tree_next = -1, sym_index = 0;
sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
mz_uint rev_code = 0, l, cur_code,
code_size = pTable->m_code_size[sym_index];
if (!code_size) continue;
cur_code = next_code[code_size]++;
for (l = code_size; l > 0; l--, cur_code >>= 1)
rev_code = (rev_code << 1) | (cur_code & 1);
if (code_size <= TINFL_FAST_LOOKUP_BITS) {
mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
pTable->m_look_up[rev_code] = k;
rev_code += (1 << code_size);
}
continue;
}
if (0 ==
(tree_cur = pTable->m_look_up[rev_code &
(TINFL_FAST_LOOKUP_SIZE - 1)])) {
pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
(mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
}
rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
tree_cur -= ((rev_code >>= 1) & 1);
if (!pTable->m_tree[-tree_cur - 1]) {
pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
} else
tree_cur = pTable->m_tree[-tree_cur - 1];
}
tree_cur -= ((rev_code >>= 1) & 1);
pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
}
if (r->m_type == 2) {
for (counter = 0;
counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
mz_uint s;
TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
if (dist < 16) {
r->m_len_codes[counter++] = (mz_uint8)dist;
continue;
}
if ((dist == 16) && (!counter)) {
TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
}
num_extra = "\02\03\07"[dist - 16];
TINFL_GET_BITS(18, s, num_extra);
s += "\03\03\013"[dist - 16];
TINFL_MEMSET(r->m_len_codes + counter,
(dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
counter += s;
}
if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
}
TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
r->m_table_sizes[0]);
TINFL_MEMCPY(r->m_tables[1].m_code_size,
r->m_len_codes + r->m_table_sizes[0],
r->m_table_sizes[1]);
}
}
for (;;) {
mz_uint8 *pSrc;
for (;;) {
if (((pIn_buf_end - pIn_buf_cur) < 4) ||
((pOut_buf_end - pOut_buf_cur) < 2)) {
TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
if (counter >= 256) break;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)counter;
} else {
int sym2;
mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
if (num_bits < 30) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 4;
num_bits += 32;
}
#else
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
counter = sym2;
bit_buf >>= code_len;
num_bits -= code_len;
if (counter & 256) break;
#if !TINFL_USE_64BIT_BITBUF
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
bit_buf >>= code_len;
num_bits -= code_len;
pOut_buf_cur[0] = (mz_uint8)counter;
if (sym2 & 256) {
pOut_buf_cur++;
counter = sym2;
break;
}
pOut_buf_cur[1] = (mz_uint8)sym2;
pOut_buf_cur += 2;
}
}
if ((counter &= 511) == 256) break;
num_extra = s_length_extra[counter - 257];
counter = s_length_base[counter - 257];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(25, extra_bits, num_extra);
counter += extra_bits;
}
TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
num_extra = s_dist_extra[dist];
dist = s_dist_base[dist];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(27, extra_bits, num_extra);
dist += extra_bits;
}
dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
if ((dist > dist_from_out_buf_start) &&
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
}
pSrc = pOut_buf_start +
((dist_from_out_buf_start - dist) & out_buf_size_mask);
if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
while (counter--) {
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ =
pOut_buf_start[(dist_from_out_buf_start++ - dist) &
out_buf_size_mask];
}
continue;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
else if ((counter >= 9) && (counter <= dist)) {
const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
do {
((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
pOut_buf_cur += 8;
} while ((pSrc += 8) < pSrc_end);
if ((counter &= 7) < 3) {
if (counter) {
pOut_buf_cur[0] = pSrc[0];
if (counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
continue;
}
}
#endif
do {
pOut_buf_cur[0] = pSrc[0];
pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur[2] = pSrc[2];
pOut_buf_cur += 3;
pSrc += 3;
} while ((int)(counter -= 3) > 2);
if ((int)counter > 0) {
pOut_buf_cur[0] = pSrc[0];
if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
}
}
} while (!(r->m_final & 1));
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_SKIP_BITS(32, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
mz_uint s;
if (num_bits)
TINFL_GET_BITS(41, s, 8);
else
TINFL_GET_BYTE(42, s);
r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
}
}
TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
TINFL_CR_FINISH
common_exit:
r->m_num_bits = num_bits;
r->m_bit_buf = bit_buf;
r->m_dist = dist;
r->m_counter = counter;
r->m_num_extra = num_extra;
r->m_dist_from_out_buf_start = dist_from_out_buf_start;
*pIn_buf_size = pIn_buf_cur - pIn_buf_next;
*pOut_buf_size = pOut_buf_cur - pOut_buf_next;
if ((decomp_flags &
(TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
(status >= 0)) {
const mz_uint8 *ptr = pOut_buf_next;
size_t buf_len = *pOut_buf_size;
mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
s2 = r->m_check_adler32 >> 16;
size_t block_len = buf_len % 5552;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
r->m_check_adler32 = (s2 << 16) + s1;
if ((status == TINFL_STATUS_DONE) &&
(decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
(r->m_check_adler32 != r->m_z_adler32))
status = TINFL_STATUS_ADLER32_MISMATCH;
}
return status;
}
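// The Adler-32 update above defers the modulo by processing input in blocks
// of 5552 bytes -- the largest count for which the running sums cannot
// overflow 32 bits. A standalone sketch of the same technique (illustrative
// only; the library's real implementation is mz_adler32()):
#if 0
static mz_uint32 example_adler32(const mz_uint8 *ptr, size_t buf_len) {
  mz_uint32 s1 = 1, s2 = 0;
  while (buf_len) {
    size_t i, block_len = (buf_len < 5552) ? buf_len : 5552;
    for (i = 0; i < block_len; ++i) {
      s1 += ptr[i];  // sum of bytes
      s2 += s1;      // sum of running sums
    }
    s1 %= 65521U;  // 65521 is the largest prime below 2^16
    s2 %= 65521U;
    ptr += block_len;
    buf_len -= block_len;
  }
  return (s2 << 16) + s1;
}
#endif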
// Higher level helper functions.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tinfl_decompressor decomp;
void *pBuf = NULL, *pNew_buf;
size_t src_buf_ofs = 0, out_buf_capacity = 0;
*pOut_len = 0;
tinfl_init(&decomp);
for (;;) {
size_t src_buf_size = src_buf_len - src_buf_ofs,
dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
tinfl_status status = tinfl_decompress(
&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
(mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
&dst_buf_size,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
src_buf_ofs += src_buf_size;
*pOut_len += dst_buf_size;
if (status == TINFL_STATUS_DONE) break;
new_out_buf_capacity = out_buf_capacity * 2;
if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
if (!pNew_buf) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
pBuf = pNew_buf;
out_buf_capacity = new_out_buf_capacity;
}
return pBuf;
}
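// Example usage -- a minimal sketch (not compiled into the library); assumes
// pComp_data/comp_len hold a complete zlib stream:
#if 0
static void example_inflate_to_heap(const void *pComp_data, size_t comp_len) {
  size_t out_len = 0;
  void *pDecomp = tinfl_decompress_mem_to_heap(pComp_data, comp_len, &out_len,
                                               TINFL_FLAG_PARSE_ZLIB_HEADER);
  if (pDecomp) {
    // out_len bytes of decompressed data are now available at pDecomp.
    MZ_FREE(pDecomp);
  }
}
#endif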
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tinfl_decompressor decomp;
tinfl_status status;
tinfl_init(&decomp);
status =
tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
(mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
: out_buf_len;
}
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
int result = 0;
tinfl_decompressor decomp;
mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
size_t in_buf_ofs = 0, dict_ofs = 0;
if (!pDict) return TINFL_STATUS_FAILED;
tinfl_init(&decomp);
for (;;) {
size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
tinfl_status status =
tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
&in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
(flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
in_buf_ofs += in_buf_size;
if ((dst_buf_size) &&
(!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
break;
if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
result = (status == TINFL_STATUS_DONE);
break;
}
dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
}
MZ_FREE(pDict);
*pIn_buf_size = in_buf_ofs;
return result;
}
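// Example usage -- a minimal sketch (not compiled into the library) showing
// the callback API streaming to a FILE*, so only the TINFL_LZ_DICT_SIZE
// window is held in memory; assumes <stdio.h>:
#if 0
static int example_put_buf(void *pBuf, int len, void *pUser) {
  return fwrite(pBuf, 1, (size_t)len, (FILE *)pUser) == (size_t)len;
}
static int example_inflate_to_file(const void *pComp_data, size_t comp_len,
                                   FILE *pOut_file) {
  return tinfl_decompress_mem_to_callback(pComp_data, &comp_len,
                                          example_put_buf, pOut_file,
                                          TINFL_FLAG_PARSE_ZLIB_HEADER);
}
#endif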
// ------------------- Low-level Compression (independent of all decompression
// APIs)
// Purposely making these tables static for faster init and thread safety.
static const mz_uint16 s_tdefl_len_sym[256] = {
257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
285};
static const mz_uint8 s_tdefl_len_extra[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8,
8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
typedef struct {
mz_uint16 m_key, m_sym_index;
} tdefl_sym_freq;
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
tdefl_sym_freq *pSyms0,
tdefl_sym_freq *pSyms1) {
mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
MZ_CLEAR_OBJ(hist);
for (i = 0; i < num_syms; i++) {
mz_uint freq = pSyms0[i].m_key;
hist[freq & 0xFF]++;
hist[256 + ((freq >> 8) & 0xFF)]++;
}
while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
total_passes--;
for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) {
const mz_uint32 *pHist = &hist[pass << 8];
mz_uint offsets[256], cur_ofs = 0;
for (i = 0; i < 256; i++) {
offsets[i] = cur_ofs;
cur_ofs += pHist[i];
}
for (i = 0; i < num_syms; i++)
pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] =
pCur_syms[i];
{
tdefl_sym_freq *t = pCur_syms;
pCur_syms = pNew_syms;
pNew_syms = t;
}
}
return pCur_syms;
}
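// Note: because each counting pass ping-pongs between pSyms0 and pSyms1, the
// sorted result may end up in either buffer -- callers must use the returned
// pointer rather than assuming pSyms0, as tdefl_optimize_huffman_table() does
// below.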
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
int root, leaf, next, avbl, used, dpth;
if (n == 0)
return;
else if (n == 1) {
A[0].m_key = 1;
return;
}
A[0].m_key += A[1].m_key;
root = 0;
leaf = 2;
for (next = 1; next < n - 1; next++) {
if (leaf >= n || A[root].m_key < A[leaf].m_key) {
A[next].m_key = A[root].m_key;
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = A[leaf++].m_key;
if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
}
A[n - 2].m_key = 0;
for (next = n - 3; next >= 0; next--)
A[next].m_key = A[A[next].m_key].m_key + 1;
avbl = 1;
used = dpth = 0;
root = n - 2;
next = n - 1;
while (avbl > 0) {
while (root >= 0 && (int)A[root].m_key == dpth) {
used++;
root--;
}
while (avbl > used) {
A[next--].m_key = (mz_uint16)(dpth);
avbl--;
}
avbl = 2 * used;
dpth++;
used = 0;
}
}
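// Worked example (illustrative): for symbols sorted by ascending frequency
// with m_key = {1, 1, 2, 3, 5}, the in-place pass above rewrites m_key to the
// Huffman code lengths {4, 4, 3, 2, 1}. The Kraft sum 2*2^-4 + 2^-3 + 2^-2 +
// 2^-1 = 1, so the lengths describe a complete prefix code.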
// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
int code_list_len,
int max_code_size) {
int i;
mz_uint32 total = 0;
if (code_list_len <= 1) return;
for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
pNum_codes[max_code_size] += pNum_codes[i];
for (i = max_code_size; i > 0; i--)
total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
while (total != (1UL << max_code_size)) {
pNum_codes[max_code_size]--;
for (i = max_code_size - 1; i > 0; i--)
if (pNum_codes[i]) {
pNum_codes[i]--;
pNum_codes[i + 1] += 2;
break;
}
total--;
}
}
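// Worked example (illustrative): limiting code sizes {1, 2, 3, 4, 4}
// (num_codes[1..4] = {1, 1, 1, 2}) to max_code_size = 3 first folds both
// 4-bit codes into num_codes[3], leaving a Kraft total of 9 > 2^3; the loop
// then trades the single 2-bit code for two 3-bit codes, ending at
// num_codes[1..3] = {1, 0, 4} with total 8 == 2^3, i.e. lengths {1,3,3,3,3}.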
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
int table_len, int code_size_limit,
int static_table) {
int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
MZ_CLEAR_OBJ(num_codes);
if (static_table) {
for (i = 0; i < table_len; i++)
num_codes[d->m_huff_code_sizes[table_num][i]]++;
} else {
tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
*pSyms;
int num_used_syms = 0;
const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
for (i = 0; i < table_len; i++)
if (pSym_count[i]) {
syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
}
pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
code_size_limit);
MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
for (l = num_codes[i]; l > 0; l--)
d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
}
next_code[1] = 0;
for (j = 0, i = 2; i <= code_size_limit; i++)
next_code[i] = j = ((j + num_codes[i - 1]) << 1);
for (i = 0; i < table_len; i++) {
mz_uint rev_code = 0, code, code_size;
if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
code = next_code[code_size]++;
for (l = code_size; l > 0; l--, code >>= 1)
rev_code = (rev_code << 1) | (code & 1);
d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
}
}
#define TDEFL_PUT_BITS(b, l) \
do { \
mz_uint bits = b; \
mz_uint len = l; \
MZ_ASSERT(bits <= ((1U << len) - 1U)); \
d->m_bit_buffer |= (bits << d->m_bits_in); \
d->m_bits_in += len; \
while (d->m_bits_in >= 8) { \
if (d->m_pOutput_buf < d->m_pOutput_buf_end) \
*d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
d->m_bit_buffer >>= 8; \
d->m_bits_in -= 8; \
} \
} \
MZ_MACRO_END
#define TDEFL_RLE_PREV_CODE_SIZE() \
{ \
if (rle_repeat_count) { \
if (rle_repeat_count < 3) { \
d->m_huff_count[2][prev_code_size] = (mz_uint16)( \
d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
while (rle_repeat_count--) \
packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
} else { \
d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 16; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_repeat_count - 3); \
} \
rle_repeat_count = 0; \
} \
}
#define TDEFL_RLE_ZERO_CODE_SIZE() \
{ \
if (rle_z_count) { \
if (rle_z_count < 3) { \
d->m_huff_count[2][0] = \
(mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \
while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
} else if (rle_z_count <= 10) { \
d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 17; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 3); \
} else { \
d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 18; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 11); \
} \
rle_z_count = 0; \
} \
}
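// Illustrative example of the two RLE macros above: the code-size sequence
// {8, 8, 8, 8, 8, 0, 0, 0, 0, 9} packs to {8, 16, 1, 17, 1, 9} -- symbol 16
// repeats the previous size (its extra bits store repeat_count - 3, so 1
// means 4 repeats) and symbol 17 encodes a short zero run (extra bits store
// run_length - 3).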
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
int num_lit_codes, num_dist_codes, num_bit_lengths;
mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
rle_repeat_count, packed_code_sizes_index;
mz_uint8
code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
prev_code_size = 0xFF;
d->m_huff_count[0][256] = 1;
tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
num_dist_codes);
total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
num_packed_code_sizes = 0;
rle_z_count = 0;
rle_repeat_count = 0;
memset(&d->m_huff_count[2][0], 0,
sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
for (i = 0; i < total_code_sizes_to_pack; i++) {
mz_uint8 code_size = code_sizes_to_pack[i];
if (!code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
if (++rle_z_count == 138) {
TDEFL_RLE_ZERO_CODE_SIZE();
}
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
if (code_size != prev_code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
d->m_huff_count[2][code_size] =
(mz_uint16)(d->m_huff_count[2][code_size] + 1);
packed_code_sizes[num_packed_code_sizes++] = code_size;
} else if (++rle_repeat_count == 6) {
TDEFL_RLE_PREV_CODE_SIZE();
}
}
prev_code_size = code_size;
}
if (rle_repeat_count) {
TDEFL_RLE_PREV_CODE_SIZE();
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
}
tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
TDEFL_PUT_BITS(2, 2);
TDEFL_PUT_BITS(num_lit_codes - 257, 5);
TDEFL_PUT_BITS(num_dist_codes - 1, 5);
for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
if (d->m_huff_code_sizes
[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
break;
num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
for (i = 0; (int)i < num_bit_lengths; i++)
TDEFL_PUT_BITS(
d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
for (packed_code_sizes_index = 0;
packed_code_sizes_index < num_packed_code_sizes;) {
mz_uint code = packed_code_sizes[packed_code_sizes_index++];
MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
if (code >= 16)
TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
"\02\03\07"[code - 16]);
}
}
static void tdefl_start_static_block(tdefl_compressor *d) {
mz_uint i;
mz_uint8 *p = &d->m_huff_code_sizes[0][0];
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
memset(d->m_huff_code_sizes[1], 5, 32);
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
TDEFL_PUT_BITS(1, 2);
}
static const mz_uint mz_bitmasks[17] = {
0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
mz_uint8 *pOutput_buf = d->m_pOutput_buf;
mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
mz_uint64 bit_buffer = d->m_bit_buffer;
mz_uint bits_in = d->m_bits_in;
#define TDEFL_PUT_BITS_FAST(b, l) \
{ \
bit_buffer |= (((mz_uint64)(b)) << bits_in); \
bits_in += (l); \
}
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint s0, s1, n0, n1, sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
// This sequence coaxes MSVC into using cmov's vs. jmp's.
s0 = s_tdefl_small_dist_sym[match_dist & 511];
n0 = s_tdefl_small_dist_extra[match_dist & 511];
s1 = s_tdefl_large_dist_sym[match_dist >> 8];
n1 = s_tdefl_large_dist_extra[match_dist >> 8];
sym = (match_dist < 512) ? s0 : s1;
num_extra_bits = (match_dist < 512) ? n0 : n1;
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
}
}
}
if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
*(mz_uint64 *)pOutput_buf = bit_buffer;
pOutput_buf += (bits_in >> 3);
bit_buffer >>= (bits_in & ~7);
bits_in &= 7;
}
#undef TDEFL_PUT_BITS_FAST
d->m_pOutput_buf = pOutput_buf;
d->m_bits_in = 0;
d->m_bit_buffer = 0;
while (bits_in) {
mz_uint32 n = MZ_MIN(bits_in, 16);
TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
bit_buffer >>= n;
bits_in -= n;
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
if (match_dist < 512) {
sym = s_tdefl_small_dist_sym[match_dist];
num_extra_bits = s_tdefl_small_dist_extra[match_dist];
} else {
sym = s_tdefl_large_dist_sym[match_dist >> 8];
num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
}
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
}
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
if (static_block)
tdefl_start_static_block(d);
else
tdefl_start_dynamic_block(d);
return tdefl_compress_lz_codes(d);
}
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
mz_uint saved_bit_buf, saved_bits_in;
mz_uint8 *pSaved_output_buf;
mz_bool comp_block_succeeded = MZ_FALSE;
int n, use_raw_block =
((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
(d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
mz_uint8 *pOutput_buf_start =
((d->m_pPut_buf_func == NULL) &&
((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
: d->m_output_buf;
d->m_pOutput_buf = pOutput_buf_start;
d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
MZ_ASSERT(!d->m_output_flush_remaining);
d->m_output_flush_ofs = 0;
d->m_output_flush_remaining = 0;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
TDEFL_PUT_BITS(0x78, 8);
TDEFL_PUT_BITS(0x01, 8);
}
TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
pSaved_output_buf = d->m_pOutput_buf;
saved_bit_buf = d->m_bit_buffer;
saved_bits_in = d->m_bits_in;
if (!use_raw_block)
comp_block_succeeded =
tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
(d->m_total_lz_bytes < 48));
// If the block gets expanded, forget the current contents of the output
// buffer and send a raw block instead.
if (((use_raw_block) ||
((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
d->m_total_lz_bytes))) &&
((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
mz_uint i;
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
TDEFL_PUT_BITS(0, 2);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
}
for (i = 0; i < d->m_total_lz_bytes; ++i) {
TDEFL_PUT_BITS(
d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
8);
}
}
// Check for the extremely unlikely (if not impossible) case of the compressed
// block not fitting into the output buffer when using dynamic codes.
else if (!comp_block_succeeded) {
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
tdefl_compress_block(d, MZ_TRUE);
}
if (flush) {
if (flush == TDEFL_FINISH) {
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
mz_uint i, a = d->m_adler32;
for (i = 0; i < 4; i++) {
TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
a <<= 8;
}
}
} else {
mz_uint i, z = 0;
TDEFL_PUT_BITS(0, 3);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, z ^= 0xFFFF) {
TDEFL_PUT_BITS(z & 0xFFFF, 16);
}
}
}
MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
d->m_total_lz_bytes = 0;
d->m_block_index++;
if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
if (d->m_pPut_buf_func) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
} else if (pOutput_buf_start == d->m_output_buf) {
int bytes_to_copy = (int)MZ_MIN(
(size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
bytes_to_copy);
d->m_out_buf_ofs += bytes_to_copy;
if ((n -= bytes_to_copy) != 0) {
d->m_output_flush_ofs = bytes_to_copy;
d->m_output_flush_remaining = n;
}
} else {
d->m_out_buf_ofs += n;
}
}
return d->m_output_flush_remaining;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
s01 = TDEFL_READ_UNALIGNED_WORD(s);
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
q = (const mz_uint16 *)(d->m_dict + probe_pos);
if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
p = s;
probe_len = 32;
do {
} while (
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
if (!probe_len) {
*pMatch_dist = dist;
*pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
break;
} else if ((probe_len = ((mz_uint)(p - s) * 2) +
(mz_uint)(*(const mz_uint8 *)p ==
*(const mz_uint8 *)q)) > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
max_match_len)
break;
c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
}
}
}
#else
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint8 *s = d->m_dict + pos, *p, *q;
mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if ((d->m_dict[probe_pos + match_len] == c0) && \
(d->m_dict[probe_pos + match_len - 1] == c1)) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
p = s;
q = d->m_dict + probe_pos;
for (probe_len = 0; probe_len < max_match_len; probe_len++)
if (*p++ != *q++) break;
if (probe_len > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
c0 = d->m_dict[pos + match_len];
c1 = d->m_dict[pos + match_len - 1];
}
}
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
// Faster, minimally featured LZRW1-style match+parse loop with better
// register utilization. Intended for applications where raw throughput is
// valued more highly than ratio.
mz_uint lookahead_pos = d->m_lookahead_pos,
lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
total_lz_bytes = d->m_total_lz_bytes,
num_flags_left = d->m_num_flags_left;
mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
mz_uint dst_pos =
(lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
d->m_src_buf_left -= num_bytes_to_process;
lookahead_size += num_bytes_to_process;
while (num_bytes_to_process) {
mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
d->m_pSrc += n;
dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
num_bytes_to_process -= n;
}
dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
break;
while (lookahead_size >= 4) {
mz_uint cur_match_dist, cur_match_len = 1;
mz_uint8 *pCur_dict = d->m_dict + cur_pos;
mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
mz_uint hash =
(first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
TDEFL_LEVEL1_HASH_SIZE_MASK;
mz_uint probe_pos = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)lookahead_pos;
if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
dict_size) &&
((*(const mz_uint32 *)(d->m_dict +
(probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
0xFFFFFF) == first_trigram)) {
const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
mz_uint32 probe_len = 32;
do {
} while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
(mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
if (!probe_len)
cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U))) {
cur_match_len = 1;
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
} else {
mz_uint32 s0, s1;
cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 1) &&
(cur_match_dist <= TDEFL_LZ_DICT_SIZE));
cur_match_dist--;
pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
*(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
pLZ_code_buf += 3;
*pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
TDEFL_MIN_MATCH_LEN]]++;
}
} else {
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
}
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
total_lz_bytes += cur_match_len;
lookahead_pos += cur_match_len;
dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
MZ_ASSERT(lookahead_size >= cur_match_len);
lookahead_size -= cur_match_len;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
while (lookahead_size) {
mz_uint8 lit = d->m_dict[cur_pos];
total_lz_bytes++;
*pLZ_code_buf++ = lit;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
lookahead_pos++;
dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
lookahead_size--;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
}
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
mz_uint8 lit) {
d->m_total_lz_bytes++;
*d->m_pLZ_code_buf++ = lit;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
}
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
mz_uint match_len,
mz_uint match_dist) {
mz_uint32 s0, s1;
MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
(match_dist <= TDEFL_LZ_DICT_SIZE));
d->m_total_lz_bytes += match_len;
d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
match_dist -= 1;
d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
d->m_pLZ_code_buf += 3;
*d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
s0 = s_tdefl_small_dist_sym[match_dist & 511];
s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
if (match_len >= TDEFL_MIN_MATCH_LEN)
d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
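// Note on the LZ record format produced by the two recorders above: records
// are grouped 8 per flags byte, each new flag shifted in at bit 7, so once
// the byte is full the oldest record's flag has reached bit 0 (where
// tdefl_compress_lz_codes() begins reading). A 0 flag is followed by a 1-byte
// literal; a 1 flag by a 3-byte match record holding
// match_len - TDEFL_MIN_MATCH_LEN and the 16-bit match_dist - 1 (LSB first).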
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
const mz_uint8 *pSrc = d->m_pSrc;
size_t src_buf_left = d->m_src_buf_left;
tdefl_flush flush = d->m_flush;
while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
// Update dictionary and hash chains. Keeps the lookahead size equal to
// TDEFL_MAX_MATCH_LEN.
if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK,
ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
src_buf_left -= num_bytes_to_process;
d->m_lookahead_size += num_bytes_to_process;
while (pSrc != pSrc_end) {
mz_uint8 c = *pSrc++;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
ins_pos++;
}
} else {
while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
mz_uint8 c = *pSrc++;
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK;
src_buf_left--;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< (TDEFL_LZ_HASH_SHIFT * 2)) ^
(d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
c) &
(TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
}
}
}
d->m_dict_size =
MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
// Simple lazy/greedy parsing state machine.
len_to_move = 1;
cur_match_dist = 0;
cur_match_len =
d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
cur_match_len = 0;
while (cur_match_len < d->m_lookahead_size) {
if (d->m_dict[cur_pos + cur_match_len] != c) break;
cur_match_len++;
}
if (cur_match_len < TDEFL_MIN_MATCH_LEN)
cur_match_len = 0;
else
cur_match_dist = 1;
}
} else {
tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
d->m_lookahead_size, &cur_match_dist, &cur_match_len);
}
if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U)) ||
(cur_pos == cur_match_dist) ||
((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
cur_match_dist = cur_match_len = 0;
}
if (d->m_saved_match_len) {
if (cur_match_len > d->m_saved_match_len) {
tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
if (cur_match_len >= 128) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
d->m_saved_match_len = 0;
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[cur_pos];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
} else {
tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
len_to_move = d->m_saved_match_len - 1;
d->m_saved_match_len = 0;
}
} else if (!cur_match_dist)
tdefl_record_literal(d,
d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
(cur_match_len >= 128)) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
// Move the lookahead forward by len_to_move bytes.
d->m_lookahead_pos += len_to_move;
MZ_ASSERT(d->m_lookahead_size >= len_to_move);
d->m_lookahead_size -= len_to_move;
d->m_dict_size =
MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
// Check if it's time to flush the current LZ codes to the internal output
// buffer.
if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
((d->m_total_lz_bytes > 31 * 1024) &&
(((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
d->m_total_lz_bytes) ||
(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
int n;
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
}
}
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
return MZ_TRUE;
}
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
if (d->m_pIn_buf_size) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
}
if (d->m_pOut_buf_size) {
size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
d->m_output_flush_remaining);
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
d->m_output_buf + d->m_output_flush_ofs, n);
d->m_output_flush_ofs += (mz_uint)n;
d->m_output_flush_remaining -= (mz_uint)n;
d->m_out_buf_ofs += n;
*d->m_pOut_buf_size = d->m_out_buf_ofs;
}
return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE
: TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush) {
if (!d) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return TDEFL_STATUS_BAD_PARAM;
}
d->m_pIn_buf = pIn_buf;
d->m_pIn_buf_size = pIn_buf_size;
d->m_pOut_buf = pOut_buf;
d->m_pOut_buf_size = pOut_buf_size;
d->m_pSrc = (const mz_uint8 *)(pIn_buf);
d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
d->m_out_buf_ofs = 0;
d->m_flush = flush;
if (((d->m_pPut_buf_func != NULL) ==
((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
(d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
(d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
(pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
(pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
}
d->m_wants_to_finish |= (flush == TDEFL_FINISH);
if ((d->m_output_flush_remaining) || (d->m_finished))
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
TDEFL_RLE_MATCHES)) == 0)) {
if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
} else
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
{
if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
}
if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
(pIn_buf))
d->m_adler32 =
(mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
d->m_pSrc - (const mz_uint8 *)pIn_buf);
if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
(!d->m_output_flush_remaining)) {
if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
d->m_finished = (flush == TDEFL_FINISH);
if (flush == TDEFL_FULL_FLUSH) {
MZ_CLEAR_OBJ(d->m_hash);
MZ_CLEAR_OBJ(d->m_next);
d->m_dict_size = 0;
}
}
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush) {
MZ_ASSERT(d->m_pPut_buf_func);
return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
d->m_pPut_buf_func = pPut_buf_func;
d->m_pPut_buf_user = pPut_buf_user;
d->m_flags = (mz_uint)(flags);
d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_pOutput_buf = d->m_output_buf;
d->m_pOutput_buf_end = d->m_output_buf;
d->m_prev_return_status = TDEFL_STATUS_OKAY;
d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
d->m_adler32 = 1;
d->m_pIn_buf = NULL;
d->m_pOut_buf = NULL;
d->m_pIn_buf_size = NULL;
d->m_pOut_buf_size = NULL;
d->m_flush = TDEFL_NO_FLUSH;
d->m_pSrc = NULL;
d->m_src_buf_left = 0;
d->m_out_buf_ofs = 0;
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
return TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
return d->m_prev_return_status;
}
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
tdefl_compressor *pComp;
mz_bool succeeded;
if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
if (!pComp) return MZ_FALSE;
succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
TDEFL_STATUS_OKAY);
succeeded =
succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
TDEFL_STATUS_DONE);
MZ_FREE(pComp);
return succeeded;
}
typedef struct {
size_t m_size, m_capacity;
mz_uint8 *m_pBuf;
mz_bool m_expandable;
} tdefl_output_buffer;
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
void *pUser) {
tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
size_t new_size = p->m_size + len;
if (new_size > p->m_capacity) {
size_t new_capacity = p->m_capacity;
mz_uint8 *pNew_buf;
if (!p->m_expandable) return MZ_FALSE;
do {
new_capacity = MZ_MAX(128U, new_capacity << 1U);
} while (new_size > new_capacity);
pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
if (!pNew_buf) return MZ_FALSE;
p->m_pBuf = pNew_buf;
p->m_capacity = new_capacity;
}
memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
p->m_size = new_size;
return MZ_TRUE;
}
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_len)
return MZ_FALSE;
else
*pOut_len = 0;
out_buf.m_expandable = MZ_TRUE;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return NULL;
*pOut_len = out_buf.m_size;
return out_buf.m_pBuf;
}
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_buf) return 0;
out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
out_buf.m_capacity = out_buf_len;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return 0;
return out_buf.m_size;
}
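// Example round trip -- a minimal sketch (not compiled into the library);
// buffer sizes and the sample string are assumptions, and <string.h> is
// assumed for strlen():
#if 0
static void example_round_trip(void) {
  const char *pStr = "Hello, hello, hello, hello!";
  mz_uint8 comp_buf[256], decomp_buf[256];
  size_t comp_len = tdefl_compress_mem_to_mem(
      comp_buf, sizeof(comp_buf), pStr, strlen(pStr) + 1,
      TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
  if (comp_len != 0)  // 0 indicates failure
    tinfl_decompress_mem_to_mem(decomp_buf, sizeof(decomp_buf), comp_buf,
                                comp_len, TINFL_FLAG_PARSE_ZLIB_HEADER);
}
#endif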
#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy) {
mz_uint comp_flags =
s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
if (!level)
comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
else if (strategy == MZ_FILTERED)
comp_flags |= TDEFL_FILTER_MATCHES;
else if (strategy == MZ_HUFFMAN_ONLY)
comp_flags &= ~TDEFL_MAX_PROBES_MASK;
else if (strategy == MZ_FIXED)
comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
else if (strategy == MZ_RLE)
comp_flags |= TDEFL_RLE_MATCHES;
return comp_flags;
}
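// Example (illustrative): tdefl_create_comp_flags_from_zip_params(6, 15,
// MZ_DEFAULT_STRATEGY) yields the level-6 probe count plus
// TDEFL_WRITE_ZLIB_HEADER (window_bits > 0), ready to pass to tdefl_init()
// or tdefl_compress_mem_to_output().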
#endif // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip) {
// Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
// defined.
static const mz_uint s_tdefl_png_num_probes[11] = {
0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
tdefl_compressor *pComp =
(tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
tdefl_output_buffer out_buf;
int i, bpl = w * num_chans, y, z;
mz_uint32 c;
*pLen_out = 0;
if (!pComp) return NULL;
MZ_CLEAR_OBJ(out_buf);
out_buf.m_expandable = MZ_TRUE;
out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
MZ_FREE(pComp);
return NULL;
}
// write dummy header
for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf);
// compress image data
tdefl_init(
pComp, tdefl_output_buffer_putter, &out_buf,
s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
for (y = 0; y < h; ++y) {
tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
tdefl_compress_buffer(pComp,
(mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
bpl, TDEFL_NO_FLUSH);
}
if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
TDEFL_STATUS_DONE) {
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
// write real header
*pLen_out = out_buf.m_size - 41;
{
static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
mz_uint8 pnghdr[41] = {0x89,
0x50,
0x4e,
0x47,
0x0d,
0x0a,
0x1a,
0x0a,
0x00,
0x00,
0x00,
0x0d,
0x49,
0x48,
0x44,
0x52,
0,
0,
(mz_uint8)(w >> 8),
(mz_uint8)w,
0,
0,
(mz_uint8)(h >> 8),
(mz_uint8)h,
8,
chans[num_chans],
0,
0,
0,
0,
0,
0,
0,
(mz_uint8)(*pLen_out >> 24),
(mz_uint8)(*pLen_out >> 16),
(mz_uint8)(*pLen_out >> 8),
(mz_uint8)*pLen_out,
0x49,
0x44,
0x41,
0x54};
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
for (i = 0; i < 4; ++i, c <<= 8)
((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
memcpy(out_buf.m_pBuf, pnghdr, 41);
}
// write footer (IDAT CRC-32, followed by IEND chunk)
if (!tdefl_output_buffer_putter(
"\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
*pLen_out = 0;
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
*pLen_out + 4);
for (i = 0; i < 4; ++i, c <<= 8)
(out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
// compute final size of file, grab compressed data buffer and return
*pLen_out += 57;
MZ_FREE(pComp);
return out_buf.m_pBuf;
}
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out) {
// Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we
// can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs
// were #defined out)
return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
pLen_out, 6, MZ_FALSE);
}
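// Illustrative sketch (not part of miniz): writing the in-memory PNG produced
// above out to disk. The pixel buffer and output filename are hypothetical.
//
//   size_t png_len = 0;
//   void *pPng = tdefl_write_image_to_png_file_in_memory(pixels, 256, 256,
//                                                        3 /* RGB */,
//                                                        &png_len);
//   if (pPng) {
//     FILE *pF = fopen("out.png", "wb");
//     if (pF) {
//       fwrite(pPng, 1, png_len, pF);
//       fclose(pF);
//     }
//     MZ_FREE(pPng);
//   }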
// ------------------- .ZIP archive reading
#ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
FILE *pFile = NULL;
fopen_s(&pFile, pFilename, pMode);
return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
FILE *pFile = NULL;
if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
// ZIP archive identifiers and record sizes
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
// Central directory header record offsets
MZ_ZIP_CDH_SIG_OFS = 0,
MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
MZ_ZIP_CDH_METHOD_OFS = 10,
MZ_ZIP_CDH_FILE_TIME_OFS = 12,
MZ_ZIP_CDH_FILE_DATE_OFS = 14,
MZ_ZIP_CDH_CRC32_OFS = 16,
MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
MZ_ZIP_CDH_DISK_START_OFS = 34,
MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
// Local directory header offsets
MZ_ZIP_LDH_SIG_OFS = 0,
MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
MZ_ZIP_LDH_METHOD_OFS = 8,
MZ_ZIP_LDH_FILE_TIME_OFS = 10,
MZ_ZIP_LDH_FILE_DATE_OFS = 12,
MZ_ZIP_LDH_CRC32_OFS = 14,
MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
// End of central directory offsets
MZ_ZIP_ECDH_SIG_OFS = 0,
MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
typedef struct {
void *m_p;
size_t m_size, m_capacity;
mz_uint m_element_size;
} mz_zip_array;
struct mz_zip_internal_state_tag {
mz_zip_array m_central_dir;
mz_zip_array m_central_dir_offsets;
mz_zip_array m_sorted_central_dir_offsets;
MZ_FILE *m_pFile;
void *m_pMem;
size_t m_mem_size;
size_t m_mem_capacity;
};
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
(array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
((element_type *)((array_ptr)->m_p))[index]
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
mz_zip_array *pArray) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
memset(pArray, 0, sizeof(mz_zip_array));
}
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t min_new_capacity,
mz_uint growing) {
void *pNew_p;
size_t new_capacity = min_new_capacity;
MZ_ASSERT(pArray->m_element_size);
if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
if (growing) {
new_capacity = MZ_MAX(1, pArray->m_capacity);
while (new_capacity < min_new_capacity) new_capacity *= 2;
}
if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
pArray->m_element_size, new_capacity)))
return MZ_FALSE;
pArray->m_p = pNew_p;
pArray->m_capacity = new_capacity;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_capacity,
mz_uint growing) {
if (new_capacity > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
return MZ_FALSE;
}
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_size,
mz_uint growing) {
if (new_size > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
return MZ_FALSE;
}
pArray->m_size = new_size;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t n) {
return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
mz_zip_array *pArray,
const void *pElements,
size_t n) {
size_t orig_size = pArray->m_size;
if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
return MZ_FALSE;
memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
pElements, n * pArray->m_element_size);
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
struct tm tm;
memset(&tm, 0, sizeof(tm));
tm.tm_isdst = -1;
tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
tm.tm_mon = ((dos_date >> 5) & 15) - 1;
tm.tm_mday = dos_date & 31;
tm.tm_hour = (dos_time >> 11) & 31;
tm.tm_min = (dos_time >> 5) & 63;
tm.tm_sec = (dos_time << 1) & 62;
return mktime(&tm);
}
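// Worked example of the packed DOS date/time layout decoded above (values
// assumed for illustration). Date bits, MSB to LSB: 7-bit year since 1980,
// 4-bit month, 5-bit day. Time bits: 5-bit hour, 6-bit minute, 5-bit
// seconds/2.
//
//   dos_date = 0x5631 -> year 1980 + 43 = 2023, month 1, day 17
//   dos_time = 0x6C20 -> hour 13, minute 33, second 0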
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
struct tm tm_struct;
struct tm *tm = &tm_struct;
errno_t err = localtime_s(tm, &time);
if (err) {
*pDOS_date = 0;
*pDOS_time = 0;
return;
}
#else
struct tm *tm = localtime(&time);
#endif
*pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
((tm->tm_sec) >> 1));
*pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif
#ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
(void)pFilename;
*pDOS_date = *pDOS_time = 0;
#else
struct MZ_FILE_STAT_STRUCT file_stat;
// On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
// bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
time_t modified_time) {
struct utimbuf t;
t.actime = access_time;
t.modtime = modified_time;
return !utime(pFilename, &t);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
mz_uint32 flags) {
(void)flags;
if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_READING;
pZip->m_archive_size = 0;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets,
mz_uint l_index, mz_uint r_index) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (l_len < r_len) : (l < r);
}
#define MZ_SWAP_UINT32(a, b) \
do { \
mz_uint32 t = a; \
a = b; \
b = t; \
} \
MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
mz_zip_archive *pZip) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
int start = (size - 2) >> 1, end;
while (start >= 0) {
int child, root = start;
for (;;) {
if ((child = (root << 1) + 1) >= size) break;
child +=
(((child + 1) < size) &&
(mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1])));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
start--;
}
end = size - 1;
while (end > 0) {
int child, root = 0;
MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
for (;;) {
if ((child = (root << 1) + 1) >= end) break;
child +=
(((child + 1) < end) &&
mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1]));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
end--;
}
}
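// The sort above walks an implicit binary max-heap stored directly in
// pIndices[]: a node at index i has children at 2*i + 1 and 2*i + 2, with no
// extra memory needed. Illustrative index math (a sketch):
//
//   i = 0 -> children 1, 2
//   i = 1 -> children 3, 4
//   start = (size - 2) >> 1 is the last index that can have a child, which is
//   why the build phase begins there and sifts down toward index 0.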
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
mz_uint32 flags) {
mz_uint cdir_size, num_this_disk, cdir_disk_index;
mz_uint64 cdir_ofs;
mz_int64 cur_file_ofs;
const mz_uint8 *p;
mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
mz_bool sort_central_dir =
((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
// Basic sanity check - reject archives which are too small to contain an end
// of central directory record.
if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
// Find the end of central directory record by scanning the file from the end
// towards the beginning.
cur_file_ofs =
MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
for (;;) {
int i,
n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
return MZ_FALSE;
for (i = n - 4; i >= 0; --i)
if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
if (i >= 0) {
cur_file_ofs += i;
break;
}
if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
(0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
return MZ_FALSE;
cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
}
// Read and verify the end of central directory record.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
((pZip->m_total_files =
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
return MZ_FALSE;
num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
if (((num_this_disk | cdir_disk_index) != 0) &&
((num_this_disk != 1) || (cdir_disk_index != 1)))
return MZ_FALSE;
if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;
pZip->m_central_directory_file_ofs = cdir_ofs;
if (pZip->m_total_files) {
mz_uint i, n;
// Read the entire central directory into a heap block, and allocate another
// heap block to hold the unsorted central dir file record offsets, and
// another to hold the sorted indices.
if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
MZ_FALSE)) ||
(!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
pZip->m_total_files, MZ_FALSE)))
return MZ_FALSE;
if (sort_central_dir) {
if (!mz_zip_array_resize(pZip,
&pZip->m_pState->m_sorted_central_dir_offsets,
pZip->m_total_files, MZ_FALSE))
return MZ_FALSE;
}
if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
pZip->m_pState->m_central_dir.m_p,
cdir_size) != cdir_size)
return MZ_FALSE;
// Now create an index into the central directory file records, do some
// basic sanity checking on each record, and check for zip64 entries (which
// are not yet supported).
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
mz_uint total_header_size, comp_size, decomp_size, disk_index;
if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
(MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
return MZ_FALSE;
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
i) =
(mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
if (sort_central_dir)
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
mz_uint32, i) = i;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
(decomp_size != comp_size)) ||
(decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
(comp_size == 0xFFFFFFFF))
return MZ_FALSE;
disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
n)
return MZ_FALSE;
n -= total_header_size;
p += total_header_size;
}
}
if (sort_central_dir)
mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
return MZ_TRUE;
}
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags) {
if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE;
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
size_t s = (file_ofs >= pZip->m_archive_size)
? 0
: (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
return s;
}
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags) {
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
pZip->m_pRead = mz_zip_mem_read_func;
pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
pZip->m_pState->m_pMem = (void *)pMem;
#endif
pZip->m_pState->m_mem_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags) {
mz_uint64 file_size;
MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
if (!pFile) return MZ_FALSE;
if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
file_size = MZ_FTELL64(pFile);
if (!mz_zip_reader_init_internal(pZip, flags)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
pZip->m_pRead = mz_zip_file_read_func;
pZip->m_pIO_opaque = pZip;
pZip->m_pState->m_pFile = pFile;
pZip->m_archive_size = file_size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
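// Illustrative end-to-end read (a sketch, not part of miniz; "archive.zip" is
// hypothetical; error handling trimmed):
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (mz_zip_reader_init_file(&zip, "archive.zip", 0)) {
//     mz_uint i, n = mz_zip_reader_get_num_files(&zip);
//     for (i = 0; i < n; i++) {
//       mz_zip_archive_file_stat st;
//       if (mz_zip_reader_file_stat(&zip, i, &st))
//         printf("%s: %u bytes\n", st.m_filename, (mz_uint)st.m_uncomp_size);
//     }
//     mz_zip_reader_end(&zip);
//   }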
#endif // #ifndef MINIZ_NO_STDIO
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
return pZip ? pZip->m_total_files : 0;
}
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
mz_zip_archive *pZip, mz_uint file_index) {
if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return NULL;
return &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
}
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint m_bit_flag;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
return (m_bit_flag & 1);
}
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint filename_len, external_attr;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
// First see if the filename ends with a '/' character.
filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_len) {
if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
return MZ_TRUE;
}
// Bugfix: This code was also checking if the internal attribute was non-zero,
// which wasn't correct.
// Most/all zip writers (hopefully) set DOS file/directory attributes in the
// low 16-bits, so check for the DOS directory flag and ignore the source OS
// ID in the created by field.
// FIXME: Remove this check? Is it necessary - we already check the filename.
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
if ((external_attr & 0x10) != 0) return MZ_TRUE;
return MZ_FALSE;
}
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if ((!p) || (!pStat)) return MZ_FALSE;
// Unpack the central directory record.
pStat->m_file_index = file_index;
pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
pStat->m_time =
mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
// Copy as much of the filename and comment as possible.
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pStat->m_filename[n] = '\0';
n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
pStat->m_comment_size = n;
memcpy(pStat->m_comment,
p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
n);
pStat->m_comment[n] = '\0';
return MZ_TRUE;
}
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) {
if (filename_buf_size) pFilename[0] = '\0';
return 0;
}
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_buf_size) {
n = MZ_MIN(n, filename_buf_size - 1);
memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pFilename[n] = '\0';
}
return n + 1;
}
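// Note the return value above is the filename length plus one byte for the
// terminator, so the usual two-call pattern works (a sketch; malloc is used
// here for brevity):
//
//   mz_uint needed = mz_zip_reader_get_filename(pZip, file_index, NULL, 0);
//   char *pName = (char *)malloc(needed);
//   if (pName && mz_zip_reader_get_filename(pZip, file_index, pName, needed))
//     { /* pName holds the NUL-terminated filename */ }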
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
const char *pB,
mz_uint len,
mz_uint flags) {
mz_uint i;
if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len);
for (i = 0; i < len; ++i)
if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE;
return MZ_TRUE;
}
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
mz_uint r_len) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
const mz_uint filename_len = (mz_uint)strlen(pFilename);
int l = 0, h = size - 1;
while (l <= h) {
int m = (l + h) >> 1, file_index = pIndices[m],
comp =
mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
file_index, pFilename, filename_len);
if (!comp)
return file_index;
else if (comp < 0)
l = m + 1;
else
h = m - 1;
}
return -1;
}
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags) {
mz_uint file_index;
size_t name_len, comment_len;
if ((!pZip) || (!pZip->m_pState) || (!pName) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return -1;
if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
(!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
return mz_zip_reader_locate_file_binary_search(pZip, pName);
name_len = strlen(pName);
if (name_len > 0xFFFF) return -1;
comment_len = pComment ? strlen(pComment) : 0;
if (comment_len > 0xFFFF) return -1;
for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
const char *pFilename =
(const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
if (filename_len < name_len) continue;
if (comment_len) {
mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
file_comment_len =
MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
const char *pFile_comment = pFilename + filename_len + file_extra_len;
if ((file_comment_len != comment_len) ||
(!mz_zip_reader_string_equal(pComment, pFile_comment,
file_comment_len, flags)))
continue;
}
if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
int ofs = filename_len - 1;
do {
if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
(pFilename[ofs] == ':'))
break;
} while (--ofs >= 0);
ofs++;
pFilename += ofs;
filename_len -= ofs;
}
if ((filename_len == name_len) &&
(mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
return file_index;
}
return -1;
}
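// Illustrative lookups (a sketch; the archive and names are hypothetical).
// The sorted index built at init time makes the plain case a binary search;
// supplying a comment, MZ_ZIP_FLAG_IGNORE_PATH, or MZ_ZIP_FLAG_CASE_SENSITIVE
// falls back to the linear scan above:
//
//   int idx = mz_zip_reader_locate_file(&zip, "dir/readme.txt", NULL, 0);
//   int any = mz_zip_reader_locate_file(&zip, "readme.txt", NULL,
//                                       MZ_ZIP_FLAG_IGNORE_PATH);
//   if (idx >= 0) { /* found; idx is the file index */ }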
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size) {
int status = TINFL_STATUS_DONE;
mz_uint64 needed_size, cur_file_ofs, comp_remaining,
out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
mz_zip_archive_file_stat file_stat;
void *pRead_buf;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
tinfl_decompressor inflator;
if ((buf_size) && (!pBuf)) return MZ_FALSE;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Ensure supplied output buffer is large enough.
needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
: file_stat.m_uncomp_size;
if (buf_size < needed_size) return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
(size_t)needed_size) != needed_size)
return MZ_FALSE;
return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
}
// Decompress the file either directly from memory or from a file input
// buffer.
tinfl_init(&inflator);
if (pZip->m_pState->m_pMem) {
// Read directly from the archive in memory.
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else if (pUser_read_buf) {
// Use a user provided read buffer.
if (!user_read_buf_size) return MZ_FALSE;
pRead_buf = (mz_uint8 *)pUser_read_buf;
read_buf_size = user_read_buf_size;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
} else {
// Temporarily allocate a read buffer.
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(read_buf_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
return MZ_FALSE;
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
do {
size_t in_buf_size,
out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
(comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
out_buf_ofs += out_buf_size;
} while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
if (status == TINFL_STATUS_DONE) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, pUser_read_buf,
user_read_buf_size);
}
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags) {
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, NULL, 0);
}
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags) {
return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
buf_size, flags, NULL, 0);
}
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags) {
mz_uint64 comp_size, uncomp_size, alloc_size;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
void *pBuf;
if (pSize) *pSize = 0;
if (!p) return NULL;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
return NULL;
if (NULL ==
(pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
return NULL;
if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
flags)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return NULL;
}
if (pSize) *pSize = (size_t)alloc_size;
return pBuf;
}
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) {
if (pSize) *pSize = 0;
return NULL;
}
return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
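// Illustrative one-shot extraction (a sketch; the entry name is
// hypothetical). The returned block comes from the archive's m_pAlloc
// callback, so release it through m_pFree:
//
//   size_t len = 0;
//   void *pData =
//       mz_zip_reader_extract_file_to_heap(&zip, "file.txt", &len, 0);
//   if (pData) {
//     // len bytes of decompressed, CRC-checked data.
//     zip.m_pFree(zip.m_pAlloc_opaque, pData);
//   }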
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int status = TINFL_STATUS_DONE;
mz_uint file_crc32 = MZ_CRC32_INIT;
mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
out_buf_ofs = 0, cur_file_ofs;
mz_zip_archive_file_stat file_stat;
void *pRead_buf = NULL;
void *pWrite_buf = NULL;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
// Decompress the file either directly from memory or from a file input
// buffer.
if (pZip->m_pState->m_pMem) {
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else {
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pState->m_pMem) {
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#endif
return MZ_FALSE;
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
status = TINFL_STATUS_FAILED;
else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
(size_t)file_stat.m_comp_size);
cur_file_ofs += file_stat.m_comp_size;
out_buf_ofs += file_stat.m_comp_size;
comp_remaining = 0;
} else {
while (comp_remaining) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 = (mz_uint32)mz_crc32(
file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
out_buf_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
}
}
} else {
tinfl_decompressor inflator;
tinfl_init(&inflator);
if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
TINFL_LZ_DICT_SIZE)))
status = TINFL_STATUS_FAILED;
else {
do {
mz_uint8 *pWrite_buf_cur =
(mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
size_t in_buf_size,
out_buf_size =
TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
if (out_buf_size) {
if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
out_buf_size) {
status = TINFL_STATUS_FAILED;
break;
}
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
status = TINFL_STATUS_FAILED;
break;
}
}
} while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
(status == TINFL_STATUS_HAS_MORE_OUTPUT));
}
}
if ((status == TINFL_STATUS_DONE) &&
(!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(file_crc32 != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque,
flags);
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
const void *pBuf, size_t n) {
(void)ofs;
return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename,
mz_uint flags) {
mz_bool status;
mz_zip_archive_file_stat file_stat;
MZ_FILE *pFile;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
pFile = MZ_FOPEN(pDst_filename, "wb");
if (!pFile) return MZ_FALSE;
status = mz_zip_reader_extract_to_callback(
pZip, file_index, mz_zip_file_write_callback, pFile, flags);
if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
if (status)
mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
return status;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
if (pZip->m_pState) {
mz_zip_internal_state *pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
}
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags) {
int file_index =
mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}
#endif
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
}
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
p[2] = (mz_uint8)(v >> 16);
p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (pZip->m_file_offset_alignment) {
// Ensure the user-specified file offset alignment is a power of 2.
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
return MZ_FALSE;
}
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_archive_size = existing_size;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
if ((!n) ||
((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
if ((!n) ||
((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
return 0;
if (new_size > pState->m_mem_capacity) {
void *pNew_block;
size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
while (new_capacity < new_size) new_capacity *= 2;
if (NULL == (pNew_block = pZip->m_pRealloc(
pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
return 0;
pState->m_pMem = pNew_block;
pState->m_mem_capacity = new_capacity;
}
memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
pState->m_mem_size = (size_t)new_size;
return n;
}
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size) {
pZip->m_pWrite = mz_zip_heap_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
size_to_reserve_at_beginning))) {
if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_mem_capacity = initial_allocation_size;
}
return MZ_TRUE;
}
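// Illustrative in-memory archive creation (a sketch; entry name and sizes are
// hypothetical; error handling trimmed). mz_zip_writer_finalize_heap_archive(),
// part of miniz's writer API, hands back the finished archive bytes:
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (mz_zip_writer_init_heap(&zip, 0, 64 * 1024)) {
//     const char *pStr = "hello";
//     mz_zip_writer_add_mem(&zip, "greeting.txt", pStr, strlen(pStr),
//                           MZ_BEST_COMPRESSION);
//     void *pBuf = NULL;
//     size_t size = 0;
//     mz_zip_writer_finalize_heap_archive(&zip, &pBuf, &size);
//     // pBuf/size now own the archive; free pBuf when done.
//     mz_zip_writer_end(&zip);
//   }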
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning) {
MZ_FILE *pFile;
pZip->m_pWrite = mz_zip_file_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_pFile = pFile;
if (size_to_reserve_at_beginning) {
mz_uint64 cur_ofs = 0;
char buf[4096];
MZ_CLEAR_OBJ(buf);
do {
size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
cur_ofs += n;
size_to_reserve_at_beginning -= n;
} while (size_to_reserve_at_beginning);
}
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
// No sense in trying to write to an archive that's already at the supported
// max size
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
pState = pZip->m_pState;
if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
(void)pFilename;
return MZ_FALSE;
#else
// Archive is being read from stdio - try to reopen as writable.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
if (!pFilename) return MZ_FALSE;
pZip->m_pWrite = mz_zip_file_write_func;
if (NULL ==
(pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
// The mz_zip_archive is now in a bogus state because pState->m_pFile is
// NULL, so just close it.
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
#endif // #ifdef MINIZ_NO_STDIO
} else if (pState->m_pMem) {
// Archive lives in a memory block. Assume it's from the heap that we can
// resize using the realloc callback.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
pState->m_mem_capacity = pState->m_mem_size;
pZip->m_pWrite = mz_zip_heap_write_func;
}
// Archive is being read via a user provided read function - make sure the
// user has specified a write function too.
else if (!pZip->m_pWrite)
return MZ_FALSE;
// Start writing new files at the archive's current central directory
// location.
pZip->m_archive_size = pZip->m_central_directory_file_ofs;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_central_directory_file_ofs = 0;
return MZ_TRUE;
}
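// Illustrative in-place append (a sketch; the filename is hypothetical; error
// handling trimmed): open an existing archive for reading, convert it to a
// writer, add an entry, then finalize with mz_zip_writer_finalize_archive():
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (mz_zip_reader_init_file(&zip, "archive.zip", 0) &&
//       mz_zip_writer_init_from_reader(&zip, "archive.zip")) {
//     mz_zip_writer_add_mem(&zip, "new.txt", "data", 4, MZ_DEFAULT_LEVEL);
//     mz_zip_writer_finalize_archive(&zip);
//     mz_zip_writer_end(&zip);
//   }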
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags) {
return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
level_and_flags, 0, 0);
}
typedef struct {
mz_zip_archive *m_pZip;
mz_uint64 m_cur_archive_file_ofs;
mz_uint64 m_comp_size;
} mz_zip_writer_add_state;
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
void *pUser) {
mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
pState->m_cur_archive_file_ofs, pBuf,
len) != len)
return MZ_FALSE;
pState->m_cur_archive_file_ofs += len;
pState->m_comp_size += len;
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_local_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_central_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_add_to_central_dir(
mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
const void *pExtra, mz_uint16 extra_size, const void *pComment,
mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
mz_uint32 ext_attributes) {
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
size_t orig_central_dir_size = pState->m_central_dir.m_size;
mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
// No zip64 support yet
if ((local_header_ofs > 0xFFFFFFFF) ||
(((mz_uint64)pState->m_central_dir.m_size +
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
comment_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_central_dir_header(
pZip, central_dir_header, filename_size, extra_size, comment_size,
uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
dos_date, local_header_ofs, ext_attributes))
return MZ_FALSE;
if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
filename_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
extra_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
comment_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
&central_dir_ofs, 1))) {
// Try to push the central directory array back into its original state.
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
return MZ_TRUE;
}
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
// Basic ZIP archive filename validity checks: Valid filenames cannot start
// with a forward slash, cannot contain a drive letter, and cannot use
// DOS-style backward slashes.
if (*pArchive_name == '/') return MZ_FALSE;
while (*pArchive_name) {
if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE;
pArchive_name++;
}
return MZ_TRUE;
}
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
mz_zip_archive *pZip) {
mz_uint32 n;
if (!pZip->m_file_offset_alignment) return 0;
n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
return (pZip->m_file_offset_alignment - n) &
(pZip->m_file_offset_alignment - 1);
}
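// Worked example of the alignment math above (values assumed): with
// m_file_offset_alignment = 16 (a power of 2) and m_archive_size = 100:
//
//   n = 100 & 15 = 4               // bytes past the last aligned boundary
//   padding = (16 - 4) & 15 = 12   // pads the next entry to offset 112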
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
mz_uint64 cur_file_ofs, mz_uint32 n) {
char buf[4096];
memset(buf, 0, MZ_MIN(sizeof(buf), n));
while (n) {
mz_uint32 s = MZ_MIN(sizeof(buf), n);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
return MZ_FALSE;
cur_file_ofs += s;
n -= s;
}
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32) {
mz_uint16 method = 0, dos_time = 0, dos_date = 0;
mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
tdefl_compressor *pComp = NULL;
mz_bool store_data_uncompressed;
mz_zip_internal_state *pState;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
store_data_uncompressed =
((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
(!pArchive_name) || ((comment_size) && (!pComment)) ||
(pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
pState = pZip->m_pState;
if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
return MZ_FALSE;
// No zip64 support yet
if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
{
time_t cur_time;
time(&cur_time);
mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
}
#endif // #ifndef MINIZ_NO_TIME
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
// Set DOS Subdirectory attribute bit.
ext_attributes |= 0x10;
// Subdirectories cannot contain data.
if ((buf_size) || (uncomp_size)) return MZ_FALSE;
}
// Try to do any allocations before writing to the archive, so if an
// allocation fails the file remains unmodified. (A good idea if we're doing
// an in-place modification.)
if ((!mz_zip_array_ensure_room(
pZip, &pState->m_central_dir,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
(!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
return MZ_FALSE;
if ((!store_data_uncompressed) && (buf_size)) {
if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
return MZ_FALSE;
}
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
uncomp_crc32 =
(mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
uncomp_size = buf_size;
if (uncomp_size <= 3) {
level = 0;
store_data_uncompressed = MZ_TRUE;
}
}
if (store_data_uncompressed) {
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
buf_size) != buf_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += buf_size;
comp_size = buf_size;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED;
} else if (buf_size) {
mz_zip_writer_add_state state;
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) ||
(tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
TDEFL_STATUS_DONE)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pComp = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
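// A minimal usage sketch (not part of miniz; the function name is
// illustrative): build a ZIP in memory with the heap writer and take
// ownership of the finished buffer. With the default allocators the caller
// releases *pBuf with free().
#if 0
static mz_bool example_build_heap_zip(void **pBuf, size_t *pSize) {
  mz_zip_archive zip;
  const char *payload = "hello, zip";
  MZ_CLEAR_OBJ(zip);
  if (!mz_zip_writer_init_heap(&zip, 0, 0)) return MZ_FALSE;
  // uncomp_size/uncomp_crc32 are passed as 0: without
  // MZ_ZIP_FLAG_COMPRESSED_DATA, mz_zip_writer_add_mem_ex() computes both
  // from the buffer itself.
  if (!mz_zip_writer_add_mem_ex(&zip, "hello.txt", payload, strlen(payload),
                                NULL, 0, MZ_BEST_COMPRESSION, 0, 0) ||
      !mz_zip_writer_finalize_heap_archive(&zip, pBuf, pSize)) {
    mz_zip_writer_end(&zip);
    return MZ_FALSE;
  }
  return mz_zip_writer_end(&zip);
}
#endif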
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
MZ_FILE *pSrc_file = NULL;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
return MZ_FALSE;
pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
if (!pSrc_file) return MZ_FALSE;
MZ_FSEEK64(pSrc_file, 0, SEEK_END);
uncomp_size = MZ_FTELL64(pSrc_file);
MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
if (uncomp_size > 0xFFFFFFFF) {
// No zip64 support yet
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (uncomp_size <= 3) level = 0;
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (uncomp_size) {
mz_uint64 uncomp_remaining = uncomp_size;
void *pRead_buf =
pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
if (!pRead_buf) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (!level) {
while (uncomp_remaining) {
mz_uint n =
(mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
(pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
n) != n)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
uncomp_crc32 =
(mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
uncomp_remaining -= n;
cur_archive_file_ofs += n;
}
comp_size = uncomp_size;
} else {
mz_bool result = MZ_FALSE;
mz_zip_writer_add_state state;
tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
if (!pComp) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
for (;;) {
size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
tdefl_status status;
if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
break;
uncomp_crc32 = (mz_uint32)mz_crc32(
uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
uncomp_remaining -= in_buf_size;
status = tdefl_compress_buffer(
pComp, pRead_buf, in_buf_size,
uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
if (status == TDEFL_STATUS_DONE) {
result = MZ_TRUE;
break;
} else if (status != TDEFL_STATUS_OKAY)
break;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
if (!result) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
}
MZ_FCLOSE(pSrc_file);
pSrc_file = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index) {
mz_uint n, bit_flags, num_alignment_padding_bytes;
mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
size_t orig_central_dir_size;
mz_zip_internal_state *pState;
void *pBuf;
const mz_uint8 *pSrc_central_header;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
if (NULL ==
(pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
return MZ_FALSE;
pState = pZip->m_pState;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
0xFFFFFFFF))
return MZ_FALSE;
cur_src_file_ofs =
MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
cur_dst_file_ofs = pZip->m_archive_size;
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
num_alignment_padding_bytes))
return MZ_FALSE;
cur_dst_file_ofs += num_alignment_padding_bytes;
local_dir_header_ofs = cur_dst_file_ofs;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
comp_bytes_remaining =
n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
if (NULL == (pBuf = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1,
(size_t)MZ_MAX(sizeof(mz_uint32) * 4,
MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
comp_bytes_remaining)))))
return MZ_FALSE;
while (comp_bytes_remaining) {
n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_dst_file_ofs += n;
comp_bytes_remaining -= n;
}
bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
if (bit_flags & 8) {
// Copy data descriptor
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
cur_dst_file_ofs += n;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
// no zip64 support yet
if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE;
orig_central_dir_size = pState->m_central_dir.m_size;
memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
local_dir_header_ofs);
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
return MZ_FALSE;
n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
if (!mz_zip_array_push_back(
pZip, &pState->m_central_dir,
pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE;
n = (mz_uint32)orig_central_dir_size;
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
pZip->m_total_files++;
pZip->m_archive_size = cur_dst_file_ofs;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_uint64 central_dir_ofs, central_dir_size;
mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
pState = pZip->m_pState;
// no zip64 support yet
if ((pZip->m_total_files > 0xFFFF) ||
((pZip->m_archive_size + pState->m_central_dir.m_size +
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
central_dir_ofs = 0;
central_dir_size = 0;
if (pZip->m_total_files) {
// Write central directory
central_dir_ofs = pZip->m_archive_size;
central_dir_size = pState->m_central_dir.m_size;
pZip->m_central_directory_file_ofs = central_dir_ofs;
if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
pState->m_central_dir.m_p,
(size_t)central_dir_size) != central_dir_size)
return MZ_FALSE;
pZip->m_archive_size += central_dir_size;
}
// Write end of central directory record
MZ_CLEAR_OBJ(hdr);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
pZip->m_total_files);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
sizeof(hdr)) != sizeof(hdr))
return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_archive_size += sizeof(hdr);
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize) {
if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE;
if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE;
if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE;
*pBuf = pZip->m_pState->m_pMem;
*pSize = pZip->m_pState->m_mem_size;
pZip->m_pState->m_pMem = NULL;
pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0;
return MZ_TRUE;
}
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_bool status = MZ_TRUE;
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
return MZ_FALSE;
pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
pState->m_pMem = NULL;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return status;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_bool status, created_new_archive = MZ_FALSE;
mz_zip_archive zip_archive;
struct MZ_FILE_STAT_STRUCT file_stat;
MZ_CLEAR_OBJ(zip_archive);
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
((comment_size) && (!pComment)) ||
((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
// Create a new archive.
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
return MZ_FALSE;
created_new_archive = MZ_TRUE;
} else {
// Append to an existing archive.
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return MZ_FALSE;
if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
mz_zip_reader_end(&zip_archive);
return MZ_FALSE;
}
}
status =
mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
pComment, comment_size, level_and_flags, 0, 0);
// Always finalize, even if adding failed for some reason, so we have a valid
// central directory. (This may not always succeed, but we can try.)
if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
if ((!status) && (created_new_archive)) {
// It's a new archive and something went wrong, so just delete it.
int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
(void)ignoredStatus;
}
return status;
}
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint flags) {
int file_index;
mz_zip_archive zip_archive;
void *p = NULL;
if (pSize) *pSize = 0;
if ((!pZip_filename) || (!pArchive_name)) return NULL;
MZ_CLEAR_OBJ(zip_archive);
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return NULL;
if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL,
flags)) >= 0)
p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
mz_zip_reader_end(&zip_archive);
return p;
}
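// Round-trip sketch for the two helpers above (file and member names are
// hypothetical): append one member to a ZIP on disk, then read it back into a
// heap buffer, which the caller must release (free()/MZ_FREE() with the
// default allocators).
#if 0
static mz_bool example_roundtrip(void) {
  const char *msg = "sample payload";
  size_t size = 0;
  void *p;
  if (!mz_zip_add_mem_to_archive_file_in_place("example.zip", "note.txt", msg,
                                               strlen(msg), NULL, 0,
                                               MZ_DEFAULT_LEVEL))
    return MZ_FALSE;
  p = mz_zip_extract_archive_file_to_heap("example.zip", "note.txt", &size, 0);
  if (!p) return MZ_FALSE;
  MZ_FREE(p);
  return size == strlen(msg);
}
#endif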
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
#ifdef __clang__
#pragma clang diagnostic pop
#endif
} // namespace miniz
#else
// Reuse the MINIZ_LITTLE_ENDIAN macro
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif // TINYEXR_USE_MINIZ
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
static void SetErrorMessage(const std::string &msg, const char **err) {
if (err) {
#ifdef _WIN32
(*err) = _strdup(msg.c_str());
#else
(*err) = strdup(msg.c_str());
#endif
}
}
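// Note: SetErrorMessage() heap-allocates the duplicated string; callers of
// the public API are expected to release it (tinyexr exposes
// FreeEXRErrorMessage() for that purpose).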
static const int kEXRVersionSize = 8;
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
}
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned short tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[1];
dst[1] = src[0];
#endif
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
static void cpy4(int *dst_val, const int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static void cpy4(float *dst_val, const float *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
dst[5] = src[5];
dst[6] = src[6];
dst[7] = src[7];
}
#endif
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
tinyexr::tinyexr_uint64 tmp = (*val);
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[7];
dst[1] = src[6];
dst[2] = src[5];
dst[3] = src[4];
dst[4] = src[3];
dst[5] = src[2];
dst[6] = src[1];
dst[7] = src[0];
#endif
}
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
union FP32 {
unsigned int u;
float f;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 23;
unsigned int Exponent : 8;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 8;
unsigned int Mantissa : 23;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
union FP16 {
unsigned short u;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 10;
unsigned int Exponent : 5;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 5;
unsigned int Mantissa : 10;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
static FP32 half_to_float(FP16 h) {
static const FP32 magic = {113 << 23};
static const unsigned int shifted_exp = 0x7c00
<< 13; // exponent mask after shift
FP32 o;
o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits
unsigned int exp_ = shifted_exp & o.u; // just the exponent
o.u += (127 - 15) << 23; // exponent adjust
// handle exponent special cases
if (exp_ == shifted_exp) // Inf/NaN?
o.u += (128 - 16) << 23; // extra exp adjust
else if (exp_ == 0) // Zero/Denormal?
{
o.u += 1 << 23; // extra exp adjust
o.f -= magic.f; // renormalize
}
o.u |= (h.u & 0x8000U) << 16U; // sign bit
return o;
}
static FP16 float_to_half_full(FP32 f) {
FP16 o = {0};
// Based on ISPC reference code (with minor modifications)
if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow)
o.s.Exponent = 0;
else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set)
{
o.s.Exponent = 31;
o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf
} else // Normalized number
{
// Exponent unbias the single, then bias the halfp
int newexp = f.s.Exponent - 127 + 15;
if (newexp >= 31) // Overflow, return signed infinity
o.s.Exponent = 31;
else if (newexp <= 0) // Underflow
{
if ((14 - newexp) <= 24) // Mantissa might be non-zero
{
unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit
o.s.Mantissa = mant >> (14 - newexp);
if ((mant >> (13 - newexp)) & 1) // Check for rounding
o.u++; // Round, might overflow into exp bit, but this is OK
}
} else {
o.s.Exponent = static_cast<unsigned int>(newexp);
o.s.Mantissa = f.s.Mantissa >> 13;
if (f.s.Mantissa & 0x1000) // Check for rounding
o.u++; // Round, might overflow to inf, this is OK
}
}
o.s.Sign = f.s.Sign;
return o;
}
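// Worked example (follows from the bit layouts above): the half-precision
// value 1.0 is 0x3C00 (sign 0, exponent 15, mantissa 0). half_to_float()
// shifts it to 0x07800000 and adds the (127 - 15) << 23 rebias, yielding
// 0x3F800000 == 1.0f. Conversely, float_to_half_full() on 1.0f computes
// newexp = 127 - 127 + 15 = 15 with a zero mantissa and returns 0x3C00.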
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RANDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NUL ('\0').
const char *p = ptr;
const char *q = ptr;
while ((size_t(q - ptr) < len) && (*q) != 0) {
q++;
}
if (size_t(q - ptr) >= len) {
(*s) = std::string();
return NULL;
}
(*s) = std::string(p, q);
return q + 1; // skip '\0'
}
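// ReadAttribute() below parses one serialized EXR attribute, which is laid
// out as
//
//   name (NUL-terminated) | type (NUL-terminated) | size (uint32 LE) | data
//
// For example, the required "compression" attribute has type "compression",
// a size field of 1 and a single compression-id byte as its data.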
static bool ReadAttribute(std::string *name, std::string *type,
std::vector<unsigned char> *data, size_t *marker_size,
const char *marker, size_t size) {
size_t name_len = strnlen(marker, size);
if (name_len == size) {
// String does not have a terminating character.
return false;
}
*name = std::string(marker, name_len);
marker += name_len + 1;
size -= name_len + 1;
size_t type_len = strnlen(marker, size);
if (type_len == size) {
return false;
}
*type = std::string(marker, type_len);
marker += type_len + 1;
size -= type_len + 1;
if (size < sizeof(uint32_t)) {
return false;
}
uint32_t data_len;
memcpy(&data_len, marker, sizeof(uint32_t));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len == 0) {
if ((*type).compare("string") == 0) {
// Accept empty string attribute.
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
data->resize(1);
(*data)[0] = '\0';
return true;
} else {
return false;
}
}
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
if (size < data_len) {
return false;
}
data->resize(static_cast<size_t>(data_len));
memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
return true;
}
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
const char *name, const char *type,
const unsigned char *data, int len) {
out->insert(out->end(), name, name + strlen(name) + 1);
out->insert(out->end(), type, type + strlen(type) + 1);
int outLen = len;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen));
out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
out->insert(out->end(), data, data + len);
}
struct ChannelInfo {
std::string name; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
};
struct HeaderInfo {
std::vector<tinyexr::ChannelInfo> channels;
std::vector<EXRAttribute> attributes;
int data_window[4];
int line_order;
int display_window[4];
float screen_window_center[2];
float screen_window_width;
float pixel_aspect_ratio;
int chunk_count;
// Tiled format
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
unsigned int header_len;
int compression_type;
void clear() {
channels.clear();
attributes.clear();
data_window[0] = 0;
data_window[1] = 0;
data_window[2] = 0;
data_window[3] = 0;
line_order = 0;
display_window[0] = 0;
display_window[1] = 0;
display_window[2] = 0;
display_window[3] = 0;
screen_window_center[0] = 0.0f;
screen_window_center[1] = 0.0f;
screen_window_width = 0.0f;
pixel_aspect_ratio = 0.0f;
chunk_count = 0;
// Tiled format
tile_size_x = 0;
tile_size_y = 0;
tile_level_mode = 0;
tile_rounding_mode = 0;
header_len = 0;
compression_type = 0;
}
};
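// The EXR "chlist" payload handled by the two functions below is a sequence
// of entries
//
//   name (NUL-terminated) | pixel_type (int) | pLinear (uchar) |
//   reserved (3 bytes) | xSampling (int) | ySampling (int)
//
// terminated by a single empty name (a lone '\0'); each entry therefore
// occupies strlen(name) + 1 + 16 bytes.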
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
const std::vector<unsigned char> &data) {
const char *p = reinterpret_cast<const char *>(&data.at(0));
for (;;) {
if ((*p) == 0) {
break;
}
ChannelInfo info;
tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
(p - reinterpret_cast<const char *>(data.data()));
if (data_len < 0) {
return false;
}
p = ReadString(&info.name, p, size_t(data_len));
if ((p == NULL) && (info.name.empty())) {
// Buffer overrun. Issue #51.
return false;
}
const unsigned char *data_end =
reinterpret_cast<const unsigned char *>(p) + 16;
if (data_end >= (data.data() + data.size())) {
return false;
}
memcpy(&info.pixel_type, p, sizeof(int));
p += 4;
info.p_linear = static_cast<unsigned char>(p[0]); // uchar
p += 1 + 3; // reserved: uchar[3]
memcpy(&info.x_sampling, p, sizeof(int)); // int
p += 4;
memcpy(&info.y_sampling, p, sizeof(int)); // int
p += 4;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling));
channels.push_back(info);
}
return true;
}
static void WriteChannelInfo(std::vector<unsigned char> &data,
const std::vector<ChannelInfo> &channels) {
size_t sz = 0;
// Calculate total size.
for (size_t c = 0; c < channels.size(); c++) {
sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0
sz += 16; // 4 * int
}
data.resize(sz + 1);
unsigned char *p = &data.at(0);
for (size_t c = 0; c < channels.size(); c++) {
memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
p += strlen(channels[c].name.c_str());
(*p) = '\0';
p++;
int pixel_type = channels[c].pixel_type;
int x_sampling = channels[c].x_sampling;
int y_sampling = channels[c].y_sampling;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));
memcpy(p, &pixel_type, sizeof(int));
p += sizeof(int);
(*p) = channels[c].p_linear;
p += 4;
memcpy(p, &x_sampling, sizeof(int));
p += sizeof(int);
memcpy(p, &y_sampling, sizeof(int));
p += sizeof(int);
}
(*p) = '\0';
}
static void CompressZip(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
  // Apply EXR-specific preprocessing, adapted from OpenEXR's
  // ImfZipCompressor.cpp.
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
#if TINYEXR_USE_MINIZ
//
// Compress the data using miniz
//
miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
int ret = miniz::mz_compress(
dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
src_size);
TEXR_ASSERT(ret == miniz::MZ_OK);
(void)ret;
compressedSize = outSize;
#else
uLong outSize = compressBound(static_cast<uLong>(src_size));
int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
src_size);
TEXR_ASSERT(ret == Z_OK);
compressedSize = outSize;
#endif
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
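// Worked example of the predictor above (bytes are illustrative): for input
// 10 20 15 the encoder keeps the first byte and stores
// d = cur - prev + 384 (mod 256) for the rest: 10, 138, 123. DecompressZip()
// inverts this with t[0] = t[-1] + t[0] - 128: 10 + 138 - 128 = 20, then
// 20 + 123 - 128 = 15.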
static bool DecompressZip(unsigned char *dst,
unsigned long *uncompressed_size /* inout */,
const unsigned char *src, unsigned long src_size) {
if ((*uncompressed_size) == src_size) {
    // Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return true;
}
std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
int ret =
miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (miniz::MZ_OK != ret) {
return false;
}
#else
int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (Z_OK != ret) {
return false;
}
#endif
//
  // Apply EXR-specific postprocessing, adapted from OpenEXR's
  // ImfZipCompressor.cpp.
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(*uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + (*uncompressed_size);
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
static int rleCompress(int inLength, const char in[], signed char out[]) {
const char *inEnd = in + inLength;
const char *runStart = in;
const char *runEnd = in + 1;
signed char *outWrite = out;
while (runStart < inEnd) {
while (runEnd < inEnd && *runStart == *runEnd &&
runEnd - runStart - 1 < MAX_RUN_LENGTH) {
++runEnd;
}
if (runEnd - runStart >= MIN_RUN_LENGTH) {
//
      // Compressible run
//
*outWrite++ = static_cast<char>(runEnd - runStart) - 1;
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
runStart = runEnd;
} else {
//
      // Uncompressible run
//
while (runEnd < inEnd &&
((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
(runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
runEnd - runStart < MAX_RUN_LENGTH) {
++runEnd;
}
*outWrite++ = static_cast<char>(runStart - runEnd);
while (runStart < runEnd) {
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
}
}
++runEnd;
}
return static_cast<int>(outWrite - out);
}
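// Worked example (input is illustrative): rleCompress() on "AAAABCD" emits
// (3, 'A') for the four-byte run -- run counts are stored minus one -- and
// (-3, 'B', 'C', 'D') for the literal tail, a negative count meaning "copy
// the next 3 bytes verbatim". rleUncompress() below reverses both cases.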
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//
static int rleUncompress(int inLength, int maxLength, const signed char in[],
char out[]) {
char *outStart = out;
while (inLength > 0) {
if (*in < 0) {
int count = -(static_cast<int>(*in++));
inLength -= count + 1;
// Fixes #116: Add bounds check to in buffer.
if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;
memcpy(out, in, count);
out += count;
in += count;
} else {
int count = *in++;
inLength -= 2;
if (0 > (maxLength -= count + 1)) return 0;
memset(out, *reinterpret_cast<const char *>(in), count + 1);
out += count + 1;
in++;
}
}
return static_cast<int>(out - outStart);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
static void CompressRle(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
  // Apply EXR-specific preprocessing, adapted from OpenEXR's
  // ImfRleCompressor.cpp.
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
  // outSize will be (src_size * 3) / 2 at most.
int outSize = rleCompress(static_cast<int>(src_size),
reinterpret_cast<const char *>(&tmpBuf.at(0)),
reinterpret_cast<signed char *>(dst));
TEXR_ASSERT(outSize > 0);
compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static bool DecompressRle(unsigned char *dst,
const unsigned long uncompressed_size,
const unsigned char *src, unsigned long src_size) {
if (uncompressed_size == src_size) {
    // Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return true;
}
// Workaround for issue #112.
// TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
if (src_size <= 2) {
return false;
}
std::vector<unsigned char> tmpBuf(uncompressed_size);
int ret = rleUncompress(static_cast<int>(src_size),
static_cast<int>(uncompressed_size),
reinterpret_cast<const signed char *>(src),
reinterpret_cast<char *>(&tmpBuf.at(0)));
if (ret != static_cast<int>(uncompressed_size)) {
return false;
}
//
  // Apply EXR-specific postprocessing, adapted from OpenEXR's
  // ImfRleCompressor.cpp.
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + uncompressed_size;
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
struct PIZChannelData {
unsigned short *start;
unsigned short *end;
int nx;
int ny;
int ys;
int size;
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
short as = static_cast<short>(a);
short bs = static_cast<short>(b);
short ms = (as + bs) >> 1;
short ds = as - bs;
l = static_cast<unsigned short>(ms);
h = static_cast<unsigned short>(ds);
}
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
short ls = static_cast<short>(l);
short hs = static_cast<short>(h);
int hi = hs;
int ai = ls + (hi & 1) + (hi >> 1);
short as = static_cast<short>(ai);
short bs = static_cast<short>(ai - hi);
a = static_cast<unsigned short>(as);
b = static_cast<unsigned short>(bs);
}
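// Round trip with small values: wenc14(10, 4, l, h) stores the average
// l = (10 + 4) >> 1 = 7 and the difference h = 10 - 4 = 6; wdec14(7, 6, a, b)
// then recovers a = 7 + (6 & 1) + (6 >> 1) = 10 and b = 10 - 6 = 4.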
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
int ao = (a + A_OFFSET) & MOD_MASK;
int m = ((ao + b) >> 1);
int d = ao - b;
if (d < 0) m = (m + M_OFFSET) & MOD_MASK;
d &= MOD_MASK;
l = static_cast<unsigned short>(m);
h = static_cast<unsigned short>(d);
}
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
int m = l;
int d = h;
int bb = (m - (d >> 1)) & MOD_MASK;
int aa = (d + bb - A_OFFSET) & MOD_MASK;
b = static_cast<unsigned short>(bb);
a = static_cast<unsigned short>(aa);
}
//
// 2D Wavelet encoding:
//
static void wav2Encode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1; // == 1 << level
int p2 = 2; // == 1 << (level+1)
//
  // Hierarchical loop on smaller dimension n
//
while (p2 <= n) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet encoding
//
if (w14) {
wenc14(*px, *p01, i00, i01);
wenc14(*p10, *p11, i10, i11);
wenc14(i00, i10, *px, *p10);
wenc14(i01, i11, *p01, *p11);
} else {
wenc16(*px, *p01, i00, i01);
wenc16(*p10, *p11, i10, i11);
wenc16(i00, i10, *px, *p10);
wenc16(i01, i11, *p01, *p11);
}
}
//
// Encode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wenc14(*px, *p10, i00, *p10);
else
wenc16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Encode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wenc14(*px, *p01, i00, *p01);
else
wenc16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p = p2;
p2 <<= 1;
}
}
//
// 2D Wavelet decoding:
//
static void wav2Decode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1;
int p2;
//
// Search max level
//
while (p <= n) p <<= 1;
p >>= 1;
p2 = p;
p >>= 1;
//
// Hierarchical loop on smaller dimension n
//
while (p >= 1) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet decoding
//
if (w14) {
wdec14(*px, *p10, i00, i10);
wdec14(*p01, *p11, i01, i11);
wdec14(i00, i01, *px, *p01);
wdec14(i10, i11, *p10, *p11);
} else {
wdec16(*px, *p10, i00, i10);
wdec16(*p01, *p11, i01, i11);
wdec16(i00, i01, *px, *p01);
wdec16(i10, i11, *p10, *p11);
}
}
//
// Decode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wdec14(*px, *p10, i00, *p10);
else
wdec16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Decode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wdec14(*px, *p01, i00, *p01);
else
wdec16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p2 = p;
p >>= 1;
}
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16; // literal (value) bit length
const int HUF_DECBITS = 14; // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;
struct HufDec { // short code long code
//-------------------------------
int len : 8; // code length 0
int lit : 24; // lit p size
int *p; // 0 lits
};
inline long long hufLength(long long code) { return code & 63; }
inline long long hufCode(long long code) { return code >> 6; }
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
char *&out) {
c <<= nBits;
lc += nBits;
c |= bits;
while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
while (lc < nBits) {
c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
lc += 8;
}
lc -= nBits;
return (c >> lc) & ((1 << nBits) - 1);
}
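// For example, outputBits(6, 0x2A, c, lc, out) appends the six bits 101010 to
// the accumulator c; whenever lc reaches 8 or more, whole bytes are flushed
// from the top of c. getBits() mirrors this: it refills c one byte at a time
// until at least nBits are buffered, then returns the top nBits.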
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
long long n[59];
//
// For each i from 0 through 58, count the
// number of different codes of length i, and
// store the count in n[i].
//
for (int i = 0; i <= 58; ++i) n[i] = 0;
for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
//
// For each i from 58 through 1, compute the
// numerically lowest code with length i, and
// store that code in n[i].
//
long long c = 0;
for (int i = 58; i > 0; --i) {
long long nc = ((c + n[i]) >> 1);
n[i] = c;
c = nc;
}
//
// hcode[i] contains the length, l, of the
// code for symbol i. Assign the next available
// code of length l to the symbol and store both
// l and the code in hcode[i].
//
for (int i = 0; i < HUF_ENCSIZE; ++i) {
int l = static_cast<int>(hcode[i]);
if (l > 0) hcode[i] = l | (n[l]++ << 6);
}
}
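// Small worked example: given code lengths A:1, B:2, C:2 (all other symbols
// zero), the loops above assign A -> 1, B -> 00, C -> 01 (each stored as
// length | code << 6, matching hufLength()/hufCode()). Note the canonical
// property: the short code '1', zero-filled to '10', is numerically higher
// than the longer codes '00' and '01'.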
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
struct FHeapCompare {
bool operator()(long long *a, long long *b) { return *a > *b; }
};
static void hufBuildEncTable(
long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
int *im, // o: min frq index
int *iM) // o: max frq index
{
//
// This function assumes that when it is called, array frq
// indicates the frequency of all possible symbols in the data
// that are to be Huffman-encoded. (frq[i] contains the number
// of occurrences of symbol i in the data.)
//
// The loop below does three things:
//
// 1) Finds the minimum and maximum indices that point
// to non-zero entries in frq:
//
// frq[im] != 0, and frq[i] == 0 for all i < im
// frq[iM] != 0, and frq[i] == 0 for all i > iM
//
// 2) Fills array fHeap with pointers to all non-zero
// entries in frq.
//
// 3) Initializes array hlink such that hlink[i] == i
// for all array entries.
//
std::vector<int> hlink(HUF_ENCSIZE);
std::vector<long long *> fHeap(HUF_ENCSIZE);
*im = 0;
while (!frq[*im]) (*im)++;
int nf = 0;
for (int i = *im; i < HUF_ENCSIZE; i++) {
hlink[i] = i;
if (frq[i]) {
fHeap[nf] = &frq[i];
nf++;
*iM = i;
}
}
//
// Add a pseudo-symbol, with a frequency count of 1, to frq;
// adjust the fHeap and hlink array accordingly. Function
// hufEncode() uses the pseudo-symbol for run-length encoding.
//
(*iM)++;
frq[*iM] = 1;
fHeap[nf] = &frq[*iM];
nf++;
//
// Build an array, scode, such that scode[i] contains the number
// of bits assigned to symbol i. Conceptually this is done by
// constructing a tree whose leaves are the symbols with non-zero
// frequency:
//
// Make a heap that contains all symbols with a non-zero frequency,
// with the least frequent symbol on top.
//
// Repeat until only one symbol is left on the heap:
//
// Take the two least frequent symbols off the top of the heap.
// Create a new node that has first two nodes as children, and
// whose frequency is the sum of the frequencies of the first
// two nodes. Put the new node back into the heap.
//
// The last node left on the heap is the root of the tree. For each
// leaf node, the distance between the root and the leaf is the length
// of the code for the corresponding symbol.
//
// The loop below doesn't actually build the tree; instead we compute
// the distances of the leaves from the root on the fly. When a new
// node is added to the heap, then that node's descendants are linked
// into a single linear list that starts at the new node, and the code
// lengths of the descendants (that is, their distance from the root
// of the tree) are incremented by one.
//
std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
std::vector<long long> scode(HUF_ENCSIZE);
memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);
while (nf > 1) {
//
// Find the indices, mm and m, of the two smallest non-zero frq
// values in fHeap, add the smallest frq to the second-smallest
// frq, and remove the smallest frq value from fHeap.
//
int mm = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
--nf;
int m = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
frq[m] += frq[mm];
std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
//
// The entries in scode are linked into lists with the
// entries in hlink serving as "next" pointers and with
// the end of a list marked by hlink[j] == j.
//
// Traverse the lists that start at scode[m] and scode[mm].
// For each element visited, increment the length of the
// corresponding code by one bit. (If we visit scode[j]
// during the traversal, then the code for symbol j becomes
// one bit longer.)
//
// Merge the lists that start at scode[m] and scode[mm]
// into a single list that starts at scode[m].
//
//
// Add a bit to all codes in the first list.
//
for (int j = m;; j = hlink[j]) {
scode[j]++;
TEXR_ASSERT(scode[j] <= 58);
if (hlink[j] == j) {
//
// Merge the two lists.
//
hlink[j] = mm;
break;
}
}
//
// Add a bit to all codes in the second list
//
for (int j = mm;; j = hlink[j]) {
scode[j]++;
TEXR_ASSERT(scode[j] <= 58);
if (hlink[j] == j) break;
}
}
//
// Build a canonical Huffman code table, replacing the code
// lengths in scode with (code, code length) pairs. Copy the
// code table from scode into frq.
//
hufCanonicalCodeTable(scode.data());
memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
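//
// Worked example (values derived from the constants above): a run of 10
// zero-length codes packs as LONG_ZEROCODE_RUN (63) in 6 bits followed by
// 10 - SHORTEST_LONG_RUN = 4 in 8 bits, while a run of 3 zeroes packs as
// the single 6-bit value SHORT_ZEROCODE_RUN + 3 - 2 = 60.
//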
static void hufPackEncTable(
const long long *hcode, // i : encoding table [HUF_ENCSIZE]
int im, // i : min hcode index
int iM, // i : max hcode index
char **pcode) // o: ptr to packed table (updated)
{
char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
int l = hufLength(hcode[im]);
if (l == 0) {
int zerun = 1;
while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
if (hufLength(hcode[im + 1]) > 0) break;
im++;
zerun++;
}
if (zerun >= 2) {
if (zerun >= SHORTEST_LONG_RUN) {
outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
} else {
outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
}
continue;
}
}
outputBits(6, l, c, lc, p);
}
if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));
*pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
static bool hufUnpackEncTable(
const char **pcode, // io: ptr to packed table (updated)
int ni, // i : input size (in bytes)
int im, // i : min hcode index
int iM, // i : max hcode index
long long *hcode) // o: encoding table [HUF_ENCSIZE]
{
memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
const char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
if (p - *pcode >= ni) {
return false;
}
long long l = hcode[im] = getBits(6, c, lc, p); // code length
if (l == (long long)LONG_ZEROCODE_RUN) {
if (p - *pcode > ni) {
return false;
}
int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
} else if (l >= (long long)SHORT_ZEROCODE_RUN) {
int zerun = l - SHORT_ZEROCODE_RUN + 2;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
}
}
*pcode = const_cast<char *>(p);
hufCanonicalCodeTable(hcode);
return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
for (int i = 0; i < HUF_DECSIZE; i++) {
hdecod[i].len = 0;
hdecod[i].lit = 0;
hdecod[i].p = NULL;
}
// memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
//   infrequent;
// - decoding tables are used by hufDecode();
//
static bool hufBuildDecTable(const long long *hcode, // i : encoding table
int im, // i : min index in hcode
int iM, // i : max index in hcode
HufDec *hdecod) // o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
//
// Init hashtable & loop on all codes.
// Assumes that hufClearDecTable(hdecod) has already been called.
//
for (; im <= iM; im++) {
long long c = hufCode(hcode[im]);
int l = hufLength(hcode[im]);
if (c >> l) {
//
// Error: c is supposed to be an l-bit code,
// but c contains a value that is greater
// than the largest l-bit number.
//
// invalidTableEntry();
return false;
}
if (l > HUF_DECBITS) {
//
// Long code: add a secondary entry
//
HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
if (pl->len) {
//
// Error: a short code has already
// been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->lit++;
if (pl->p) {
int *p = pl->p;
pl->p = new int[pl->lit];
for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
delete[] p;
} else {
pl->p = new int[1];
}
pl->p[pl->lit - 1] = im;
} else if (l) {
//
// Short code: init all primary entries
//
HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
if (pl->len || pl->p) {
//
// Error: a short code or a long code has
// already been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->len = l;
pl->lit = im;
}
}
}
return true;
}
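//
// Example (assuming HUF_DECBITS == 14, as in OpenEXR's scheme): a 9-bit
// code c fills the 2^(14-9) == 32 consecutive primary entries whose top
// 9 bits equal c, so hufDecode() resolves it with a single table lookup;
// an 18-bit code is instead appended to pl->p of the entry indexed by its
// top 14 bits and is found later by linear search over that list.
//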
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
for (int i = 0; i < HUF_DECSIZE; i++) {
if (hdecod[i].p) {
delete[] hdecod[i].p;
hdecod[i].p = 0;
}
}
}
//
// ENCODING
//
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount + 1 instances of the symbol sCode (the
  // symbol itself plus runCount repeats). Output the symbols explicitly,
  // or if that is shorter, output the sCode symbol once followed by a
  // runCode symbol and runCount expressed as an 8-bit number.
  //
if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
outputCode(sCode, c, lc, out);
outputCode(runCode, c, lc, out);
outputBits(8, runCount, c, lc, out);
} else {
while (runCount-- >= 0) outputCode(sCode, c, lc, out);
}
}
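//
// Cost example (hypothetical code lengths): with hufLength(sCode) == 6 and
// hufLength(runCode) == 8, a run with runCount == 200 is sent as
// sCode + runCode + count (6 + 8 + 8 = 22 bits) instead of 201 explicit
// copies (1206 bits); for runCount == 2 the comparison 22 < 12 fails, so
// the three copies are written explicitly (18 bits).
//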
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
static int hufEncode // return: output size (in bits)
(const long long *hcode, // i : encoding table
const unsigned short *in, // i : uncompressed input buffer
 const int ni,               // i : input buffer size (in 16-bit words)
int rlc, // i : rl code
char *out) // o: compressed output buffer
{
char *outStart = out;
long long c = 0; // bits not yet written to out
int lc = 0; // number of valid bits in c (LSB)
int s = in[0];
int cs = 0;
//
// Loop on input values
//
for (int i = 1; i < ni; i++) {
//
// Count same values or send code
//
if (s == in[i] && cs < 255) {
cs++;
} else {
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
cs = 0;
}
s = in[i];
}
//
// Send remaining code
//
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
if (lc) *out = (c << (8 - lc)) & 0xff;
return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
#define getChar(c, lc, in) \
{ \
c = (c << 8) | *(unsigned char *)(in++); \
lc += 8; \
}
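//
// Bit-buffer invariant maintained by getChar(): after reading the bytes
// 0xAB then 0xCD, c holds 0xABCD and lc == 16; decoders then consume the
// most-significant of the low lc valid bits of c first.
//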
#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
/* TinyEXR issue 78 */ \
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
#else
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
const char *in_end, unsigned short *&out,
const unsigned short *ob, const unsigned short *oe) {
(void)ob;
if (po == rlc) {
if (lc < 8) {
/* TinyEXR issue 78 */
if ((in + 1) >= in_end) {
return false;
}
getChar(c, lc, in);
}
lc -= 8;
unsigned char cs = (c >> lc);
if (out + cs > oe) return false;
// Bounds check for safety
// Issue 100.
if ((out - 1) < ob) return false;
unsigned short s = out[-1];
while (cs-- > 0) *out++ = s;
} else if (out < oe) {
*out++ = po;
} else {
return false;
}
return true;
}
#endif
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
static bool hufDecode(const long long *hcode, // i : encoding table
const HufDec *hdecod, // i : decoding table
const char *in, // i : compressed input buffer
int ni, // i : input size (in bits)
int rlc, // i : run-length code
                      int no,   // i : expected output size (in 16-bit words)
unsigned short *out) // o: uncompressed output buffer
{
long long c = 0;
int lc = 0;
unsigned short *outb = out; // begin
unsigned short *oe = out + no; // end
  const char *ie = in + (ni + 7) / 8;  // end of input (ni bits, rounded up to whole bytes)
//
// Loop on input bytes
//
while (in < ie) {
getChar(c, lc, in);
//
// Access decoding table
//
while (lc >= HUF_DECBITS) {
const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
if (pl.len) {
//
// Get short code
//
lc -= pl.len;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
if (!pl.p) {
return false;
}
// invalidCode(); // wrong code
//
// Search long code
//
int j;
for (j = 0; j < pl.lit; j++) {
int l = hufLength(hcode[pl.p[j]]);
while (lc < l && in < ie) // get more bits
getChar(c, lc, in);
if (lc >= l) {
if (hufCode(hcode[pl.p[j]]) ==
((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
//
// Found : get long code
//
lc -= l;
if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
break;
}
}
}
if (j == pl.lit) {
return false;
// invalidCode(); // Not found
}
}
}
}
//
// Get remaining (short) codes
//
int i = (8 - ni) & 7;
c >>= i;
lc -= i;
while (lc > 0) {
const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
if (pl.len) {
lc -= pl.len;
if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
return false;
}
} else {
return false;
// invalidCode(); // wrong (long) code
}
}
if (out - outb != no) {
return false;
}
// notEnoughData ();
return true;
}
static void countFrequencies(std::vector<long long> &freq,
const unsigned short data[/*n*/], int n) {
for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;
for (int i = 0; i < n; ++i) ++freq[data[i]];
}
static void writeUInt(char buf[4], unsigned int i) {
unsigned char *b = (unsigned char *)buf;
b[0] = i;
b[1] = i >> 8;
b[2] = i >> 16;
b[3] = i >> 24;
}
static unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = (const unsigned char *)buf;
  // Assemble as unsigned to avoid signed-overflow UB when b[3] >= 0x80.
  return static_cast<unsigned int>(b[0]) |
         (static_cast<unsigned int>(b[1]) << 8) |
         (static_cast<unsigned int>(b[2]) << 16) |
         (static_cast<unsigned int>(b[3]) << 24);
}
//
// EXTERNAL INTERFACE
//
static int hufCompress(const unsigned short raw[], int nRaw,
char compressed[]) {
if (nRaw == 0) return 0;
std::vector<long long> freq(HUF_ENCSIZE);
countFrequencies(freq, raw, nRaw);
int im = 0;
int iM = 0;
hufBuildEncTable(freq.data(), &im, &iM);
char *tableStart = compressed + 20;
char *tableEnd = tableStart;
hufPackEncTable(freq.data(), im, iM, &tableEnd);
int tableLength = tableEnd - tableStart;
char *dataStart = tableEnd;
int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
int data_length = (nBits + 7) / 8;
writeUInt(compressed, im);
writeUInt(compressed + 4, iM);
writeUInt(compressed + 8, tableLength);
writeUInt(compressed + 12, nBits);
writeUInt(compressed + 16, 0); // room for future extensions
return dataStart + data_length - compressed;
}
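//
// Layout of the buffer produced by hufCompress():
//
//   bytes  0- 3 : im (minimum symbol index)
//   bytes  4- 7 : iM (maximum symbol index; also the RLE pseudo-symbol)
//   bytes  8-11 : packed encoding table length (in bytes)
//   bytes 12-15 : encoded data length (in bits)
//   bytes 16-19 : reserved (zero, room for future extensions)
//   bytes 20-   : packed encoding table, then the Huffman bit stream
//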
static bool hufUncompress(const char compressed[], int nCompressed,
std::vector<unsigned short> *raw) {
  if (nCompressed == 0) {
    // Empty input is valid only when no output is expected.
    return raw->size() == 0;
  }
int im = readUInt(compressed);
int iM = readUInt(compressed + 4);
// int tableLength = readUInt (compressed + 8);
int nBits = readUInt(compressed + 12);
if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
const char *ptr = compressed + 20;
//
// Fast decoder needs at least 2x64-bits of compressed data, and
// needs to be run-able on this platform. Otherwise, fall back
// to the original decoder
//
// if (FastHufDecoder::enabled() && nBits > 128)
//{
// FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
// fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
//}
// else
{
std::vector<long long> freq(HUF_ENCSIZE);
std::vector<HufDec> hdec(HUF_DECSIZE);
hufClearDecTable(&hdec.at(0));
    if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                           &freq.at(0))) {
      return false;
    }
    {
      if (nBits > 8 * (nCompressed - (ptr - compressed))) {
        return false;
      }
      if (!hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0))) {
        hufFreeDecTable(&hdec.at(0));
        return false;
      }
      if (!hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM,
                     static_cast<int>(raw->size()), raw->data())) {
        hufFreeDecTable(&hdec.at(0));
        return false;
      }
}
// catch (...)
//{
// hufFreeDecTable (hdec);
// throw;
//}
hufFreeDecTable(&hdec.at(0));
}
return true;
}
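#if 0
// Hedged usage sketch (not part of the library): round-tripping a value
// buffer through hufCompress()/hufUncompress(). Note that the caller
// pre-sizes the output vector -- hufUncompress() takes the expected
// element count from raw->size(). The output headroom below (packed table
// plus up to ~8 bytes per value) is an assumption chosen for illustration,
// not a documented bound.
static bool hufRoundTripExample(const std::vector<unsigned short> &src) {
  std::vector<char> compressed(20 + 8 * HUF_ENCSIZE + 8 * src.size());
  int nCompressed =
      hufCompress(src.data(), static_cast<int>(src.size()), compressed.data());
  std::vector<unsigned short> decoded(src.size());  // expected output size
  if (!hufUncompress(compressed.data(), nCompressed, &decoded)) return false;
  return decoded == src;
}
#endif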
//
// Functions to compress the range of values in the pixel data
//
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
unsigned char bitmap[BITMAP_SIZE],
unsigned short &minNonZero,
unsigned short &maxNonZero) {
for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;
for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));
bitmap[0] &= ~1; // zero is not explicitly stored in
// the bitmap; we assume that the
// data always contain zeroes
minNonZero = BITMAP_SIZE - 1;
maxNonZero = 0;
for (int i = 0; i < BITMAP_SIZE; ++i) {
if (bitmap[i]) {
if (minNonZero > i) minNonZero = i;
if (maxNonZero < i) maxNonZero = i;
}
}
}
static unsigned short forwardLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
lut[i] = k++;
else
lut[i] = 0;
}
return k - 1; // maximum value stored in lut[],
} // i.e. number of ones in bitmap minus 1
static unsigned short reverseLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
}
int n = k - 1;
while (k < USHORT_RANGE) lut[k++] = 0;
return n; // maximum k where lut[k] is non-zero,
} // i.e. number of ones in bitmap minus 1
static void applyLut(const unsigned short lut[USHORT_RANGE],
unsigned short data[/*nData*/], int nData) {
for (int i = 0; i < nData; ++i) data[i] = lut[data[i]];
}
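//
// Example: if the pixel data contain only the values {0, 100, 2000}, the
// forward LUT maps 0 -> 0, 100 -> 1, 2000 -> 2 (maxValue == 2), so the
// wavelet and Huffman stages see a dense [0, maxValue] range; the reverse
// LUT built from the same bitmap restores the original values after
// decoding.
//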
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
const unsigned char *inPtr, size_t inSize,
const std::vector<ChannelInfo> &channelInfo,
int data_width, int num_lines) {
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
TEXR_ASSERT(0);
return false;
#endif
  // Assume `inSize` is a multiple of 2 or 4.
std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
std::vector<PIZChannelData> channelData(channelInfo.size());
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t c = 0; c < channelData.size(); c++) {
PIZChannelData &cd = channelData[c];
cd.start = tmpBufferEnd;
cd.end = cd.start;
cd.nx = data_width;
cd.ny = num_lines;
// cd.ys = c.channel().ySampling;
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
cd.size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += cd.nx * cd.ny * cd.size;
}
const unsigned char *ptr = inPtr;
for (int y = 0; y < num_lines; ++y) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(cd.end, ptr, n * sizeof(unsigned short));
ptr += n * sizeof(unsigned short);
cd.end += n;
}
}
bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
bitmap.data(), minNonZero, maxNonZero);
std::vector<unsigned short> lut(USHORT_RANGE);
unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
//
// Store range compression info in _outBuffer
//
char *buf = reinterpret_cast<char *>(outPtr);
memcpy(buf, &minNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
memcpy(buf, &maxNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
if (minNonZero <= maxNonZero) {
memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
maxNonZero - minNonZero + 1);
buf += maxNonZero - minNonZero + 1;
}
//
// Apply wavelet encoding
//
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Apply Huffman encoding; append the result to _outBuffer
//
// length header(4byte), then huff data. Initialize length header with zero,
// then later fill it by `length`.
char *lengthPtr = buf;
int zero = 0;
memcpy(buf, &zero, sizeof(int));
buf += sizeof(int);
int length =
hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
memcpy(lengthPtr, &length, sizeof(int));
(*outSize) = static_cast<unsigned int>(
(reinterpret_cast<unsigned char *>(buf) - outPtr) +
static_cast<unsigned int>(length));
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if ((*outSize) >= inSize) {
(*outSize) = static_cast<unsigned int>(inSize);
memcpy(outPtr, inPtr, inSize);
}
return true;
}
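//
// Layout of a PIZ-compressed block as written by CompressPiz():
//
//   minNonZero (2 bytes) | maxNonZero (2 bytes) |
//   bitmap[minNonZero..maxNonZero] (present only if minNonZero <= maxNonZero) |
//   Huffman data length (4 bytes) | Huffman-coded, wavelet-encoded samples
//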
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
size_t tmpBufSize, size_t inLen, int num_channels,
const EXRChannelInfo *channels, int data_width,
int num_lines) {
if (inLen == tmpBufSize) {
    // Data is not compressed (Issue 40).
memcpy(outPtr, inPtr, inLen);
return true;
}
std::vector<unsigned char> bitmap(BITMAP_SIZE);
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
TEXR_ASSERT(0);
return false;
#endif
memset(bitmap.data(), 0, BITMAP_SIZE);
const unsigned char *ptr = inPtr;
// minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
// maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
ptr += 4;
if (maxNonZero >= BITMAP_SIZE) {
return false;
}
if (minNonZero <= maxNonZero) {
memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
maxNonZero - minNonZero + 1);
ptr += maxNonZero - minNonZero + 1;
}
std::vector<unsigned short> lut(USHORT_RANGE);
memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());
//
// Huffman decoding
//
int length;
// length = *(reinterpret_cast<const int *>(ptr));
tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
ptr += sizeof(int);
  if ((length < 0) || (size_t((ptr - inPtr) + length) > inLen)) {
    return false;
  }
  std::vector<unsigned short> tmpBuffer(tmpBufSize);
  if (!hufUncompress(reinterpret_cast<const char *>(ptr), length,
                     &tmpBuffer)) {
    return false;
  }
//
// Wavelet decoding
//
std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
const EXRChannelInfo &chan = channels[i];
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
channelData[i].start = tmpBufferEnd;
channelData[i].end = channelData[i].start;
channelData[i].nx = data_width;
channelData[i].ny = num_lines;
// channelData[i].ys = 1;
channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
}
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Expand the pixel data to their original range
//
applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize));
for (int y = 0; y < num_lines; y++) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
outPtr += n * sizeof(unsigned short);
cd.end += n;
}
}
return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
struct ZFPCompressionParam {
double rate;
int precision;
double tolerance;
int type; // TINYEXR_ZFP_COMPRESSIONTYPE_*
ZFPCompressionParam() {
type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
rate = 2.0;
precision = 0;
tolerance = 0.0f;
}
};
bool FindZFPCompressionParam(ZFPCompressionParam *param,
const EXRAttribute *attributes,
int num_attributes) {
bool foundType = false;
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) &&
(attributes[i].size == 1)) {
param->type = static_cast<int>(attributes[i].value[0]);
foundType = true;
}
}
if (!foundType) {
return false;
}
if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
(attributes[i].size == 8)) {
param->rate = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
(attributes[i].size == 4)) {
        param->precision = *(reinterpret_cast<int *>(attributes[i].value));
return true;
}
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
(attributes[i].size == 8)) {
param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
} else {
TEXR_ASSERT(0);
}
return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
int num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam ¶m) {
  // The uncompressed size is counted in bytes (float samples) so that it is
  // comparable with src_size.
  size_t uncompressed_size = static_cast<size_t>(dst_width) *
                             static_cast<size_t>(dst_num_lines) *
                             static_cast<size_t>(num_channels) * sizeof(float);
  if (uncompressed_size == src_size) {
    // Data is not compressed (Issue 40); copy it through and return.
    memcpy(dst, src, src_size);
    return true;
  }
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
TEXR_ASSERT((dst_width % 4) == 0);
TEXR_ASSERT((dst_num_lines % 4) == 0);
if ((dst_width & 3U) || (dst_num_lines & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, dst_width, dst_num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
TEXR_ASSERT(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = dst_width * dst_num_lines;
for (int c = 0; c < num_channels; c++) {
// decompress 4x4 pixel block.
for (int y = 0; y < dst_num_lines; y += 4) {
for (int x = 0; x < dst_width; x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * dst_width + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
const float *inPtr, int width, int num_lines, int num_channels,
const ZFPCompressionParam ¶m) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
TEXR_ASSERT((width % 4) == 0);
TEXR_ASSERT((num_lines % 4) == 0);
if ((width & 3U) || (num_lines & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, width, num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
TEXR_ASSERT(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = width * num_lines;
for (int c = 0; c < num_channels; c++) {
// compress 4x4 pixel block.
for (int y = 0; y < num_lines; y += 4) {
for (int x = 0; x < width; x += 4) {
float fblock[16];
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * width + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
  zfp_stream_flush(zfp);
  (*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp));
  zfp_stream_close(zfp);
  stream_close(stream);  // zfp_stream_close() does not free the bitstream
return true;
}
#endif
//
// -----------------------------------------------------------------
//
// TODO(syoyo): Refactor function arguments.
static bool DecodePixelData(/* out */ unsigned char **out_images,
const int *requested_pixel_types,
const unsigned char *data_ptr, size_t data_len,
int compression_type, int line_order, int width,
int height, int x_stride, int y, int line_no,
int num_lines, size_t pixel_data_size,
size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ
#if TINYEXR_USE_PIZ
if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
// Invalid input #90
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(
static_cast<size_t>(width * num_lines) * pixel_data_size));
size_t tmpBufLen = outBuf.size();
bool ret = tinyexr::DecompressPiz(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
data_len, static_cast<int>(num_channels), channels, width, num_lines);
if (!ret) {
return false;
}
// For PIZ_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
FP16 hf;
// hf.u = line_ptr[u];
// use `cpy` to avoid unaligned memory access when compiler's
// optimization is on.
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
TEXR_ASSERT(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
TEXR_ASSERT(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          // outBuf is laid out `width` samples per scanline (not x_stride),
          // matching the HALF and UINT branches above.
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
TEXR_ASSERT(0);
}
}
#else
    TEXR_ASSERT(0 && "PIZ is not enabled in this build");
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
TEXR_ASSERT(dstLen > 0);
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
offset = (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
TEXR_ASSERT(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
TEXR_ASSERT(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
TEXR_ASSERT(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
if (dstLen == 0) {
return false;
}
if (!tinyexr::DecompressRle(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For RLE_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
TEXR_ASSERT(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
TEXR_ASSERT(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
TEXR_ASSERT(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
if (!FindZFPCompressionParam(&zfp_compression_param, attributes,
num_attributes)) {
TEXR_ASSERT(0);
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = outBuf.size();
TEXR_ASSERT(dstLen > 0);
    if (!tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)),
                                width, num_lines,
                                static_cast<int>(num_channels), data_ptr,
                                static_cast<unsigned long>(data_len),
                                zfp_compression_param)) {
      return false;
    }
// For ZFP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
TEXR_ASSERT(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
TEXR_ASSERT(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
TEXR_ASSERT(0);
return false;
}
}
#else
(void)attributes;
(void)num_attributes;
(void)num_channels;
TEXR_ASSERT(0);
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
for (size_t c = 0; c < num_channels; c++) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
const unsigned short *line_ptr =
reinterpret_cast<const unsigned short *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *outLine =
reinterpret_cast<unsigned short *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
            if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
                (data_ptr + data_len)) {
              // Insufficient data size
              return false;
            }
            for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
outLine[u] = hf.u;
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
            // The address may not be aligned; use byte-wise copy for safety (Issue 76).
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
tinyexr::FP32 f32 = half_to_float(hf);
outLine[u] = f32.f;
}
} else {
TEXR_ASSERT(0);
return false;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
const float *line_ptr = reinterpret_cast<const float *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
unsigned int *outLine =
reinterpret_cast<unsigned int *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
} else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
(data_ptr + data_len)) {
              // Corrupted data?
return false;
}
unsigned int val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
}
}
}
}
return true;
}
static bool DecodeTiledPixelData(
unsigned char **out_images, int *width, int *height,
const int *requested_pixel_types, const unsigned char *data_ptr,
size_t data_len, int compression_type, int line_order, int data_width,
int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
int tile_size_y, size_t pixel_data_size, size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
TEXR_ASSERT(tile_offset_x * tile_size_x < data_width);
TEXR_ASSERT(tile_offset_y * tile_size_y < data_height);
// Compute actual image size in a tile.
if ((tile_offset_x + 1) * tile_size_x >= data_width) {
(*width) = data_width - (tile_offset_x * tile_size_x);
} else {
(*width) = tile_size_x;
}
if ((tile_offset_y + 1) * tile_size_y >= data_height) {
(*height) = data_height - (tile_offset_y * tile_size_y);
} else {
(*height) = tile_size_y;
}
// Image size = tile size.
return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
compression_type, line_order, (*width), tile_size_y,
/* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
(*height), pixel_data_size, num_attributes, attributes,
num_channels, channels, channel_offset_list);
}
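// Example: with data_width == 100 and tile_size_x == 32, tile offsets 0..2
// decode 32 pixels each, while the last tile (tile_offset_x == 3) is
// clamped to 100 - 3 * 32 == 4 pixels wide.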
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
int *pixel_data_size, size_t *channel_offset,
int num_channels,
const EXRChannelInfo *channels) {
channel_offset_list->resize(static_cast<size_t>(num_channels));
(*pixel_data_size) = 0;
(*channel_offset) = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
(*channel_offset_list)[c] = (*channel_offset);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
(*pixel_data_size) += sizeof(unsigned short);
(*channel_offset) += sizeof(unsigned short);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
(*pixel_data_size) += sizeof(float);
(*channel_offset) += sizeof(float);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
(*pixel_data_size) += sizeof(unsigned int);
(*channel_offset) += sizeof(unsigned int);
} else {
// ???
return false;
}
}
return true;
}
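// Example: for three channels (R: HALF, G: FLOAT, B: UINT) the offsets come
// out as R -> 0, G -> 2, B -> 6 and pixel_data_size == 10 bytes per pixel;
// within a decoded scanline block, channel c's samples begin at
// channel_offset_list[c] * width bytes.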
static unsigned char **AllocateImage(int num_channels,
const EXRChannelInfo *channels,
const int *requested_pixel_types,
int data_width, int data_height) {
unsigned char **images =
reinterpret_cast<unsigned char **>(static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
size_t data_len =
static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
// pixel_data_size += sizeof(unsigned short);
// channel_offset += sizeof(unsigned short);
// Alloc internal image for half type.
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
images[c] =
reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
malloc(sizeof(unsigned short) * data_len)));
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else {
TEXR_ASSERT(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
// pixel_data_size += sizeof(float);
// channel_offset += sizeof(float);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
// pixel_data_size += sizeof(unsigned int);
// channel_offset += sizeof(unsigned int);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
} else {
TEXR_ASSERT(0);
}
}
return images;
}
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
const EXRVersion *version, std::string *err,
const unsigned char *buf, size_t size) {
const char *marker = reinterpret_cast<const char *>(&buf[0]);
if (empty_header) {
(*empty_header) = false;
}
if (version->multipart) {
if (size > 0 && marker[0] == '\0') {
// End of header list.
if (empty_header) {
(*empty_header) = true;
}
return TINYEXR_SUCCESS;
}
}
// According to the spec, the header of every OpenEXR file must contain at
// least the following attributes:
//
// channels chlist
// compression compression
// dataWindow box2i
// displayWindow box2i
// lineOrder lineOrder
// pixelAspectRatio float
// screenWindowCenter v2f
// screenWindowWidth float
bool has_channels = false;
bool has_compression = false;
bool has_data_window = false;
bool has_display_window = false;
bool has_line_order = false;
bool has_pixel_aspect_ratio = false;
bool has_screen_window_center = false;
bool has_screen_window_width = false;
info->data_window[0] = 0;
info->data_window[1] = 0;
info->data_window[2] = 0;
info->data_window[3] = 0;
info->line_order = 0; // @fixme
info->display_window[0] = 0;
info->display_window[1] = 0;
info->display_window[2] = 0;
info->display_window[3] = 0;
info->screen_window_center[0] = 0.0f;
info->screen_window_center[1] = 0.0f;
info->screen_window_width = -1.0f;
info->pixel_aspect_ratio = -1.0f;
info->tile_size_x = -1;
info->tile_size_y = -1;
info->tile_level_mode = -1;
info->tile_rounding_mode = -1;
info->attributes.clear();
// Read attributes
size_t orig_size = size;
for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
if (0 == size) {
if (err) {
(*err) += "Insufficient data size for attributes.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
if (err) {
(*err) += "Failed to read attribute.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (version->tiled && attr_name.compare("tiles") == 0) {
unsigned int x_size, y_size;
unsigned char tile_mode;
TEXR_ASSERT(data.size() == 9);
memcpy(&x_size, &data.at(0), sizeof(int));
memcpy(&y_size, &data.at(4), sizeof(int));
tile_mode = data[8];
tinyexr::swap4(&x_size);
tinyexr::swap4(&y_size);
info->tile_size_x = static_cast<int>(x_size);
info->tile_size_y = static_cast<int>(y_size);
// mode = levelMode + roundingMode * 16
info->tile_level_mode = tile_mode & 0x3;
info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
} else if (attr_name.compare("compression") == 0) {
bool ok = false;
if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
ok = true;
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
ok = true;
#else
if (err) {
(*err) = "PIZ compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
ok = true;
#else
if (err) {
(*err) = "ZFP compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (!ok) {
if (err) {
(*err) = "Unknown compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->compression_type = static_cast<int>(data[0]);
has_compression = true;
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!ReadChannelInfo(info->channels, data)) {
if (err) {
(*err) += "Failed to parse channel info.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (info->channels.size() < 1) {
if (err) {
(*err) += "# of channels is zero.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
has_channels = true;
} else if (attr_name.compare("dataWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->data_window[0], &data.at(0), sizeof(int));
memcpy(&info->data_window[1], &data.at(4), sizeof(int));
memcpy(&info->data_window[2], &data.at(8), sizeof(int));
memcpy(&info->data_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3]));
has_data_window = true;
}
} else if (attr_name.compare("displayWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->display_window[0], &data.at(0), sizeof(int));
memcpy(&info->display_window[1], &data.at(4), sizeof(int));
memcpy(&info->display_window[2], &data.at(8), sizeof(int));
memcpy(&info->display_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[1]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[2]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[3]));
has_display_window = true;
}
} else if (attr_name.compare("lineOrder") == 0) {
if (data.size() >= 1) {
info->line_order = static_cast<int>(data[0]);
has_line_order = true;
}
} else if (attr_name.compare("pixelAspectRatio") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio));
has_pixel_aspect_ratio = true;
}
} else if (attr_name.compare("screenWindowCenter") == 0) {
if (data.size() >= 8) {
memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[1]));
has_screen_window_center = true;
}
} else if (attr_name.compare("screenWindowWidth") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_width));
has_screen_window_width = true;
}
} else if (attr_name.compare("chunkCount") == 0) {
if (data.size() >= sizeof(int)) {
memcpy(&info->chunk_count, &data.at(0), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count));
}
} else {
// Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
EXRAttribute attrib;
#ifdef _MSC_VER
strncpy_s(attrib.name, attr_name.c_str(), 255);
strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
strncpy(attrib.name, attr_name.c_str(), 255);
strncpy(attrib.type, attr_type.c_str(), 255);
#endif
attrib.name[255] = '\0';
attrib.type[255] = '\0';
attrib.size = static_cast<int>(data.size());
        attrib.value = static_cast<unsigned char *>(malloc(data.size()));
        if (!data.empty()) {
          // Guard: data.at(0) would throw for an empty attribute value.
          memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
                 data.size());
        }
info->attributes.push_back(attrib);
}
}
}
// Check if required attributes exist
{
std::stringstream ss_err;
if (!has_compression) {
ss_err << "\"compression\" attribute not found in the header."
<< std::endl;
}
if (!has_channels) {
ss_err << "\"channels\" attribute not found in the header." << std::endl;
}
if (!has_line_order) {
ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
}
if (!has_display_window) {
ss_err << "\"displayWindow\" attribute not found in the header."
<< std::endl;
}
if (!has_data_window) {
ss_err << "\"dataWindow\" attribute not found in the header or invalid."
<< std::endl;
}
if (!has_pixel_aspect_ratio) {
ss_err << "\"pixelAspectRatio\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_width) {
ss_err << "\"screenWindowWidth\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_center) {
ss_err << "\"screenWindowCenter\" attribute not found in the header."
<< std::endl;
}
if (!(ss_err.str().empty())) {
if (err) {
(*err) += ss_err.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
}
info->header_len = static_cast<unsigned int>(orig_size - size);
return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
exr_header->screen_window_center[0] = info.screen_window_center[0];
exr_header->screen_window_center[1] = info.screen_window_center[1];
exr_header->screen_window_width = info.screen_window_width;
exr_header->chunk_count = info.chunk_count;
exr_header->display_window[0] = info.display_window[0];
exr_header->display_window[1] = info.display_window[1];
exr_header->display_window[2] = info.display_window[2];
exr_header->display_window[3] = info.display_window[3];
exr_header->data_window[0] = info.data_window[0];
exr_header->data_window[1] = info.data_window[1];
exr_header->data_window[2] = info.data_window[2];
exr_header->data_window[3] = info.data_window[3];
exr_header->line_order = info.line_order;
exr_header->compression_type = info.compression_type;
exr_header->tile_size_x = info.tile_size_x;
exr_header->tile_size_y = info.tile_size_y;
exr_header->tile_level_mode = info.tile_level_mode;
exr_header->tile_rounding_mode = info.tile_rounding_mode;
exr_header->num_channels = static_cast<int>(info.channels.size());
exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
// manually add '\0' for safety.
exr_header->channels[c].name[255] = '\0';
exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
exr_header->channels[c].p_linear = info.channels[c].p_linear;
exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
}
exr_header->pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->pixel_types[c] = info.channels[c].pixel_type;
}
// Initially fill with values of `pixel_types`
exr_header->requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
}
exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
if (exr_header->num_custom_attributes > 0) {
// TODO(syoyo): Report warning when # of attributes exceeds
// `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
}
exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));
    for (size_t i = 0;
         i < static_cast<size_t>(exr_header->num_custom_attributes); i++) {
memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
256);
memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
256);
exr_header->custom_attributes[i].size = info.attributes[i].size;
      // Just copy the pointer; EXRHeader takes ownership of the attribute value.
exr_header->custom_attributes[i].value = info.attributes[i].value;
}
} else {
exr_header->custom_attributes = NULL;
}
exr_header->header_len = info.header_len;
}
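// DecodeChunk() decodes all chunks (scanline blocks or tiles) listed in
// `offsets` into `exr_image`. `head` points at the start of the EXR memory
// and `size` is its total length; every offset is validated against `size`
// before being dereferenced.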
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
const std::vector<tinyexr::tinyexr_uint64> &offsets,
const unsigned char *head, const size_t size,
std::string *err) {
int num_channels = exr_header->num_channels;
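  // Scanlines per chunk as defined by the OpenEXR format: NONE/RLE/ZIPS
  // chunks hold 1 scanline, ZIP holds 16 and PIZ holds 32. ZFP is a
  // tinyexr-specific extension that also uses 16-scanline chunks.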
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;
if ((data_width < 0) || (data_height < 0)) {
if (err) {
std::stringstream ss;
ss << "Invalid data width or data height: " << data_width << ", "
<< data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
  // Reject implausibly large data_width / data_height (likely an invalid header).
{
const int threshold = 1024 * 8192; // heuristics
if ((data_width > threshold) || (data_height > threshold)) {
if (err) {
std::stringstream ss;
ss << "data_with or data_height too large. data_width: " << data_width
<< ", "
<< "data_height = " << data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
}
size_t num_blocks = offsets.size();
std::vector<size_t> channel_offset_list;
int pixel_data_size = 0;
size_t channel_offset = 0;
if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
&channel_offset, num_channels,
exr_header->channels)) {
if (err) {
(*err) += "Failed to compute channel layout.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
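  // After ComputeChannelLayout(), pixel_data_size is the number of bytes one
  // pixel occupies across all channels, and channel_offset_list[c] is the
  // byte offset of channel c within that per-pixel footprint. Inside a
  // decoded scanline, each channel's row is stored contiguously, so channel
  // c's row begins channel_offset_list[c] * width bytes into the line.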
bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety.
if (exr_header->tiled) {
// value check
if (exr_header->tile_size_x < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_size_y < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
size_t num_tiles = offsets.size(); // = # of blocks
    exr_image->tiles = static_cast<EXRTile *>(
        calloc(static_cast<size_t>(num_tiles), sizeof(EXRTile)));
int err_code = TINYEXR_SUCCESS;
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<size_t> tile_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_tiles)) {
num_threads = int(num_tiles);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
size_t tile_idx = 0;
while ((tile_idx = tile_count++) < num_tiles) {
#else
for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
// Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
num_channels, exr_header->channels,
exr_header->requested_pixel_types, exr_header->tile_size_x,
exr_header->tile_size_y);
// 16 byte: tile coordinates
// 4 byte : data size
// ~ : data(uncompressed or compressed)
if (offsets[tile_idx] + sizeof(int) * 5 > size) {
// TODO(LTE): atomic
if (err) {
(*err) += "Insufficient data size.\n";
}
err_code = TINYEXR_ERROR_INVALID_DATA;
break;
}
size_t data_size =
size_t(size - (offsets[tile_idx] + sizeof(int) * 5));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);
int tile_coordinates[4];
memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&tile_coordinates[3]));
// @todo{ LoD }
if (tile_coordinates[2] != 0) {
err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE;
break;
}
if (tile_coordinates[3] != 0) {
err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE;
break;
}
int data_len;
memcpy(&data_len, data_ptr + 16,
sizeof(int)); // 16 = sizeof(tile_coordinates)
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len < 4 || size_t(data_len) > data_size) {
// TODO(LTE): atomic
if (err) {
(*err) += "Insufficient data length.\n";
}
err_code = TINYEXR_ERROR_INVALID_DATA;
break;
}
// Move to data addr: 20 = 16 + 4;
data_ptr += 20;
bool ret = tinyexr::DecodeTiledPixelData(
exr_image->tiles[tile_idx].images,
&(exr_image->tiles[tile_idx].width),
&(exr_image->tiles[tile_idx].height),
exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order, data_width, data_height,
tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x,
exr_header->tile_size_y, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list);
if (!ret) {
// TODO(LTE): atomic
if (err) {
(*err) += "Failed to decode tile data.\n";
}
err_code = TINYEXR_ERROR_INVALID_DATA;
}
exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
}
}));
} // num_thread loop
for (auto &t : workers) {
t.join();
}
#else
}
#endif
if (err_code != TINYEXR_SUCCESS) {
return err_code;
}
exr_image->num_tiles = static_cast<int>(num_tiles);
} else { // scanline format
    // Don't allow too large an image (256GB * pixel_data_size or more);
    // workaround for issue #104.
size_t total_data_len =
size_t(data_width) * size_t(data_height) * size_t(num_channels);
const bool total_data_len_overflown =
sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
if ((total_data_len == 0) || total_data_len_overflown) {
if (err) {
std::stringstream ss;
ss << "Image data size is zero or too large: width = " << data_width
<< ", height = " << data_height << ", channels = " << num_channels
<< std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
exr_image->images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<int> y_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_blocks)) {
num_threads = int(num_blocks);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int y = 0;
while ((y = y_count++) < int(num_blocks)) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < static_cast<int>(num_blocks); y++) {
#endif
size_t y_idx = static_cast<size_t>(y);
if (offsets[y_idx] + sizeof(int) * 2 > size) {
invalid_data = true;
} else {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
size_t data_size =
size_t(size - (offsets[y_idx] + sizeof(int) * 2));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
int line_no;
memcpy(&line_no, data_ptr, sizeof(int));
int data_len;
memcpy(&data_len, data_ptr + 4, sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (size_t(data_len) > data_size) {
invalid_data = true;
          } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
            // Implausibly large scanline number; assume the chunk header is
            // corrupt. 2 << 20 = 2097152 is a heuristic bound.
            invalid_data = true;
} else if (data_len == 0) {
// TODO(syoyo): May be ok to raise the threshold for example
// `data_len < 4`
invalid_data = true;
} else {
// line_no may be negative.
int end_line_no = (std::min)(line_no + num_scanline_blocks,
(exr_header->data_window[3] + 1));
int num_lines = end_line_no - line_no;
if (num_lines <= 0) {
invalid_data = true;
} else {
// Move to data addr: 8 = 4 + 4;
data_ptr += 8;
// Adjust line_no with data_window.bmin.y
// overflow check
tinyexr_int64 lno =
static_cast<tinyexr_int64>(line_no) -
static_cast<tinyexr_int64>(exr_header->data_window[1]);
if (lno > std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else if (lno < -std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else {
line_no -= exr_header->data_window[1];
}
if (line_no < 0) {
invalid_data = true;
} else {
if (!tinyexr::DecodePixelData(
exr_image->images, exr_header->requested_pixel_types,
data_ptr, static_cast<size_t>(data_len),
exr_header->compression_type, exr_header->line_order,
data_width, data_height, data_width, y, line_no,
num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(
exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list)) {
invalid_data = true;
}
}
}
}
}
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
}
  if (invalid_data) {
    if (err) {
      (*err) += "Invalid data found when decoding pixels.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
// Overwrite `pixel_type` with `requested_pixel_type`.
{
for (int c = 0; c < exr_header->num_channels; c++) {
exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
}
}
{
exr_image->num_channels = num_channels;
exr_image->width = data_width;
exr_image->height = data_height;
}
return TINYEXR_SUCCESS;
}
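// ReconstructLineOffsets() rebuilds the scanline offset table by walking the
// chunk headers sequentially: each chunk starts with a 4-byte scanline number
// and a 4-byte data length, so the next chunk begins data_len + 8 bytes
// later. This recovers files whose stored offset table is zeroed or invalid.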
static bool ReconstructLineOffsets(
std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
const unsigned char *head, const unsigned char *marker, const size_t size) {
TEXR_ASSERT(head < marker);
TEXR_ASSERT(offsets->size() == n);
for (size_t i = 0; i < n; i++) {
size_t offset = static_cast<size_t>(marker - head);
// Offset should not exceed whole EXR file/data size.
if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
return false;
}
    int y;
    unsigned int data_len;
    memcpy(&y, marker, sizeof(int));
    memcpy(&data_len, marker + 4, sizeof(unsigned int));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
    // Byte-swap before validating so the bounds check also holds on
    // big-endian hosts.
    if (data_len >= size) {
      return false;
    }
(*offsets)[i] = offset;
marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len)
}
return true;
}
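// DecodeEXRImage() validates the data window, reads (or reconstructs) the
// chunk offset table located right after the header, then delegates the
// per-chunk pixel decoding to DecodeChunk().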
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *head,
const unsigned char *marker, const size_t size,
const char **err) {
if (exr_image == NULL || exr_header == NULL || head == NULL ||
marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
int data_width = exr_header->data_window[2] - exr_header->data_window[0];
if (data_width >= std::numeric_limits<int>::max()) {
// Issue 63
tinyexr::SetErrorMessage("Invalid data width value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
data_width++;
int data_height = exr_header->data_window[3] - exr_header->data_window[1];
if (data_height >= std::numeric_limits<int>::max()) {
tinyexr::SetErrorMessage("Invalid data height value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
data_height++;
if ((data_width < 0) || (data_height < 0)) {
tinyexr::SetErrorMessage("data width or data height is negative.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
  // Reject implausibly large data_width / data_height (likely an invalid header).
{
const int threshold = 1024 * 8192; // heuristics
if (data_width > threshold) {
tinyexr::SetErrorMessage("data width too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
if (data_height > threshold) {
tinyexr::SetErrorMessage("data height too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
// Read offset tables.
size_t num_blocks = 0;
if (exr_header->chunk_count > 0) {
// Use `chunkCount` attribute.
num_blocks = static_cast<size_t>(exr_header->chunk_count);
} else if (exr_header->tiled) {
// @todo { LoD }
size_t num_x_tiles = static_cast<size_t>(data_width) /
static_cast<size_t>(exr_header->tile_size_x);
if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) <
static_cast<size_t>(data_width)) {
num_x_tiles++;
}
size_t num_y_tiles = static_cast<size_t>(data_height) /
static_cast<size_t>(exr_header->tile_size_y);
if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) <
static_cast<size_t>(data_height)) {
num_y_tiles++;
}
num_blocks = num_x_tiles * num_y_tiles;
} else {
num_blocks = static_cast<size_t>(data_height) /
static_cast<size_t>(num_scanline_blocks);
if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
static_cast<size_t>(data_height)) {
num_blocks++;
}
}
std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks);
for (size_t y = 0; y < num_blocks; y++) {
tinyexr::tinyexr_uint64 offset;
// Issue #81
    if ((marker + sizeof(tinyexr::tinyexr_uint64)) >= (head + size)) {
tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offsets[y] = offset;
}
  // If the line offsets are invalid, we try to reconstruct them.
// See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
for (size_t y = 0; y < num_blocks; y++) {
if (offsets[y] <= 0) {
// TODO(syoyo) Report as warning?
// if (err) {
// stringstream ss;
// ss << "Incomplete lineOffsets." << std::endl;
// (*err) += ss.str();
//}
bool ret =
ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
if (ret) {
// OK
break;
} else {
tinyexr::SetErrorMessage(
"Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
{
std::string e;
int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
#if 1
FreeEXRImage(exr_image);
#else
// release memory(if exists)
if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
if (exr_image->images[c]) {
free(exr_image->images[c]);
exr_image->images[c] = NULL;
}
}
free(exr_image->images);
exr_image->images = NULL;
}
#endif
}
return ret;
}
}
static void GetLayers(const EXRHeader& exr_header, std::vector<std::string>& layer_names) {
  // Naive implementation: group channels by layer. Walk every channel name,
  // strip the component after the last period, and collect the unique layer
  // prefixes.
layer_names.clear();
for (int c = 0; c < exr_header.num_channels; c++) {
std::string full_name(exr_header.channels[c].name);
const size_t pos = full_name.find_last_of('.');
if (pos != std::string::npos && pos != 0 && pos + 1 < full_name.size()) {
full_name.erase(pos);
if (std::find(layer_names.begin(), layer_names.end(), full_name) == layer_names.end())
layer_names.push_back(full_name);
}
}
}
struct LayerChannel {
explicit LayerChannel (size_t i, std::string n)
: index(i)
, name(n)
{}
size_t index;
std::string name;
};
static void ChannelsInLayer(const EXRHeader& exr_header, const std::string layer_name, std::vector<LayerChannel>& channels) {
channels.clear();
for (int c = 0; c < exr_header.num_channels; c++) {
std::string ch_name(exr_header.channels[c].name);
if (layer_name.empty()) {
const size_t pos = ch_name.find_last_of('.');
if (pos != std::string::npos && pos < ch_name.size()) {
ch_name = ch_name.substr(pos + 1);
}
} else {
const size_t pos = ch_name.find(layer_name + '.');
if (pos == std::string::npos)
continue;
if (pos == 0) {
ch_name = ch_name.substr(layer_name.size() + 1);
}
}
LayerChannel ch(size_t(c), ch_name);
channels.push_back(ch);
}
}
} // namespace tinyexr
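// Example usage of EXRLayers() (an illustrative sketch; error handling
// abbreviated). The returned array and each string in it are heap-allocated
// and must be released by the caller:
//
//   const char **layers = NULL;
//   int num_layers = 0;
//   if (EXRLayers("input.exr", &layers, &num_layers, NULL) ==
//       TINYEXR_SUCCESS) {
//     for (int i = 0; i < num_layers; i++) {
//       printf("layer: %s\n", layers[i]);
//       free(const_cast<char *>(layers[i]));
//     }
//     free(layers);
//   }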
int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err) {
EXRVersion exr_version;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage("Invalid EXR header.", err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
std::vector<std::string> layer_vec;
tinyexr::GetLayers(exr_header, layer_vec);
(*num_layers) = int(layer_vec.size());
(*layer_names) = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size())));
for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) {
#ifdef _MSC_VER
(*layer_names)[c] = _strdup(layer_vec[c].c_str());
#else
(*layer_names)[c] = strdup(layer_vec[c].c_str());
#endif
}
FreeEXRHeader(&exr_header);
return TINYEXR_SUCCESS;
}
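// Example usage of LoadEXR() (an illustrative sketch). On success, `rgba`
// holds width * height RGBA pixels as 32-bit floats and must be free()d by
// the caller; on failure, release the error string with
// FreeEXRErrorMessage():
//
//   float *rgba = NULL;
//   int w = 0, h = 0;
//   const char *err = NULL;
//   int ret = LoadEXR(&rgba, &w, &h, "input.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     if (err) {
//       fprintf(stderr, "EXR load error: %s\n", err);
//       FreeEXRErrorMessage(err);
//     }
//   } else {
//     // ... use rgba ...
//     free(rgba);
//   }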
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
const char **err) {
return LoadEXRWithLayer(out_rgba, width, height, filename, /* layername */NULL, err);
}
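// LoadEXRWithLayer() behaves like LoadEXR() but restricts the channel search
// to the given layer. With `layername` == NULL, channels are matched by their
// base name (the part after the final period); a layer name not present in
// the file yields TINYEXR_ERROR_LAYER_NOT_FOUND.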
int LoadEXRWithLayer(float **out_rgba, int *width, int *height, const char *filename, const char *layername,
const char **err) {
if (out_rgba == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
InitEXRImage(&exr_image);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
std::stringstream ss;
ss << "Failed to open EXR file or read version info from EXR file. code(" << ret << ")";
tinyexr::SetErrorMessage(ss.str(), err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
{
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
// TODO: Probably limit loading to layers (channels) selected by layer index
{
int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
std::vector<std::string> layer_names;
tinyexr::GetLayers(exr_header, layer_names);
std::vector<tinyexr::LayerChannel> channels;
tinyexr::ChannelsInLayer(exr_header, layername == NULL ? "" : std::string(layername), channels);
if (channels.size() < 1) {
tinyexr::SetErrorMessage("Layer Not Found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_LAYER_NOT_FOUND;
}
size_t ch_count = channels.size() < 4 ? channels.size() : 4;
for (size_t c = 0; c < ch_count; c++) {
const tinyexr::LayerChannel &ch = channels[c];
if (ch.name == "R") {
idxR = int(ch.index);
}
else if (ch.name == "G") {
idxG = int(ch.index);
}
else if (ch.name == "B") {
idxB = int(ch.index);
}
else if (ch.name == "A") {
idxA = int(ch.index);
}
}
if (channels.size() == 1) {
int chIdx = int(channels.front().index);
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[chIdx][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[chIdx][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// Assume RGB(A)
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0;
}
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
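// Note: despite the boolean-sounding name, IsEXR() follows the library's
// error-code convention and returns TINYEXR_SUCCESS (0) when the file has a
// valid EXR signature, or a negative TINYEXR_ERROR_* code otherwise.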
int IsEXR(const char *filename) {
EXRVersion exr_version;
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_header == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument. `memory` or `exr_header` argument is null in "
"ParseEXRHeaderFromMemory()",
err);
// Invalid argument
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
if (err && !err_str.empty()) {
tinyexr::SetErrorMessage(err_str, err);
}
}
ConvertHeader(exr_header, info);
  // Transfer `tiled` from the version info.
exr_header->tiled = version->tiled;
return ret;
}
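// LoadEXRFromMemory() is the in-memory counterpart of LoadEXR(): it parses
// the version and header from `memory`, promotes HALF channels to FLOAT, and
// decodes into a caller-freed RGBA float buffer.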
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err) {
if (out_rgba == NULL || memory == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
if (ret != TINYEXR_SUCCESS) {
std::stringstream ss;
ss << "Failed to parse EXR version. code(" << ret << ")";
tinyexr::SetErrorMessage(ss.str(), err);
return ret;
}
ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
InitEXRImage(&exr_image);
ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
// TODO(syoyo): Refactor removing same code as used in LoadEXR().
if (exr_header.num_channels == 1) {
// Grayscale channel only.
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[0][srcIdx];
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[0][srcIdx];
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
}
} else {
// TODO(syoyo): Support non RGBA image.
if (idxR == -1) {
tinyexr::SetErrorMessage("R channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
tinyexr::SetErrorMessage("G channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
tinyexr::SetErrorMessage("B channel not found", err);
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0;
}
          }
        }
      }
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize < 16) {
fclose(fp);
tinyexr::SetErrorMessage("File size too short " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
TEXR_ASSERT(ret == filesize);
fclose(fp);
(void)ret;
}
return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
err);
}
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *memory, const size_t size,
const char **err) {
if (exr_image == NULL || memory == NULL ||
(size < tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
const unsigned char *head = memory;
const unsigned char *marker = reinterpret_cast<const unsigned char *>(
memory + exr_header->header_len +
8); // +8 for magic number + version header.
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
err);
}
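// SaveEXRImageToMemory() serializes a scanline EXR: magic number and version,
// the required attributes, an end-of-header NUL, the chunk offset table, then
// the (optionally compressed) scanline blocks. It returns the total byte
// count (0 on error); the caller owns and must free() *memory_out.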
size_t SaveEXRImageToMemory(const EXRImage *exr_image,
const EXRHeader *exr_header,
unsigned char **memory_out, const char **err) {
if (exr_image == NULL || memory_out == NULL ||
exr_header->compression_type < 0) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err);
return 0;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
err);
return 0;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
err);
return 0;
}
#endif
#if TINYEXR_USE_ZFP
for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) {
if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) {
tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
err);
return 0;
}
}
#endif
std::vector<unsigned char> memory;
// Header
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
memory.insert(memory.end(), header, header + 4);
}
// Version, scanline.
{
char marker[] = {2, 0, 0, 0};
/* @todo
if (exr_header->tiled) {
marker[1] |= 0x2;
}
if (exr_header->long_name) {
marker[1] |= 0x4;
}
if (exr_header->non_image) {
marker[1] |= 0x8;
}
if (exr_header->multipart) {
marker[1] |= 0x10;
}
*/
memory.insert(memory.end(), marker, marker + 4);
}
int num_scanlines = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanlines = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanlines = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanlines = 16;
}
// Write attributes.
std::vector<tinyexr::ChannelInfo> channels;
{
std::vector<unsigned char> data;
for (int c = 0; c < exr_header->num_channels; c++) {
tinyexr::ChannelInfo info;
info.p_linear = 0;
info.pixel_type = exr_header->requested_pixel_types[c];
info.x_sampling = 1;
info.y_sampling = 1;
info.name = std::string(exr_header->channels[c].name);
channels.push_back(info);
}
tinyexr::WriteChannelInfo(data, channels);
tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
static_cast<int>(data.size()));
}
{
int comp = exr_header->compression_type;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp));
tinyexr::WriteAttributeToMemory(
&memory, "compression", "compression",
reinterpret_cast<const unsigned char *>(&comp), 1);
}
{
int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1};
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3]));
tinyexr::WriteAttributeToMemory(
&memory, "dataWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
tinyexr::WriteAttributeToMemory(
&memory, "displayWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
}
{
unsigned char line_order = 0; // @fixme { read line_order from EXRHeader }
tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
&line_order, 1);
}
{
float aspectRatio = 1.0f;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio));
tinyexr::WriteAttributeToMemory(
&memory, "pixelAspectRatio", "float",
reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float));
}
{
float center[2] = {0.0f, 0.0f};
tinyexr::swap4(reinterpret_cast<unsigned int *>(¢er[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(¢er[1]));
tinyexr::WriteAttributeToMemory(
&memory, "screenWindowCenter", "v2f",
reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float));
}
{
float w = static_cast<float>(exr_image->width);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
reinterpret_cast<const unsigned char *>(&w),
sizeof(float));
}
// Custom attributes
if (exr_header->num_custom_attributes > 0) {
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
tinyexr::WriteAttributeToMemory(
&memory, exr_header->custom_attributes[i].name,
exr_header->custom_attributes[i].type,
reinterpret_cast<const unsigned char *>(
exr_header->custom_attributes[i].value),
exr_header->custom_attributes[i].size);
}
}
{ // end of header
unsigned char e = 0;
memory.push_back(e);
}
int num_blocks = exr_image->height / num_scanlines;
if (num_blocks * num_scanlines < exr_image->height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks));
size_t headerSize = memory.size();
tinyexr::tinyexr_uint64 offset =
headerSize +
static_cast<size_t>(num_blocks) *
sizeof(
tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable)
std::vector<std::vector<unsigned char> > data_list(
static_cast<size_t>(num_blocks));
std::vector<size_t> channel_offset_list(
static_cast<size_t>(exr_header->num_channels));
int pixel_data_size = 0;
size_t channel_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
channel_offset_list[c] = channel_offset;
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
pixel_data_size += sizeof(unsigned short);
channel_offset += sizeof(unsigned short);
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
pixel_data_size += sizeof(float);
channel_offset += sizeof(float);
} else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
pixel_data_size += sizeof(unsigned int);
channel_offset += sizeof(unsigned int);
} else {
TEXR_ASSERT(0);
}
}
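  // Layout inside one encoded scanline: channels are written one after
  // another, each as a contiguous row of `width` values, so channel c's row
  // starts at channel_offset_list[c] * width bytes and one full scanline
  // occupies pixel_data_size * width bytes.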
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
  // Use the ZFP compression parameter from custom attributes (if such a
  // parameter exists).
{
bool ret = tinyexr::FindZFPCompressionParam(
&zfp_compression_param, exr_header->custom_attributes,
exr_header->num_custom_attributes);
if (!ret) {
// Use predefined compression parameter.
zfp_compression_param.type = 0;
zfp_compression_param.rate = 2;
}
}
#endif
  // TODO(LTE): C++11 thread
// Use signed int since some OpenMP compiler doesn't allow unsigned type for
// `parallel for`
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_blocks; i++) {
size_t ii = static_cast<size_t>(i);
int start_y = num_scanlines * i;
int endY = (std::min)(num_scanlines * (i + 1), exr_image->height);
int h = endY - start_y;
std::vector<unsigned char> buf(
static_cast<size_t>(exr_image->width * h * pixel_data_size));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP16 h16;
h16.u = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP32 f32 = half_to_float(h16);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f));
// line_ptr[x] = f32.f;
tinyexr::cpy4(line_ptr + x, &(f32.f));
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
unsigned short val = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap2(&val);
// line_ptr[x] = val;
tinyexr::cpy2(line_ptr + x, &val);
}
}
} else {
TEXR_ASSERT(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP32 f32;
f32.f = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP16 h16;
h16 = float_to_half_full(f32);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
// line_ptr[x] = h16.u;
tinyexr::cpy2(line_ptr + x, &(h16.u));
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
float val = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
} else {
TEXR_ASSERT(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
for (int y = 0; y < h; y++) {
// Assume increasing Y
unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] * static_cast<size_t>(exr_image->width)));
for (int x = 0; x < exr_image->width; x++) {
unsigned int val = reinterpret_cast<unsigned int **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(&val);
// line_ptr[x] = val;
tinyexr::cpy4(line_ptr + x, &val);
}
}
}
}
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(buf.size());
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), buf.begin(),
buf.begin() + data_len);
} else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
static_cast<unsigned long>(buf.size())));
#else
std::vector<unsigned char> block(
compressBound(static_cast<uLong>(buf.size())));
#endif
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressZip(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// (buf.size() * 3) / 2 would be enough.
std::vector<unsigned char> block((buf.size() * 3) / 2);
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressRle(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
unsigned int bufLen =
8192 + static_cast<unsigned int>(
2 * static_cast<unsigned int>(
buf.size())); // @fixme { compute good bound. }
std::vector<unsigned char> block(bufLen);
unsigned int outSize = static_cast<unsigned int>(block.size());
CompressPiz(&block.at(0), &outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
buf.size(), channels, exr_image->width, h);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
TEXR_ASSERT(0);
#endif
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
std::vector<unsigned char> block;
unsigned int outSize;
tinyexr::CompressZfp(
&block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
exr_image->width, h, exr_header->num_channels, zfp_compression_param);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
TEXR_ASSERT(0);
#endif
} else {
TEXR_ASSERT(0);
}
} // omp parallel
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
offsets[i] = offset;
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
offset += data_list[i].size();
}
size_t totalSize = static_cast<size_t>(offset);
{
memory.insert(
memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)),
reinterpret_cast<unsigned char *>(&offsets.at(0)) +
sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks));
}
if (memory.size() == 0) {
tinyexr::SetErrorMessage("Output memory size is zero", err);
return 0;
}
(*memory_out) = static_cast<unsigned char *>(malloc(totalSize));
memcpy((*memory_out), &memory.at(0), memory.size());
unsigned char *memory_ptr = *memory_out + memory.size();
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
memcpy(memory_ptr, &data_list[i].at(0), data_list[i].size());
memory_ptr += data_list[i].size();
}
return totalSize; // OK
}
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL || filename == NULL ||
exr_header->compression_type < 0) {
tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
err);
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
#endif
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "wb");
#else
FILE *fp = fopen(filename, "wb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot write a file", err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
unsigned char *mem = NULL;
size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
if (mem_size == 0) {
fclose(fp);
return TINYEXR_ERROR_SERIALZATION_FAILED;
}
size_t written_size = 0;
if ((mem_size > 0) && mem) {
written_size = fwrite(mem, 1, mem_size, fp);
}
free(mem);
fclose(fp);
if (written_size != mem_size) {
tinyexr::SetErrorMessage("Cannot write a file", err);
return TINYEXR_ERROR_CANT_WRITE_FILE;
}
return TINYEXR_SUCCESS;
}
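// Example usage of SaveEXRImageToFile() (an illustrative sketch; assumes
// `image` and `header` were previously filled in, e.g. via InitEXRImage() /
// InitEXRHeader() and manual channel setup):
//
//   const char *err = NULL;
//   int ret = SaveEXRImageToFile(&image, &header, "output.exr", &err);
//   if (ret != TINYEXR_SUCCESS) {
//     if (err) {
//       fprintf(stderr, "EXR save error: %s\n", err);
//       FreeEXRErrorMessage(err);
//     }
//   }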
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
if (deep_image == NULL) {
tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _MSC_VER
FILE *fp = NULL;
errno_t errcode = fopen_s(&fp, filename, "rb");
if ((0 != errcode) || (!fp)) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
FILE *fp = fopen(filename, "rb");
if (!fp) {
tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#endif
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize == 0) {
fclose(fp);
tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
TEXR_ASSERT(ret == filesize);
(void)ret;
}
fclose(fp);
const char *head = &buf[0];
const char *marker = &buf[0];
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
tinyexr::SetErrorMessage("Invalid magic number", err);
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
// Version, scanline.
{
// ver 2.0, scanline, deep bit on(0x800)
// must be [2, 0, 0, 0]
if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
tinyexr::SetErrorMessage("Unsupported version or scanline", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
marker += 4;
}
int dx = -1;
int dy = -1;
int dw = -1;
int dh = -1;
int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1;
int num_channels = -1;
std::vector<tinyexr::ChannelInfo> channels;
// Read attributes
size_t size = filesize - tinyexr::kEXRVersionSize;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
marker++;
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
std::stringstream ss;
ss << "Failed to parse attribute\n";
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (attr_name.compare("compression") == 0) {
compression_type = data[0];
if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
std::stringstream ss;
ss << "Unsupported compression type : " << compression_type;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
}
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!tinyexr::ReadChannelInfo(channels, data)) {
tinyexr::SetErrorMessage("Failed to parse channel info", err);
return TINYEXR_ERROR_INVALID_DATA;
}
num_channels = static_cast<int>(channels.size());
if (num_channels < 1) {
tinyexr::SetErrorMessage("Invalid channels format", err);
return TINYEXR_ERROR_INVALID_DATA;
}
} else if (attr_name.compare("dataWindow") == 0) {
memcpy(&dx, &data.at(0), sizeof(int));
memcpy(&dy, &data.at(4), sizeof(int));
memcpy(&dw, &data.at(8), sizeof(int));
memcpy(&dh, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh));
} else if (attr_name.compare("displayWindow") == 0) {
int x;
int y;
int w;
int h;
memcpy(&x, &data.at(0), sizeof(int));
memcpy(&y, &data.at(4), sizeof(int));
memcpy(&w, &data.at(8), sizeof(int));
memcpy(&h, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&h));
}
}
TEXR_ASSERT(dx >= 0);
TEXR_ASSERT(dy >= 0);
TEXR_ASSERT(dw >= 0);
TEXR_ASSERT(dh >= 0);
TEXR_ASSERT(num_channels >= 1);
int data_width = dw - dx + 1;
int data_height = dh - dy + 1;
std::vector<float> image(
static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA
// Read offset tables.
int num_blocks = data_height / num_scanline_blocks;
if (num_blocks * num_scanline_blocks < data_height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
tinyexr::tinyexr_int64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
marker += sizeof(tinyexr::tinyexr_int64); // = 8
offsets[y] = offset;
}
#if TINYEXR_USE_PIZ
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
// OK
} else {
tinyexr::SetErrorMessage("Unsupported compression format", err);
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
deep_image->image = static_cast<float ***>(
malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
  for (int c = 0; c < num_channels; c++) {
    deep_image->image[c] = static_cast<float **>(
        malloc(sizeof(float *) * static_cast<size_t>(data_height)));
    // Per-scanline sample arrays are allocated while decoding each block
    // below.
  }
deep_image->offset_table = static_cast<int **>(
malloc(sizeof(int *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
deep_image->offset_table[y] = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(data_width)));
}
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// int: y coordinate
// int64: packed size of pixel offset table
// int64: packed size of sample data
// int64: unpacked size of sample data
// compressed pixel offset table
// compressed sample data
int line_no;
tinyexr::tinyexr_int64 packedOffsetTableSize;
tinyexr::tinyexr_int64 packedSampleDataSize;
tinyexr::tinyexr_int64 unpackedSampleDataSize;
memcpy(&line_no, data_ptr, sizeof(int));
memcpy(&packedOffsetTableSize, data_ptr + 4,
sizeof(tinyexr::tinyexr_int64));
memcpy(&packedSampleDataSize, data_ptr + 12,
sizeof(tinyexr::tinyexr_int64));
memcpy(&unpackedSampleDataSize, data_ptr + 20,
sizeof(tinyexr::tinyexr_int64));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
// decode pixel offset table.
{
unsigned long dstLen =
static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
&dstLen, data_ptr + 28,
static_cast<unsigned long>(packedOffsetTableSize))) {
tinyexr::SetErrorMessage("Failed to decompress pixel offset table", err);
return TINYEXR_ERROR_INVALID_DATA;
}
TEXR_ASSERT(dstLen == pixelOffsetTable.size() * sizeof(int));
for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
deep_image->offset_table[y][i] = pixelOffsetTable[i];
}
}
std::vector<unsigned char> sample_data(
static_cast<size_t>(unpackedSampleDataSize));
// decode sample data.
{
unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
if (dstLen) {
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
data_ptr + 28 + packedOffsetTableSize,
static_cast<unsigned long>(packedSampleDataSize))) {
tinyexr::SetErrorMessage("Failed to decompress sample data", err);
return TINYEXR_ERROR_INVALID_DATA;
}
TEXR_ASSERT(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
}
}
// decode sample
int sampleSize = -1;
std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
{
int channel_offset = 0;
for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
channel_offset_list[i] = channel_offset;
if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
channel_offset += 4;
} else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
channel_offset += 2;
} else if (channels[i].pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) { // float
channel_offset += 4;
} else {
TEXR_ASSERT(0);
}
}
sampleSize = channel_offset;
}
TEXR_ASSERT(sampleSize >= 2);
TEXR_ASSERT(static_cast<size_t>(
pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
sampleSize) == sample_data.size());
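// The pixel offset table is cumulative: pixelOffsetTable[x] holds the total
// sample count for pixels 0..x on this line, so the last entry times
// sampleSize must match the unpacked sample data size checked above.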
int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
//
// Alloc memory. Pixel data is stored as image[channel][y][sample].
//
{
tinyexr::tinyexr_uint64 data_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
deep_image->image[c][y] = static_cast<float *>(
malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
if (channels[c].pixel_type == 0) { // UINT
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
unsigned int ui;
unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
&sample_data.at(size_t(data_offset) + x * sizeof(int)));
tinyexr::cpy4(&ui, src_ptr);
deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
}
data_offset +=
sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
} else if (channels[c].pixel_type == 1) { // half
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
tinyexr::FP16 f16;
const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
&sample_data.at(size_t(data_offset) + x * sizeof(short)));
tinyexr::cpy2(&(f16.u), src_ptr);
tinyexr::FP32 f32 = half_to_float(f16);
deep_image->image[c][y][x] = f32.f;
}
data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
} else { // float
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
float f;
const float *src_ptr = reinterpret_cast<float *>(
&sample_data.at(size_t(data_offset) + x * sizeof(float)));
tinyexr::cpy4(&f, src_ptr);
deep_image->image[c][y][x] = f;
}
data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
}
}
}
} // y
deep_image->width = data_width;
deep_image->height = data_height;
deep_image->channel_names = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
}
deep_image->num_channels = num_channels;
return TINYEXR_SUCCESS;
}
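/*
Illustrative usage sketch (not part of the library): how the decoded deep
image above is laid out. This assumes tinyexr's LoadDeepEXR() entry point
declared earlier in this header and a hypothetical "input.exr"; samples for
pixel (x, y) live in image[c][y][start..end), where start/end come from the
cumulative offset_table row. Freeing the DeepImage members is left out.
*/
static void PrintDeepSamplesExample(const char *filename) {
DeepImage deep;
const char *err = NULL;
if (LoadDeepEXR(&deep, filename, &err) != TINYEXR_SUCCESS) {
if (err) {
fprintf(stderr, "LoadDeepEXR: %s\n", err);
FreeEXRErrorMessage(err);
}
return;
}
for (int y = 0; y < deep.height; y++) {
for (int x = 0; x < deep.width; x++) {
int start = (x == 0) ? 0 : deep.offset_table[y][x - 1];
int end = deep.offset_table[y][x];
for (int c = 0; c < deep.num_channels; c++) {
for (int s = start; s < end; s++) {
printf("%s(%d,%d) sample %d = %f\n", deep.channel_names[c], x, y,
s - start, deep.image[c][y][s]);
}
}
}
}
}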
void InitEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return;
}
exr_image->width = 0;
exr_image->height = 0;
exr_image->num_channels = 0;
exr_image->images = NULL;
exr_image->tiles = NULL;
exr_image->num_tiles = 0;
}
void FreeEXRErrorMessage(const char *msg) {
if (msg) {
free(reinterpret_cast<void *>(const_cast<char *>(msg)));
}
}
void InitEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return;
}
memset(exr_header, 0, sizeof(EXRHeader));
}
int FreeEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->channels) {
free(exr_header->channels);
}
if (exr_header->pixel_types) {
free(exr_header->pixel_types);
}
if (exr_header->requested_pixel_types) {
free(exr_header->requested_pixel_types);
}
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
if (exr_header->custom_attributes[i].value) {
free(exr_header->custom_attributes[i].value);
}
}
if (exr_header->custom_attributes) {
free(exr_header->custom_attributes);
}
return TINYEXR_SUCCESS;
}
int FreeEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->images && exr_image->images[i]) {
free(exr_image->images[i]);
}
}
if (exr_image->images) {
free(exr_image->images);
}
if (exr_image->tiles) {
for (int tid = 0; tid < exr_image->num_tiles; tid++) {
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
free(exr_image->tiles[tid].images[i]);
}
}
if (exr_image->tiles[tid].images) {
free(exr_image->tiles[tid].images);
}
}
free(exr_image->tiles);
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_header == NULL || exr_version == NULL || filename == NULL) {
tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
TEXR_ASSERT(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
err);
}
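/*
Illustrative usage sketch (not part of the library): the Init/Parse/Free
lifecycle for a single-part header, using functions defined in this file.
The filename is hypothetical.
*/
static int ParseHeaderExample(void) {
EXRVersion version;
if (ParseEXRVersionFromFile(&version, "input.exr") != TINYEXR_SUCCESS) {
return TINYEXR_ERROR_INVALID_FILE;
}
EXRHeader header;
InitEXRHeader(&header);
const char *err = NULL;
int ret = ParseEXRHeaderFromFile(&header, &version, "input.exr", &err);
if (ret != TINYEXR_SUCCESS) {
if (err) {
fprintf(stderr, "ParseEXRHeaderFromFile: %s\n", err);
FreeEXRErrorMessage(err);
}
return ret;
}
// Inspect e.g. header.num_channels and header.channels[i].name here.
FreeEXRHeader(&header);
return TINYEXR_SUCCESS;
}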
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
int *num_headers,
const EXRVersion *exr_version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
exr_version == NULL) {
// Invalid argument
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
tinyexr::SetErrorMessage("Data size too short", err);
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
std::vector<tinyexr::HeaderInfo> infos;
for (;;) {
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
bool empty_header = false;
int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage(err_str, err);
return ret;
}
if (empty_header) {
marker += 1; // skip '\0'
break;
}
// `chunkCount` must exist in the header.
if (info.chunk_count == 0) {
tinyexr::SetErrorMessage(
"`chunkCount' attribute is not found in the header.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
infos.push_back(info);
// move to next header.
marker += info.header_len;
marker_size -= info.header_len;
}
// allocate memory for EXRHeader and create array of EXRHeader pointers.
(*exr_headers) =
static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
for (size_t i = 0; i < infos.size(); i++) {
EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
ConvertHeader(exr_header, infos[i]);
// transfer `tiled` from version.
exr_header->tiled = exr_version->tiled;
(*exr_headers)[i] = exr_header;
}
(*num_headers) = static_cast<int>(infos.size());
return TINYEXR_SUCCESS;
}
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
filename == NULL) {
tinyexr::SetErrorMessage(
"Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
TEXR_ASSERT(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRMultipartHeaderFromMemory(
exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
size_t size) {
if (version == NULL || memory == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory;
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
version->tiled = false;
version->long_name = false;
version->non_image = false;
version->multipart = false;
// Parse version header.
{
// version number: must be 2
if (marker[0] != 2) {
return TINYEXR_ERROR_INVALID_EXR_VERSION;
}
version->version = 2;
if (marker[1] & 0x2) { // 9th bit
version->tiled = true;
}
if (marker[1] & 0x4) { // 10th bit
version->long_name = true;
}
if (marker[1] & 0x8) { // 11th bit
version->non_image = true; // (deep image)
}
if (marker[1] & 0x10) { // 12th bit
version->multipart = true;
}
}
return TINYEXR_SUCCESS;
}
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
if (filename == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t file_size;
// Compute size
fseek(fp, 0, SEEK_END);
file_size = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (file_size < tinyexr::kEXRVersionSize) {
fclose(fp);
return TINYEXR_ERROR_INVALID_FILE;
}
unsigned char buf[tinyexr::kEXRVersionSize];
size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
fclose(fp);
if (ret != tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
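/*
Illustrative sketch (not part of the library): probing a buffer with
ParseEXRVersionFromMemory() before committing to a full parse. `data` and
`size` are assumed to hold a complete EXR file.
*/
static bool LooksLikeMultipartEXR(const unsigned char *data, size_t size) {
EXRVersion version;
if (ParseEXRVersionFromMemory(&version, data, size) != TINYEXR_SUCCESS) {
return false;
}
// Multipart files must be routed through the multipart header/image
// loaders below; single-part files use the plain path.
return version.multipart != 0;
}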
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromMemory()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
// compute total header size.
size_t total_header_size = 0;
for (unsigned int i = 0; i < num_parts; i++) {
if (exr_headers[i]->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
total_header_size += exr_headers[i]->header_len;
}
const char *marker = reinterpret_cast<const char *>(
memory + total_header_size + 4 +
4); // +8 for magic number and version header.
marker += 1; // Skip empty header.
// NOTE 1:
// In a multipart image, a 'part number' precedes each chunk:
// 4 bytes : part number
// 4+ bytes: chunk data
//
// NOTE 2:
// The EXR spec says 'part number' is 'unsigned long', but it is actually an
// unsigned int (4 bytes) in the OpenEXR implementation.
// http://www.openexr.com/openexrfilelayout.pdf
// Load chunk offset table.
std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list;
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> offset_table(
static_cast<size_t>(exr_headers[i]->chunk_count));
for (size_t c = 0; c < offset_table.size(); c++) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, 8);
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
offset_table[c] = offset + 4; // +4 to skip 'part number'
marker += 8;
}
chunk_offset_table_list.push_back(offset_table);
}
// Decode image.
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> &offset_table =
chunk_offset_table_list[i];
// First, check that the 'part number' is identical to 'i'.
for (size_t c = 0; c < offset_table.size(); c++) {
const unsigned char *part_number_addr =
memory + offset_table[c] - 4; // -4 to move to 'part number' field.
unsigned int part_no;
memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4
tinyexr::swap4(&part_no);
if (part_no != i) {
tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
std::string e;
int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table,
memory, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
return ret;
}
}
return TINYEXR_SUCCESS;
}
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts, const char *filename,
const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromFile", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
TEXR_ASSERT(ret == filesize);
fclose(fp);
if (ret != filesize) {
tinyexr::SetErrorMessage("fread() error on " + std::string(filename), err);
return TINYEXR_ERROR_INVALID_FILE;
}
}
return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
&buf.at(0), filesize, err);
}
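/*
Illustrative usage sketch (not part of the library): the full multipart read
path built from the functions above. The filename is hypothetical.
*/
static int LoadMultipartExample(const char *filename) {
const char *err = NULL;
EXRVersion version;
if (ParseEXRVersionFromFile(&version, filename) != TINYEXR_SUCCESS) {
return TINYEXR_ERROR_INVALID_FILE;
}
if (!version.multipart) {
return TINYEXR_ERROR_INVALID_DATA;
}
EXRHeader **headers = NULL;
int num_parts = 0;
int ret = ParseEXRMultipartHeaderFromFile(&headers, &num_parts, &version,
filename, &err);
if (ret != TINYEXR_SUCCESS) {
if (err) FreeEXRErrorMessage(err);
return ret;
}
std::vector<EXRImage> images(static_cast<size_t>(num_parts));
for (int i = 0; i < num_parts; i++) {
InitEXRImage(&images[i]);
}
ret = LoadEXRMultipartImageFromFile(
images.data(), const_cast<const EXRHeader **>(headers),
static_cast<unsigned int>(num_parts), filename, &err);
if (ret != TINYEXR_SUCCESS && err) {
FreeEXRErrorMessage(err);
}
// Each header was malloc'ed individually by the multipart header parser,
// so free both the members and the pointers themselves.
for (int i = 0; i < num_parts; i++) {
FreeEXRImage(&images[i]);
FreeEXRHeader(headers[i]);
free(headers[i]);
}
free(headers);
return ret;
}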
int SaveEXR(const float *data, int width, int height, int components,
const int save_as_fp16, const char *outfilename, const char **err) {
if ((components == 1) || (components == 3) || (components == 4)) {
// OK
} else {
std::stringstream ss;
ss << "Unsupported component value : " << components << std::endl;
tinyexr::SetErrorMessage(ss.str(), err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRHeader header;
InitEXRHeader(&header);
if ((width < 16) && (height < 16)) {
// No compression for small images.
header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
} else {
header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
}
EXRImage image;
InitEXRImage(&image);
image.num_channels = components;
std::vector<float> images[4];
if (components == 1) {
images[0].resize(static_cast<size_t>(width * height));
memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
} else {
images[0].resize(static_cast<size_t>(width * height));
images[1].resize(static_cast<size_t>(width * height));
images[2].resize(static_cast<size_t>(width * height));
images[3].resize(static_cast<size_t>(width * height));
// Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
images[0][i] = data[static_cast<size_t>(components) * i + 0];
images[1][i] = data[static_cast<size_t>(components) * i + 1];
images[2][i] = data[static_cast<size_t>(components) * i + 2];
if (components == 4) {
images[3][i] = data[static_cast<size_t>(components) * i + 3];
}
}
}
float *image_ptr[4] = {0, 0, 0, 0};
if (components == 4) {
image_ptr[0] = &(images[3].at(0)); // A
image_ptr[1] = &(images[2].at(0)); // B
image_ptr[2] = &(images[1].at(0)); // G
image_ptr[3] = &(images[0].at(0)); // R
} else if (components == 3) {
image_ptr[0] = &(images[2].at(0)); // B
image_ptr[1] = &(images[1].at(0)); // G
image_ptr[2] = &(images[0].at(0)); // R
} else if (components == 1) {
image_ptr[0] = &(images[0].at(0)); // A
}
image.images = reinterpret_cast<unsigned char **>(image_ptr);
image.width = width;
image.height = height;
header.num_channels = components;
header.channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
// Must be (A)BGR order, since most EXR viewers expect this channel order.
if (components == 4) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
strncpy_s(header.channels[1].name, "B", 255);
strncpy_s(header.channels[2].name, "G", 255);
strncpy_s(header.channels[3].name, "R", 255);
#else
strncpy(header.channels[0].name, "A", 255);
strncpy(header.channels[1].name, "B", 255);
strncpy(header.channels[2].name, "G", 255);
strncpy(header.channels[3].name, "R", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
header.channels[1].name[strlen("B")] = '\0';
header.channels[2].name[strlen("G")] = '\0';
header.channels[3].name[strlen("R")] = '\0';
} else if (components == 3) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "B", 255);
strncpy_s(header.channels[1].name, "G", 255);
strncpy_s(header.channels[2].name, "R", 255);
#else
strncpy(header.channels[0].name, "B", 255);
strncpy(header.channels[1].name, "G", 255);
strncpy(header.channels[2].name, "R", 255);
#endif
header.channels[0].name[strlen("B")] = '\0';
header.channels[1].name[strlen("G")] = '\0';
header.channels[2].name[strlen("R")] = '\0';
} else {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
#else
strncpy(header.channels[0].name, "A", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
}
header.pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
header.requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
for (int i = 0; i < header.num_channels; i++) {
header.pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image
if (save_as_fp16 > 0) {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format
} else {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e.
// no precision reduction)
}
}
int ret = SaveEXRImageToFile(&image, &header, outfilename, err);
// Free the header allocations even when saving fails, so the error path
// does not leak them.
free(header.channels);
free(header.pixel_types);
free(header.requested_pixel_types);
return ret;
}
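/*
Illustrative usage sketch (not part of the library): writing an RGBA float
buffer with SaveEXR(). The gradient contents and output filename are
hypothetical.
*/
static int SaveEXRExample(void) {
const int w = 64;
const int h = 64;
std::vector<float> rgba(static_cast<size_t>(w * h * 4));
for (int i = 0; i < w * h; i++) {
rgba[static_cast<size_t>(i) * 4 + 0] = static_cast<float>(i % w) / w; // R
rgba[static_cast<size_t>(i) * 4 + 1] = static_cast<float>(i / w) / h; // G
rgba[static_cast<size_t>(i) * 4 + 2] = 0.f; // B
rgba[static_cast<size_t>(i) * 4 + 3] = 1.f; // A
}
const char *err = NULL;
// save_as_fp16 = 1 requests half-float storage (see the pixel type loop
// above); pass 0 to keep full fp32 precision.
int ret = SaveEXR(rgba.data(), w, h, 4, 1, "output.exr", &err);
if (ret != TINYEXR_SUCCESS && err) {
fprintf(stderr, "SaveEXR: %s\n", err);
FreeEXRErrorMessage(err);
}
return ret;
}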
#ifdef __clang__
// zero-as-null-pointer-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEIFNED
#endif // TINYEXR_IMPLEMENTATION
|
convolution_3x3_pack1to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_pack1to4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
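// Output channels are processed in pairs (nn_outch) so the two kernel sets
// can reuse the broadcast input loads; any leftover channel is handled one
// at a time starting at remain_outch_start.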
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p + 1);
__m128 _bias0 = bias ? _mm_loadu_ps((const float*)bias + p * 4) : _mm_set1_ps(0.f);
__m128 _bias1 = bias ? _mm_loadu_ps((const float*)bias + (p + 1) * 4) : _mm_set1_ps(0.f);
out0.fill(_bias0);
out1.fill(_bias1);
const float* k0 = kernel.channel(p);
const float* k1 = kernel.channel(p + 1);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
__m128 _k00_0 = _mm_loadu_ps(k0);
__m128 _k01_0 = _mm_loadu_ps(k0 + 4);
__m128 _k02_0 = _mm_loadu_ps(k0 + 8);
__m128 _k10_0 = _mm_loadu_ps(k0 + 12);
__m128 _k11_0 = _mm_loadu_ps(k0 + 16);
__m128 _k12_0 = _mm_loadu_ps(k0 + 20);
__m128 _k20_0 = _mm_loadu_ps(k0 + 24);
__m128 _k21_0 = _mm_loadu_ps(k0 + 28);
__m128 _k22_0 = _mm_loadu_ps(k0 + 32);
__m128 _k00_1 = _mm_loadu_ps(k1);
__m128 _k01_1 = _mm_loadu_ps(k1 + 4);
__m128 _k02_1 = _mm_loadu_ps(k1 + 8);
__m128 _k10_1 = _mm_loadu_ps(k1 + 12);
__m128 _k11_1 = _mm_loadu_ps(k1 + 16);
__m128 _k12_1 = _mm_loadu_ps(k1 + 20);
__m128 _k20_1 = _mm_loadu_ps(k1 + 24);
__m128 _k21_1 = _mm_loadu_ps(k1 + 28);
__m128 _k22_1 = _mm_loadu_ps(k1 + 32);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
__m128 _sum00 = _mm_loadu_ps(outptr0);
__m128 _sum10 = _mm_loadu_ps(outptr1);
__m128 _r01 = _mm_set1_ps(*(r0));
__m128 _r02 = _mm_set1_ps(*(r0 + 1));
__m128 _r03 = _mm_set1_ps(*(r0 + 2));
__m128 _r11 = _mm_set1_ps(*(r1));
__m128 _r12 = _mm_set1_ps(*(r1 + 1));
__m128 _r13 = _mm_set1_ps(*(r1 + 2));
__m128 _r21 = _mm_set1_ps(*(r2));
__m128 _r22 = _mm_set1_ps(*(r2 + 1));
__m128 _r23 = _mm_set1_ps(*(r2 + 2));
_sum00 = _mm_comp_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r23, _k22_0, _sum00);
_sum10 = _mm_comp_fmadd_ps(_r01, _k00_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r02, _k01_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r03, _k02_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r11, _k10_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r12, _k11_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r13, _k12_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r21, _k20_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r22, _k21_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r23, _k22_1, _sum10);
_mm_storeu_ps(outptr0, _sum00);
_mm_storeu_ps(outptr1, _sum10);
__m128 _sum01 = _mm_loadu_ps(outptr0 + 4);
__m128 _sum11 = _mm_loadu_ps(outptr1 + 4);
__m128 _r04 = _mm_set1_ps(*(r0 + 3));
__m128 _r14 = _mm_set1_ps(*(r1 + 3));
__m128 _r24 = _mm_set1_ps(*(r2 + 3));
_sum01 = _mm_comp_fmadd_ps(_r02, _k00_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r03, _k01_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r04, _k02_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r12, _k10_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r13, _k11_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r14, _k12_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r22, _k20_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r23, _k21_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r24, _k22_0, _sum01);
_sum11 = _mm_comp_fmadd_ps(_r02, _k00_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r03, _k01_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r04, _k02_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r12, _k10_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r13, _k11_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r14, _k12_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r22, _k20_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r23, _k21_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r24, _k22_1, _sum11);
_mm_storeu_ps(outptr0 + 4, _sum01);
_mm_storeu_ps(outptr1 + 4, _sum11);
__m128 _sum02 = _mm_loadu_ps(outptr0 + 8);
__m128 _sum12 = _mm_loadu_ps(outptr1 + 8);
__m128 _r05 = _mm_set1_ps(*(r0 + 4));
__m128 _r15 = _mm_set1_ps(*(r1 + 4));
__m128 _r25 = _mm_set1_ps(*(r2 + 4));
_sum02 = _mm_comp_fmadd_ps(_r03, _k00_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r04, _k01_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r05, _k02_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r13, _k10_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r14, _k11_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r15, _k12_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r23, _k20_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r24, _k21_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r25, _k22_0, _sum02);
_sum12 = _mm_comp_fmadd_ps(_r03, _k00_1, _sum12);
_sum12 = _mm_comp_fmadd_ps(_r04, _k01_1, _sum12);
_sum12 = _mm_comp_fmadd_ps(_r05, _k02_1, _sum12);
_sum12 = _mm_comp_fmadd_ps(_r13, _k10_1, _sum12);
_sum12 = _mm_comp_fmadd_ps(_r14, _k11_1, _sum12);
_sum12 = _mm_comp_fmadd_ps(_r15, _k12_1, _sum12);
_sum12 = _mm_comp_fmadd_ps(_r23, _k20_1, _sum12);
_sum12 = _mm_comp_fmadd_ps(_r24, _k21_1, _sum12);
_sum12 = _mm_comp_fmadd_ps(_r25, _k22_1, _sum12);
_mm_storeu_ps(outptr0 + 8, _sum02);
_mm_storeu_ps(outptr1 + 8, _sum12);
__m128 _r06 = _mm_set1_ps(*(r0 + 5));
__m128 _r16 = _mm_set1_ps(*(r1 + 5));
__m128 _r26 = _mm_set1_ps(*(r2 + 5));
__m128 _sum03 = _mm_loadu_ps(outptr0 + 12);
__m128 _sum13 = _mm_loadu_ps(outptr1 + 12);
_sum03 = _mm_comp_fmadd_ps(_r04, _k00_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r05, _k01_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r06, _k02_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r14, _k10_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r15, _k11_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r16, _k12_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r24, _k20_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r25, _k21_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r26, _k22_0, _sum03);
_sum13 = _mm_comp_fmadd_ps(_r04, _k00_1, _sum13);
_sum13 = _mm_comp_fmadd_ps(_r05, _k01_1, _sum13);
_sum13 = _mm_comp_fmadd_ps(_r06, _k02_1, _sum13);
_sum13 = _mm_comp_fmadd_ps(_r14, _k10_1, _sum13);
_sum13 = _mm_comp_fmadd_ps(_r15, _k11_1, _sum13);
_sum13 = _mm_comp_fmadd_ps(_r16, _k12_1, _sum13);
_sum13 = _mm_comp_fmadd_ps(_r24, _k20_1, _sum13);
_sum13 = _mm_comp_fmadd_ps(_r25, _k21_1, _sum13);
_sum13 = _mm_comp_fmadd_ps(_r26, _k22_1, _sum13);
_mm_storeu_ps(outptr0 + 12, _sum03);
_mm_storeu_ps(outptr1 + 12, _sum13);
r0 += 4;
r1 += 4;
r2 += 4;
outptr0 += 16;
outptr1 += 16;
}
for (; j + 1 < outw; j += 2)
{
__m128 _sum00 = _mm_loadu_ps(outptr0);
__m128 _sum10 = _mm_loadu_ps(outptr1);
__m128 _r01 = _mm_set1_ps(*(r0));
__m128 _r02 = _mm_set1_ps(*(r0 + 1));
__m128 _r03 = _mm_set1_ps(*(r0 + 2));
__m128 _r11 = _mm_set1_ps(*(r1));
__m128 _r12 = _mm_set1_ps(*(r1 + 1));
__m128 _r13 = _mm_set1_ps(*(r1 + 2));
__m128 _r21 = _mm_set1_ps(*(r2));
__m128 _r22 = _mm_set1_ps(*(r2 + 1));
__m128 _r23 = _mm_set1_ps(*(r2 + 2));
_sum00 = _mm_comp_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r23, _k22_0, _sum00);
_sum10 = _mm_comp_fmadd_ps(_r01, _k00_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r02, _k01_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r03, _k02_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r11, _k10_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r12, _k11_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r13, _k12_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r21, _k20_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r22, _k21_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r23, _k22_1, _sum10);
_mm_storeu_ps(outptr0, _sum00);
_mm_storeu_ps(outptr1, _sum10);
__m128 _sum01 = _mm_loadu_ps(outptr0 + 4);
__m128 _sum11 = _mm_loadu_ps(outptr1 + 4);
__m128 _r04 = _mm_set1_ps(*(r0 + 3));
__m128 _r14 = _mm_set1_ps(*(r1 + 3));
__m128 _r24 = _mm_set1_ps(*(r2 + 3));
_sum01 = _mm_comp_fmadd_ps(_r02, _k00_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r03, _k01_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r04, _k02_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r12, _k10_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r13, _k11_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r14, _k12_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r22, _k20_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r23, _k21_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r24, _k22_0, _sum01);
_sum11 = _mm_comp_fmadd_ps(_r02, _k00_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r03, _k01_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r04, _k02_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r12, _k10_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r13, _k11_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r14, _k12_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r22, _k20_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r23, _k21_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r24, _k22_1, _sum11);
_mm_storeu_ps(outptr0 + 4, _sum01);
_mm_storeu_ps(outptr1 + 4, _sum11);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 8;
outptr1 += 8;
}
for (; j < outw; j++)
{
__m128 _sum00 = _mm_loadu_ps(outptr0);
__m128 _sum10 = _mm_loadu_ps(outptr1);
__m128 _r01 = _mm_set1_ps(*(r0));
__m128 _r02 = _mm_set1_ps(*(r0 + 1));
__m128 _r03 = _mm_set1_ps(*(r0 + 2));
__m128 _r11 = _mm_set1_ps(*(r1));
__m128 _r12 = _mm_set1_ps(*(r1 + 1));
__m128 _r13 = _mm_set1_ps(*(r1 + 2));
__m128 _r21 = _mm_set1_ps(*(r2));
__m128 _r22 = _mm_set1_ps(*(r2 + 1));
__m128 _r23 = _mm_set1_ps(*(r2 + 2));
_sum00 = _mm_comp_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r23, _k22_0, _sum00);
_sum10 = _mm_comp_fmadd_ps(_r01, _k00_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r02, _k01_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r03, _k02_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r11, _k10_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r12, _k11_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r13, _k12_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r21, _k20_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r22, _k21_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r23, _k22_1, _sum10);
_mm_storeu_ps(outptr0, _sum00);
_mm_storeu_ps(outptr1, _sum10);
r0 += 1;
r1 += 1;
r2 += 1;
outptr0 += 4;
outptr1 += 4;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9 * 4;
k1 += 9 * 4;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
__m128 _bias0 = bias ? _mm_loadu_ps((const float*)bias + p * 4) : _mm_set1_ps(0.f);
out0.fill(_bias0);
const float* k0 = kernel.channel(p);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
__m128 _k00 = _mm_loadu_ps(k0);
__m128 _k01 = _mm_loadu_ps(k0 + 4);
__m128 _k02 = _mm_loadu_ps(k0 + 8);
__m128 _k10 = _mm_loadu_ps(k0 + 12);
__m128 _k11 = _mm_loadu_ps(k0 + 16);
__m128 _k12 = _mm_loadu_ps(k0 + 20);
__m128 _k20 = _mm_loadu_ps(k0 + 24);
__m128 _k21 = _mm_loadu_ps(k0 + 28);
__m128 _k22 = _mm_loadu_ps(k0 + 32);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
__m128 _sum0 = _mm_loadu_ps(outptr0);
__m128 _r01 = _mm_set1_ps(*(r0));
__m128 _r02 = _mm_set1_ps(*(r0 + 1));
__m128 _r03 = _mm_set1_ps(*(r0 + 2));
__m128 _r11 = _mm_set1_ps(*(r1));
__m128 _r12 = _mm_set1_ps(*(r1 + 1));
__m128 _r13 = _mm_set1_ps(*(r1 + 2));
__m128 _r21 = _mm_set1_ps(*(r2));
__m128 _r22 = _mm_set1_ps(*(r2 + 1));
__m128 _r23 = _mm_set1_ps(*(r2 + 2));
_sum0 = _mm_comp_fmadd_ps(_r01, _k00, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r02, _k01, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r03, _k02, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r11, _k10, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r12, _k11, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r13, _k12, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r21, _k20, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r22, _k21, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r23, _k22, _sum0);
__m128 _sum1 = _mm_loadu_ps(outptr0 + 4);
__m128 _r04 = _mm_set1_ps(*(r0 + 3));
__m128 _r14 = _mm_set1_ps(*(r1 + 3));
__m128 _r24 = _mm_set1_ps(*(r2 + 3));
_mm_storeu_ps(outptr0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_r02, _k00, _sum1);
_sum1 = _mm_comp_fmadd_ps(_r03, _k01, _sum1);
_sum1 = _mm_comp_fmadd_ps(_r04, _k02, _sum1);
_sum1 = _mm_comp_fmadd_ps(_r12, _k10, _sum1);
_sum1 = _mm_comp_fmadd_ps(_r13, _k11, _sum1);
_sum1 = _mm_comp_fmadd_ps(_r14, _k12, _sum1);
_sum1 = _mm_comp_fmadd_ps(_r22, _k20, _sum1);
_sum1 = _mm_comp_fmadd_ps(_r23, _k21, _sum1);
_sum1 = _mm_comp_fmadd_ps(_r24, _k22, _sum1);
__m128 _sum2 = _mm_loadu_ps(outptr0 + 8);
__m128 _r05 = _mm_set1_ps(*(r0 + 4));
__m128 _r15 = _mm_set1_ps(*(r1 + 4));
__m128 _r25 = _mm_set1_ps(*(r2 + 4));
_mm_storeu_ps(outptr0 + 4, _sum1);
_sum2 = _mm_comp_fmadd_ps(_r03, _k00, _sum2);
_sum2 = _mm_comp_fmadd_ps(_r04, _k01, _sum2);
_sum2 = _mm_comp_fmadd_ps(_r05, _k02, _sum2);
_sum2 = _mm_comp_fmadd_ps(_r13, _k10, _sum2);
_sum2 = _mm_comp_fmadd_ps(_r14, _k11, _sum2);
_sum2 = _mm_comp_fmadd_ps(_r15, _k12, _sum2);
_sum2 = _mm_comp_fmadd_ps(_r23, _k20, _sum2);
_sum2 = _mm_comp_fmadd_ps(_r24, _k21, _sum2);
_sum2 = _mm_comp_fmadd_ps(_r25, _k22, _sum2);
__m128 _sum3 = _mm_loadu_ps(outptr0 + 12);
__m128 _r06 = _mm_set1_ps(*(r0 + 5));
__m128 _r16 = _mm_set1_ps(*(r1 + 5));
__m128 _r26 = _mm_set1_ps(*(r2 + 5));
_mm_storeu_ps(outptr0 + 8, _sum2);
_sum3 = _mm_comp_fmadd_ps(_r04, _k00, _sum3);
_sum3 = _mm_comp_fmadd_ps(_r05, _k01, _sum3);
_sum3 = _mm_comp_fmadd_ps(_r06, _k02, _sum3);
_sum3 = _mm_comp_fmadd_ps(_r14, _k10, _sum3);
_sum3 = _mm_comp_fmadd_ps(_r15, _k11, _sum3);
_sum3 = _mm_comp_fmadd_ps(_r16, _k12, _sum3);
_sum3 = _mm_comp_fmadd_ps(_r24, _k20, _sum3);
_sum3 = _mm_comp_fmadd_ps(_r25, _k21, _sum3);
_sum3 = _mm_comp_fmadd_ps(_r26, _k22, _sum3);
_mm_storeu_ps(outptr0 + 12, _sum3);
r0 += 4;
r1 += 4;
r2 += 4;
outptr0 += 16;
}
for (; j + 1 < outw; j += 2)
{
__m128 _sum0 = _mm_loadu_ps(outptr0);
__m128 _r01 = _mm_set1_ps(*(r0));
__m128 _r02 = _mm_set1_ps(*(r0 + 1));
__m128 _r03 = _mm_set1_ps(*(r0 + 2));
__m128 _r11 = _mm_set1_ps(*(r1));
__m128 _r12 = _mm_set1_ps(*(r1 + 1));
__m128 _r13 = _mm_set1_ps(*(r1 + 2));
__m128 _r21 = _mm_set1_ps(*(r2));
__m128 _r22 = _mm_set1_ps(*(r2 + 1));
__m128 _r23 = _mm_set1_ps(*(r2 + 2));
_sum0 = _mm_comp_fmadd_ps(_r01, _k00, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r02, _k01, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r03, _k02, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r11, _k10, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r12, _k11, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r13, _k12, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r21, _k20, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r22, _k21, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r23, _k22, _sum0);
__m128 _sum1 = _mm_loadu_ps(outptr0 + 4);
__m128 _r04 = _mm_set1_ps(*(r0 + 3));
__m128 _r14 = _mm_set1_ps(*(r1 + 3));
__m128 _r24 = _mm_set1_ps(*(r2 + 3));
_mm_storeu_ps(outptr0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_r02, _k00, _sum1);
_sum1 = _mm_comp_fmadd_ps(_r03, _k01, _sum1);
_sum1 = _mm_comp_fmadd_ps(_r04, _k02, _sum1);
_sum1 = _mm_comp_fmadd_ps(_r12, _k10, _sum1);
_sum1 = _mm_comp_fmadd_ps(_r13, _k11, _sum1);
_sum1 = _mm_comp_fmadd_ps(_r14, _k12, _sum1);
_sum1 = _mm_comp_fmadd_ps(_r22, _k20, _sum1);
_sum1 = _mm_comp_fmadd_ps(_r23, _k21, _sum1);
_sum1 = _mm_comp_fmadd_ps(_r24, _k22, _sum1);
_mm_storeu_ps(outptr0 + 4, _sum1);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 8;
}
for (; j < outw; j++)
{
__m128 _sum0 = _mm_loadu_ps(outptr0);
__m128 _r01 = _mm_set1_ps(*(r0));
__m128 _r02 = _mm_set1_ps(*(r0 + 1));
__m128 _r03 = _mm_set1_ps(*(r0 + 2));
__m128 _r11 = _mm_set1_ps(*(r1));
__m128 _r12 = _mm_set1_ps(*(r1 + 1));
__m128 _r13 = _mm_set1_ps(*(r1 + 2));
__m128 _r21 = _mm_set1_ps(*(r2));
__m128 _r22 = _mm_set1_ps(*(r2 + 1));
__m128 _r23 = _mm_set1_ps(*(r2 + 2));
_sum0 = _mm_comp_fmadd_ps(_r01, _k00, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r02, _k01, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r03, _k02, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r11, _k10, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r12, _k11, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r13, _k12, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r21, _k20, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r22, _k21, _sum0);
_sum0 = _mm_comp_fmadd_ps(_r23, _k22, _sum0);
_mm_storeu_ps(outptr0, _sum0);
r0 += 1;
r1 += 1;
r2 += 1;
outptr0 += 4;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9 * 4;
}
}
}
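/*
Illustrative scalar reference (not used by ncnn): what conv3x3s1_pack1to4_sse
above computes. The input is planar (pack1), the output stores 4 channels
interleaved per pixel (pack4), and the kernel holds 9 taps x 4 output lanes
per input channel, matching the k0 += 9 * 4 stride above.
*/
static void conv3x3s1_pack1to4_reference(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
float* outptr = out;
const float* kbase = kernel.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
float sum[4];
for (int lane = 0; lane < 4; lane++)
sum[lane] = bias ? bias[p * 4 + lane] : 0.f;
for (int q = 0; q < inch; q++)
{
const Mat img = bottom_blob.channel(q);
const float* k = kbase + q * 9 * 4;
for (int ky = 0; ky < 3; ky++)
{
const float* r = img.row(i + ky) + j;
for (int kx = 0; kx < 3; kx++)
{
// One scalar input tap drives all 4 output lanes,
// mirroring _mm_set1_ps + _mm_comp_fmadd_ps above.
for (int lane = 0; lane < 4; lane++)
sum[lane] += r[kx] * k[(ky * 3 + kx) * 4 + lane];
}
}
}
for (int lane = 0; lane < 4; lane++)
outptr[lane] = sum[lane];
outptr += 4;
}
}
}
}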
static void conv3x3s2_pack1to4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2 * outw + w;
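// After one output row, r0..r2 sit 2 * outw floats into their input row.
// Skip the unconsumed tail of that row plus one full row, because stride-2
// convolution consumes two input rows per output row.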
const float* bias = _bias;
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p + 1);
__m128 _bias0 = bias ? _mm_loadu_ps((const float*)bias + p * 4) : _mm_set1_ps(0.f);
__m128 _bias1 = bias ? _mm_loadu_ps((const float*)bias + (p + 1) * 4) : _mm_set1_ps(0.f);
out0.fill(_bias0);
out1.fill(_bias1);
const float* k0 = kernel.channel(p);
const float* k1 = kernel.channel(p + 1);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
__m128 _k00_0 = _mm_loadu_ps(k0);
__m128 _k01_0 = _mm_loadu_ps(k0 + 4);
__m128 _k02_0 = _mm_loadu_ps(k0 + 8);
__m128 _k10_0 = _mm_loadu_ps(k0 + 12);
__m128 _k11_0 = _mm_loadu_ps(k0 + 16);
__m128 _k12_0 = _mm_loadu_ps(k0 + 20);
__m128 _k20_0 = _mm_loadu_ps(k0 + 24);
__m128 _k21_0 = _mm_loadu_ps(k0 + 28);
__m128 _k22_0 = _mm_loadu_ps(k0 + 32);
__m128 _k00_1 = _mm_loadu_ps(k1);
__m128 _k01_1 = _mm_loadu_ps(k1 + 4);
__m128 _k02_1 = _mm_loadu_ps(k1 + 8);
__m128 _k10_1 = _mm_loadu_ps(k1 + 12);
__m128 _k11_1 = _mm_loadu_ps(k1 + 16);
__m128 _k12_1 = _mm_loadu_ps(k1 + 20);
__m128 _k20_1 = _mm_loadu_ps(k1 + 24);
__m128 _k21_1 = _mm_loadu_ps(k1 + 28);
__m128 _k22_1 = _mm_loadu_ps(k1 + 32);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
__m128 _sum00 = _mm_loadu_ps(outptr0);
__m128 _sum10 = _mm_loadu_ps(outptr1);
__m128 _r01 = _mm_set1_ps(*(r0));
__m128 _r02 = _mm_set1_ps(*(r0 + 1));
__m128 _r03 = _mm_set1_ps(*(r0 + 2));
__m128 _r11 = _mm_set1_ps(*(r1));
__m128 _r12 = _mm_set1_ps(*(r1 + 1));
__m128 _r13 = _mm_set1_ps(*(r1 + 2));
__m128 _r21 = _mm_set1_ps(*(r2));
__m128 _r22 = _mm_set1_ps(*(r2 + 1));
__m128 _r23 = _mm_set1_ps(*(r2 + 2));
_sum00 = _mm_comp_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r23, _k22_0, _sum00);
_sum10 = _mm_comp_fmadd_ps(_r01, _k00_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r02, _k01_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r03, _k02_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r11, _k10_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r12, _k11_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r13, _k12_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r21, _k20_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r22, _k21_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r23, _k22_1, _sum10);
_mm_storeu_ps(outptr0, _sum00);
_mm_storeu_ps(outptr1, _sum10);
__m128 _sum01 = _mm_loadu_ps(outptr0 + 4);
__m128 _sum11 = _mm_loadu_ps(outptr1 + 4);
__m128 _r04 = _mm_set1_ps(*(r0 + 3));
__m128 _r14 = _mm_set1_ps(*(r1 + 3));
__m128 _r24 = _mm_set1_ps(*(r2 + 3));
__m128 _r05 = _mm_set1_ps(*(r0 + 4));
__m128 _r15 = _mm_set1_ps(*(r1 + 4));
__m128 _r25 = _mm_set1_ps(*(r2 + 4));
_sum01 = _mm_comp_fmadd_ps(_r03, _k00_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r04, _k01_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r05, _k02_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r13, _k10_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r14, _k11_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r15, _k12_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r23, _k20_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r24, _k21_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r25, _k22_0, _sum01);
_sum11 = _mm_comp_fmadd_ps(_r03, _k00_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r04, _k01_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r05, _k02_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r13, _k10_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r14, _k11_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r15, _k12_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r23, _k20_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r24, _k21_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r25, _k22_1, _sum11);
_mm_storeu_ps(outptr0 + 4, _sum01);
_mm_storeu_ps(outptr1 + 4, _sum11);
__m128 _sum02 = _mm_loadu_ps(outptr0 + 8);
__m128 _sum12 = _mm_loadu_ps(outptr1 + 8);
__m128 _r06 = _mm_set1_ps(*(r0 + 5));
__m128 _r16 = _mm_set1_ps(*(r1 + 5));
__m128 _r26 = _mm_set1_ps(*(r2 + 5));
__m128 _r07 = _mm_set1_ps(*(r0 + 6));
__m128 _r17 = _mm_set1_ps(*(r1 + 6));
__m128 _r27 = _mm_set1_ps(*(r2 + 6));
_sum02 = _mm_comp_fmadd_ps(_r05, _k00_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r06, _k01_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r07, _k02_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r15, _k10_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r16, _k11_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r17, _k12_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r25, _k20_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r26, _k21_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r27, _k22_0, _sum02);
_sum12 = _mm_comp_fmadd_ps(_r05, _k00_1, _sum12);
_sum12 = _mm_comp_fmadd_ps(_r06, _k01_1, _sum12);
_sum12 = _mm_comp_fmadd_ps(_r07, _k02_1, _sum12);
_sum12 = _mm_comp_fmadd_ps(_r15, _k10_1, _sum12);
_sum12 = _mm_comp_fmadd_ps(_r16, _k11_1, _sum12);
_sum12 = _mm_comp_fmadd_ps(_r17, _k12_1, _sum12);
_sum12 = _mm_comp_fmadd_ps(_r25, _k20_1, _sum12);
_sum12 = _mm_comp_fmadd_ps(_r26, _k21_1, _sum12);
_sum12 = _mm_comp_fmadd_ps(_r27, _k22_1, _sum12);
_mm_storeu_ps(outptr0 + 8, _sum02);
_mm_storeu_ps(outptr1 + 8, _sum12);
__m128 _r08 = _mm_set1_ps(*(r0 + 7));
__m128 _r18 = _mm_set1_ps(*(r1 + 7));
__m128 _r28 = _mm_set1_ps(*(r2 + 7));
__m128 _r09 = _mm_set1_ps(*(r0 + 8));
__m128 _r19 = _mm_set1_ps(*(r1 + 8));
__m128 _r29 = _mm_set1_ps(*(r2 + 8));
__m128 _sum03 = _mm_loadu_ps(outptr0 + 12);
__m128 _sum13 = _mm_loadu_ps(outptr1 + 12);
_sum03 = _mm_comp_fmadd_ps(_r07, _k00_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r08, _k01_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r09, _k02_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r17, _k10_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r18, _k11_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r19, _k12_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r27, _k20_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r28, _k21_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r29, _k22_0, _sum03);
_sum13 = _mm_comp_fmadd_ps(_r07, _k00_1, _sum13);
_sum13 = _mm_comp_fmadd_ps(_r08, _k01_1, _sum13);
_sum13 = _mm_comp_fmadd_ps(_r09, _k02_1, _sum13);
_sum13 = _mm_comp_fmadd_ps(_r17, _k10_1, _sum13);
_sum13 = _mm_comp_fmadd_ps(_r18, _k11_1, _sum13);
_sum13 = _mm_comp_fmadd_ps(_r19, _k12_1, _sum13);
_sum13 = _mm_comp_fmadd_ps(_r27, _k20_1, _sum13);
_sum13 = _mm_comp_fmadd_ps(_r28, _k21_1, _sum13);
_sum13 = _mm_comp_fmadd_ps(_r29, _k22_1, _sum13);
_mm_storeu_ps(outptr0 + 12, _sum03);
_mm_storeu_ps(outptr1 + 12, _sum13);
r0 += 8;
r1 += 8;
r2 += 8;
outptr0 += 16;
outptr1 += 16;
}
for (; j + 1 < outw; j += 2)
{
__m128 _sum00 = _mm_loadu_ps(outptr0);
__m128 _sum10 = _mm_loadu_ps(outptr1);
__m128 _r01 = _mm_set1_ps(*(r0));
__m128 _r02 = _mm_set1_ps(*(r0 + 1));
__m128 _r03 = _mm_set1_ps(*(r0 + 2));
__m128 _r11 = _mm_set1_ps(*(r1));
__m128 _r12 = _mm_set1_ps(*(r1 + 1));
__m128 _r13 = _mm_set1_ps(*(r1 + 2));
__m128 _r21 = _mm_set1_ps(*(r2));
__m128 _r22 = _mm_set1_ps(*(r2 + 1));
__m128 _r23 = _mm_set1_ps(*(r2 + 2));
_sum00 = _mm_comp_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r23, _k22_0, _sum00);
_sum10 = _mm_comp_fmadd_ps(_r01, _k00_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r02, _k01_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r03, _k02_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r11, _k10_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r12, _k11_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r13, _k12_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r21, _k20_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r22, _k21_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r23, _k22_1, _sum10);
_mm_storeu_ps(outptr0, _sum00);
_mm_storeu_ps(outptr1, _sum10);
__m128 _sum01 = _mm_loadu_ps(outptr0 + 4);
__m128 _sum11 = _mm_loadu_ps(outptr1 + 4);
__m128 _r04 = _mm_set1_ps(*(r0 + 3));
__m128 _r14 = _mm_set1_ps(*(r1 + 3));
__m128 _r24 = _mm_set1_ps(*(r2 + 3));
__m128 _r05 = _mm_set1_ps(*(r0 + 4));
__m128 _r15 = _mm_set1_ps(*(r1 + 4));
__m128 _r25 = _mm_set1_ps(*(r2 + 4));
_sum01 = _mm_comp_fmadd_ps(_r03, _k00_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r04, _k01_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r05, _k02_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r13, _k10_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r14, _k11_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r15, _k12_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r23, _k20_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r24, _k21_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r25, _k22_0, _sum01);
_sum11 = _mm_comp_fmadd_ps(_r03, _k00_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r04, _k01_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r05, _k02_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r13, _k10_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r14, _k11_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r15, _k12_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r23, _k20_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r24, _k21_1, _sum11);
_sum11 = _mm_comp_fmadd_ps(_r25, _k22_1, _sum11);
_mm_storeu_ps(outptr0 + 4, _sum01);
_mm_storeu_ps(outptr1 + 4, _sum11);
r0 += 4;
r1 += 4;
r2 += 4;
outptr0 += 8;
outptr1 += 8;
}
for (; j < outw; j++)
{
__m128 _sum00 = _mm_loadu_ps(outptr0);
__m128 _sum10 = _mm_loadu_ps(outptr1);
__m128 _r01 = _mm_set1_ps(*(r0));
__m128 _r02 = _mm_set1_ps(*(r0 + 1));
__m128 _r03 = _mm_set1_ps(*(r0 + 2));
__m128 _r11 = _mm_set1_ps(*(r1));
__m128 _r12 = _mm_set1_ps(*(r1 + 1));
__m128 _r13 = _mm_set1_ps(*(r1 + 2));
__m128 _r21 = _mm_set1_ps(*(r2));
__m128 _r22 = _mm_set1_ps(*(r2 + 1));
__m128 _r23 = _mm_set1_ps(*(r2 + 2));
_sum00 = _mm_comp_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r23, _k22_0, _sum00);
_sum10 = _mm_comp_fmadd_ps(_r01, _k00_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r02, _k01_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r03, _k02_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r11, _k10_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r12, _k11_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r13, _k12_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r21, _k20_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r22, _k21_1, _sum10);
_sum10 = _mm_comp_fmadd_ps(_r23, _k22_1, _sum10);
_mm_storeu_ps(outptr0, _sum00);
_mm_storeu_ps(outptr1, _sum10);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 4;
outptr1 += 4;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9 * 4;
k1 += 9 * 4;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
__m128 _bias0 = bias ? _mm_loadu_ps((const float*)bias + p * 4) : _mm_set1_ps(0.f);
out0.fill(_bias0);
const float* k0 = kernel.channel(p);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
__m128 _k00_0 = _mm_loadu_ps(k0);
__m128 _k01_0 = _mm_loadu_ps(k0 + 4);
__m128 _k02_0 = _mm_loadu_ps(k0 + 8);
__m128 _k10_0 = _mm_loadu_ps(k0 + 12);
__m128 _k11_0 = _mm_loadu_ps(k0 + 16);
__m128 _k12_0 = _mm_loadu_ps(k0 + 20);
__m128 _k20_0 = _mm_loadu_ps(k0 + 24);
__m128 _k21_0 = _mm_loadu_ps(k0 + 28);
__m128 _k22_0 = _mm_loadu_ps(k0 + 32);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 7 < outw; j += 8)
{
__m128 _sum00 = _mm_loadu_ps(outptr0);
__m128 _r01 = _mm_set1_ps(*(r0));
__m128 _r02 = _mm_set1_ps(*(r0 + 1));
__m128 _r03 = _mm_set1_ps(*(r0 + 2));
__m128 _r11 = _mm_set1_ps(*(r1));
__m128 _r12 = _mm_set1_ps(*(r1 + 1));
__m128 _r13 = _mm_set1_ps(*(r1 + 2));
__m128 _r21 = _mm_set1_ps(*(r2));
__m128 _r22 = _mm_set1_ps(*(r2 + 1));
__m128 _r23 = _mm_set1_ps(*(r2 + 2));
_sum00 = _mm_comp_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r23, _k22_0, _sum00);
_mm_storeu_ps(outptr0, _sum00);
__m128 _sum01 = _mm_loadu_ps(outptr0 + 4);
__m128 _r04 = _mm_set1_ps(*(r0 + 3));
__m128 _r14 = _mm_set1_ps(*(r1 + 3));
__m128 _r24 = _mm_set1_ps(*(r2 + 3));
__m128 _r05 = _mm_set1_ps(*(r0 + 4));
__m128 _r15 = _mm_set1_ps(*(r1 + 4));
__m128 _r25 = _mm_set1_ps(*(r2 + 4));
_sum01 = _mm_comp_fmadd_ps(_r03, _k00_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r04, _k01_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r05, _k02_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r13, _k10_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r14, _k11_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r15, _k12_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r23, _k20_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r24, _k21_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r25, _k22_0, _sum01);
_mm_storeu_ps(outptr0 + 4, _sum01);
__m128 _sum02 = _mm_loadu_ps(outptr0 + 8);
__m128 _r06 = _mm_set1_ps(*(r0 + 5));
__m128 _r16 = _mm_set1_ps(*(r1 + 5));
__m128 _r26 = _mm_set1_ps(*(r2 + 5));
__m128 _r07 = _mm_set1_ps(*(r0 + 6));
__m128 _r17 = _mm_set1_ps(*(r1 + 6));
__m128 _r27 = _mm_set1_ps(*(r2 + 6));
_sum02 = _mm_comp_fmadd_ps(_r05, _k00_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r06, _k01_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r07, _k02_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r15, _k10_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r16, _k11_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r17, _k12_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r25, _k20_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r26, _k21_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r27, _k22_0, _sum02);
_mm_storeu_ps(outptr0 + 8, _sum02);
__m128 _r08 = _mm_set1_ps(*(r0 + 7));
__m128 _r18 = _mm_set1_ps(*(r1 + 7));
__m128 _r28 = _mm_set1_ps(*(r2 + 7));
__m128 _r09 = _mm_set1_ps(*(r0 + 8));
__m128 _r19 = _mm_set1_ps(*(r1 + 8));
__m128 _r29 = _mm_set1_ps(*(r2 + 8));
__m128 _sum03 = _mm_loadu_ps(outptr0 + 12);
_sum03 = _mm_comp_fmadd_ps(_r07, _k00_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r08, _k01_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r09, _k02_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r17, _k10_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r18, _k11_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r19, _k12_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r27, _k20_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r28, _k21_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r29, _k22_0, _sum03);
_mm_storeu_ps(outptr0 + 12, _sum03);
__m128 _r010 = _mm_set1_ps(*(r0 + 9));
__m128 _r110 = _mm_set1_ps(*(r1 + 9));
__m128 _r210 = _mm_set1_ps(*(r2 + 9));
__m128 _r011 = _mm_set1_ps(*(r0 + 10));
__m128 _r111 = _mm_set1_ps(*(r1 + 10));
__m128 _r211 = _mm_set1_ps(*(r2 + 10));
__m128 _sum04 = _mm_loadu_ps(outptr0 + 16);
_sum04 = _mm_comp_fmadd_ps(_r09, _k00_0, _sum04);
_sum04 = _mm_comp_fmadd_ps(_r010, _k01_0, _sum04);
_sum04 = _mm_comp_fmadd_ps(_r011, _k02_0, _sum04);
_sum04 = _mm_comp_fmadd_ps(_r19, _k10_0, _sum04);
_sum04 = _mm_comp_fmadd_ps(_r110, _k11_0, _sum04);
_sum04 = _mm_comp_fmadd_ps(_r111, _k12_0, _sum04);
_sum04 = _mm_comp_fmadd_ps(_r29, _k20_0, _sum04);
_sum04 = _mm_comp_fmadd_ps(_r210, _k21_0, _sum04);
_sum04 = _mm_comp_fmadd_ps(_r211, _k22_0, _sum04);
_mm_storeu_ps(outptr0 + 16, _sum04);
__m128 _r012 = _mm_set1_ps(*(r0 + 11));
__m128 _r112 = _mm_set1_ps(*(r1 + 11));
__m128 _r212 = _mm_set1_ps(*(r2 + 11));
__m128 _r013 = _mm_set1_ps(*(r0 + 12));
__m128 _r113 = _mm_set1_ps(*(r1 + 12));
__m128 _r213 = _mm_set1_ps(*(r2 + 12));
__m128 _sum05 = _mm_loadu_ps(outptr0 + 20);
_sum05 = _mm_comp_fmadd_ps(_r011, _k00_0, _sum05);
_sum05 = _mm_comp_fmadd_ps(_r012, _k01_0, _sum05);
_sum05 = _mm_comp_fmadd_ps(_r013, _k02_0, _sum05);
_sum05 = _mm_comp_fmadd_ps(_r111, _k10_0, _sum05);
_sum05 = _mm_comp_fmadd_ps(_r112, _k11_0, _sum05);
_sum05 = _mm_comp_fmadd_ps(_r113, _k12_0, _sum05);
_sum05 = _mm_comp_fmadd_ps(_r211, _k20_0, _sum05);
_sum05 = _mm_comp_fmadd_ps(_r212, _k21_0, _sum05);
_sum05 = _mm_comp_fmadd_ps(_r213, _k22_0, _sum05);
_mm_storeu_ps(outptr0 + 20, _sum05);
__m128 _r014 = _mm_set1_ps(*(r0 + 13));
__m128 _r114 = _mm_set1_ps(*(r1 + 13));
__m128 _r214 = _mm_set1_ps(*(r2 + 13));
__m128 _r015 = _mm_set1_ps(*(r0 + 14));
__m128 _r115 = _mm_set1_ps(*(r1 + 14));
__m128 _r215 = _mm_set1_ps(*(r2 + 14));
__m128 _sum06 = _mm_loadu_ps(outptr0 + 24);
_sum06 = _mm_comp_fmadd_ps(_r013, _k00_0, _sum06);
_sum06 = _mm_comp_fmadd_ps(_r014, _k01_0, _sum06);
_sum06 = _mm_comp_fmadd_ps(_r015, _k02_0, _sum06);
_sum06 = _mm_comp_fmadd_ps(_r113, _k10_0, _sum06);
_sum06 = _mm_comp_fmadd_ps(_r114, _k11_0, _sum06);
_sum06 = _mm_comp_fmadd_ps(_r115, _k12_0, _sum06);
_sum06 = _mm_comp_fmadd_ps(_r213, _k20_0, _sum06);
_sum06 = _mm_comp_fmadd_ps(_r214, _k21_0, _sum06);
_sum06 = _mm_comp_fmadd_ps(_r215, _k22_0, _sum06);
_mm_storeu_ps(outptr0 + 24, _sum06);
__m128 _r016 = _mm_set1_ps(*(r0 + 15));
__m128 _r116 = _mm_set1_ps(*(r1 + 15));
__m128 _r216 = _mm_set1_ps(*(r2 + 15));
__m128 _r017 = _mm_set1_ps(*(r0 + 16));
__m128 _r117 = _mm_set1_ps(*(r1 + 16));
__m128 _r217 = _mm_set1_ps(*(r2 + 16));
__m128 _sum07 = _mm_loadu_ps(outptr0 + 28);
_sum07 = _mm_comp_fmadd_ps(_r015, _k00_0, _sum07);
_sum07 = _mm_comp_fmadd_ps(_r016, _k01_0, _sum07);
_sum07 = _mm_comp_fmadd_ps(_r017, _k02_0, _sum07);
_sum07 = _mm_comp_fmadd_ps(_r115, _k10_0, _sum07);
_sum07 = _mm_comp_fmadd_ps(_r116, _k11_0, _sum07);
_sum07 = _mm_comp_fmadd_ps(_r117, _k12_0, _sum07);
_sum07 = _mm_comp_fmadd_ps(_r215, _k20_0, _sum07);
_sum07 = _mm_comp_fmadd_ps(_r216, _k21_0, _sum07);
_sum07 = _mm_comp_fmadd_ps(_r217, _k22_0, _sum07);
_mm_storeu_ps(outptr0 + 28, _sum07);
r0 += 16;
r1 += 16;
r2 += 16;
outptr0 += 32;
}
for (; j + 3 < outw; j += 4)
{
__m128 _sum00 = _mm_loadu_ps(outptr0);
__m128 _r01 = _mm_set1_ps(*(r0));
__m128 _r02 = _mm_set1_ps(*(r0 + 1));
__m128 _r03 = _mm_set1_ps(*(r0 + 2));
__m128 _r11 = _mm_set1_ps(*(r1));
__m128 _r12 = _mm_set1_ps(*(r1 + 1));
__m128 _r13 = _mm_set1_ps(*(r1 + 2));
__m128 _r21 = _mm_set1_ps(*(r2));
__m128 _r22 = _mm_set1_ps(*(r2 + 1));
__m128 _r23 = _mm_set1_ps(*(r2 + 2));
_sum00 = _mm_comp_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r23, _k22_0, _sum00);
_mm_storeu_ps(outptr0, _sum00);
__m128 _sum01 = _mm_loadu_ps(outptr0 + 4);
__m128 _r04 = _mm_set1_ps(*(r0 + 3));
__m128 _r14 = _mm_set1_ps(*(r1 + 3));
__m128 _r24 = _mm_set1_ps(*(r2 + 3));
__m128 _r05 = _mm_set1_ps(*(r0 + 4));
__m128 _r15 = _mm_set1_ps(*(r1 + 4));
__m128 _r25 = _mm_set1_ps(*(r2 + 4));
_sum01 = _mm_comp_fmadd_ps(_r03, _k00_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r04, _k01_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r05, _k02_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r13, _k10_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r14, _k11_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r15, _k12_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r23, _k20_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r24, _k21_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r25, _k22_0, _sum01);
_mm_storeu_ps(outptr0 + 4, _sum01);
__m128 _sum02 = _mm_loadu_ps(outptr0 + 8);
__m128 _r06 = _mm_set1_ps(*(r0 + 5));
__m128 _r16 = _mm_set1_ps(*(r1 + 5));
__m128 _r26 = _mm_set1_ps(*(r2 + 5));
__m128 _r07 = _mm_set1_ps(*(r0 + 6));
__m128 _r17 = _mm_set1_ps(*(r1 + 6));
__m128 _r27 = _mm_set1_ps(*(r2 + 6));
_sum02 = _mm_comp_fmadd_ps(_r05, _k00_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r06, _k01_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r07, _k02_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r15, _k10_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r16, _k11_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r17, _k12_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r25, _k20_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r26, _k21_0, _sum02);
_sum02 = _mm_comp_fmadd_ps(_r27, _k22_0, _sum02);
_mm_storeu_ps(outptr0 + 8, _sum02);
__m128 _r08 = _mm_set1_ps(*(r0 + 7));
__m128 _r18 = _mm_set1_ps(*(r1 + 7));
__m128 _r28 = _mm_set1_ps(*(r2 + 7));
__m128 _r09 = _mm_set1_ps(*(r0 + 8));
__m128 _r19 = _mm_set1_ps(*(r1 + 8));
__m128 _r29 = _mm_set1_ps(*(r2 + 8));
__m128 _sum03 = _mm_loadu_ps(outptr0 + 12);
_sum03 = _mm_comp_fmadd_ps(_r07, _k00_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r08, _k01_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r09, _k02_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r17, _k10_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r18, _k11_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r19, _k12_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r27, _k20_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r28, _k21_0, _sum03);
_sum03 = _mm_comp_fmadd_ps(_r29, _k22_0, _sum03);
_mm_storeu_ps(outptr0 + 12, _sum03);
r0 += 8;
r1 += 8;
r2 += 8;
outptr0 += 16;
}
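// tail: 2 output columns per iteration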
for (; j + 1 < outw; j += 2)
{
__m128 _sum00 = _mm_loadu_ps(outptr0);
__m128 _r01 = _mm_set1_ps(*(r0));
__m128 _r02 = _mm_set1_ps(*(r0 + 1));
__m128 _r03 = _mm_set1_ps(*(r0 + 2));
__m128 _r11 = _mm_set1_ps(*(r1));
__m128 _r12 = _mm_set1_ps(*(r1 + 1));
__m128 _r13 = _mm_set1_ps(*(r1 + 2));
__m128 _r21 = _mm_set1_ps(*(r2));
__m128 _r22 = _mm_set1_ps(*(r2 + 1));
__m128 _r23 = _mm_set1_ps(*(r2 + 2));
_sum00 = _mm_comp_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r23, _k22_0, _sum00);
_mm_storeu_ps(outptr0, _sum00);
__m128 _sum01 = _mm_loadu_ps(outptr0 + 4);
__m128 _r04 = _mm_set1_ps(*(r0 + 3));
__m128 _r14 = _mm_set1_ps(*(r1 + 3));
__m128 _r24 = _mm_set1_ps(*(r2 + 3));
__m128 _r05 = _mm_set1_ps(*(r0 + 4));
__m128 _r15 = _mm_set1_ps(*(r1 + 4));
__m128 _r25 = _mm_set1_ps(*(r2 + 4));
_sum01 = _mm_comp_fmadd_ps(_r03, _k00_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r04, _k01_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r05, _k02_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r13, _k10_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r14, _k11_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r15, _k12_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r23, _k20_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r24, _k21_0, _sum01);
_sum01 = _mm_comp_fmadd_ps(_r25, _k22_0, _sum01);
_mm_storeu_ps(outptr0 + 4, _sum01);
r0 += 4;
r1 += 4;
r2 += 4;
outptr0 += 8;
}
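// tail: 1 output column per iteration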
for (; j < outw; j++)
{
__m128 _sum00 = _mm_loadu_ps(outptr0);
__m128 _r01 = _mm_set1_ps(*(r0));
__m128 _r02 = _mm_set1_ps(*(r0 + 1));
__m128 _r03 = _mm_set1_ps(*(r0 + 2));
__m128 _r11 = _mm_set1_ps(*(r1));
__m128 _r12 = _mm_set1_ps(*(r1 + 1));
__m128 _r13 = _mm_set1_ps(*(r1 + 2));
__m128 _r21 = _mm_set1_ps(*(r2));
__m128 _r22 = _mm_set1_ps(*(r2 + 1));
__m128 _r23 = _mm_set1_ps(*(r2 + 2));
_sum00 = _mm_comp_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm_comp_fmadd_ps(_r23, _k22_0, _sum00);
_mm_storeu_ps(outptr0, _sum00);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 4;
}
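// tailstep moves r0..r2 to the next output row's input window (for the
// stride-2 kernel: the rest of this row plus one skipped input row)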
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
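// advance to the next input channel's kernel: 3x3 = 9 taps, each stored
// 4-wide for the packed output lanes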
k0 += 9 * 4;
}
}
}
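// Reference sketch (not part of the original kernel): a scalar equivalent of
// the vectorized tail loops above, assuming a stride-2 3x3 convolution with
// the 4 output channels of one __m128 lane stored contiguously. The names
// (conv3x3s2_pack1to4_row_ref, kptr, outptr) are illustrative; kptr[4*t + p]
// is kernel tap t, lane p, matching the _k00_0.._k22_0 vectors above.
static inline void conv3x3s2_pack1to4_row_ref(const float* r0, const float* r1,
                                              const float* r2, const float* kptr,
                                              float* outptr, int outw)
{
    for (int j = 0; j < outw; j++)
    {
        // each output column reads a 3x3 window starting at input column 2*j
        for (int p = 0; p < 4; p++)
        {
            outptr[4 * j + p] += r0[2 * j + 0] * kptr[4 * 0 + p]
                               + r0[2 * j + 1] * kptr[4 * 1 + p]
                               + r0[2 * j + 2] * kptr[4 * 2 + p]
                               + r1[2 * j + 0] * kptr[4 * 3 + p]
                               + r1[2 * j + 1] * kptr[4 * 4 + p]
                               + r1[2 * j + 2] * kptr[4 * 5 + p]
                               + r2[2 * j + 0] * kptr[4 * 6 + p]
                               + r2[2 * j + 1] * kptr[4 * 7 + p]
                               + r2[2 * j + 2] * kptr[4 * 8 + p];
        }
    }
}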
|
pdlangb.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pzlangb.c, normal z -> d, Fri Sep 28 17:38:12 2018
*
**/
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>
#include "core_lapack.h"
#define A(m, n) (double*)plasma_tile_addr(A, m, n)
/***************************************************************************//**
* Parallel tile calculation of max, one, infinity or Frobenius matrix norm
* for a general band matrix.
******************************************************************************/
void plasma_pdlangb(plasma_enum_t norm,
plasma_desc_t A, double *work, double *value,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Return if failed sequence.
if (sequence->status != PlasmaSuccess)
return;
double stub; // dummy 'work' argument for norm kernels that need no workspace
int wcnt = 0;
int ldwork, klt, kut;
double *workspace, *scale, *sumsq;
switch (norm) {
//================
// PlasmaMaxNorm
//================
case PlasmaMaxNorm:
wcnt = 0;
for (int n = 0; n < A.nt; n++ ) {
int nvan = plasma_tile_nview(A, n);
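// Tile rows m_start..m_end are the only rows of tile column n that
// intersect the band (A.ku superdiagonals, A.kl subdiagonals).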
int m_start = (imax(0, n*A.nb-A.ku)) / A.nb;
int m_end = (imin(A.m-1, (n+1)*A.nb+A.kl-1)) / A.nb;
for (int m = m_start; m <= m_end; m++ ) {
int ldam = plasma_tile_mmain_band(A, m, n);
int mvam = plasma_tile_mview(A, m);
plasma_core_omp_dlange(PlasmaMaxNorm,
mvam, nvan,
A(m, n), ldam,
&stub, &work[wcnt],
sequence, request);
wcnt++;
}
}
#pragma omp taskwait
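// Reduce the wcnt per-tile maxima gathered in work[] to a single value.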
plasma_core_omp_dlange(PlasmaMaxNorm,
1, wcnt,
work, 1,
&stub, value,
sequence, request);
break;
//================
// PlasmaOneNorm
//================
case PlasmaOneNorm:
// # of tiles in upper band (not including diagonal)
kut = (A.ku+A.nb-1)/A.nb;
// # of tiles in lower band (not including diagonal)
klt = (A.kl+A.nb-1)/A.nb;
ldwork = kut+klt+1;
for (int n = 0; n < A.nt; n++ ) {
int nvan = plasma_tile_nview(A, n);
int m_start = (imax(0, n*A.nb-A.ku)) / A.nb;
int m_end = (imin(A.m-1, (n+1)*A.nb+A.kl-1)) / A.nb;
for (int m = m_start; m <= m_end; m++ ) {
int ldam = plasma_tile_mmain_band(A, m, n);
int mvam = plasma_tile_mview(A, m);
plasma_core_omp_dlange_aux(PlasmaOneNorm,
mvam, nvan,
A(m,n), ldam,
&work[(m-m_start)*A.n+n*A.nb],
sequence, request);
}
}
#pragma omp taskwait
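// work is an A.n x ldwork array of partial column sums (one row per column
// of A); its inf-norm (max row sum) is therefore the one-norm of A.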
workspace = &work[A.n*ldwork];
plasma_core_omp_dlange(PlasmaInfNorm,
A.n, ldwork,
work, A.n,
workspace, value,
sequence, request);
break;
//================
// PlasmaInfNorm
//================
case PlasmaInfNorm:
ldwork = A.mb*A.mt;
for (int n = 0; n < A.nt; n++ ) {
int nvan = plasma_tile_nview(A, n);
int m_start = (imax(0, n*A.nb-A.ku)) / A.nb;
int m_end = (imin(A.m-1, (n+1)*A.nb+A.kl-1)) / A.nb;
for (int m = m_start; m <= m_end; m++ ) {
int ldam = plasma_tile_mmain_band(A, m, n);
int mvam = plasma_tile_mview(A, m);
plasma_core_omp_dlange_aux(PlasmaInfNorm,
mvam, nvan,
A(m,n), ldam,
&work[m*A.mb+n*ldwork],
sequence, request);
}
}
#pragma omp taskwait
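// work is an ldwork x A.nt array of partial row sums (one column per tile
// column); its inf-norm (max row sum) is the inf-norm of A.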
workspace = &work[ldwork*A.nt];
plasma_core_omp_dlange(PlasmaInfNorm,
ldwork, A.nt,
work, ldwork,
workspace, value,
sequence, request);
break;
//======================
// PlasmaFrobeniusNorm
//======================
case PlasmaFrobeniusNorm:
kut = (A.ku+A.nb-1)/A.nb; // # of tiles in upper band (not including diagonal)
klt = (A.kl+A.nb-1)/A.nb; // # of tiles in lower band (not including diagonal)
ldwork = kut+klt+1;
scale = work;
sumsq = &work[ldwork*A.nt];
for (int n = 0; n < A.nt; n++ ) {
int nvan = plasma_tile_nview(A, n);
int m_start = (imax(0, n*A.nb-A.ku)) / A.nb;
int m_end = (imin(A.m-1, (n+1)*A.nb+A.kl-1)) / A.nb;
for (int m = m_start; m <= m_end; m++ ) {
int ldam = plasma_tile_mmain_band(A, m, n);
int mvam = plasma_tile_mview(A, m);
plasma_core_omp_dgessq(mvam, nvan,
A(m,n), ldam,
&scale[n*ldwork+m-m_start],
&sumsq[n*ldwork+m-m_start],
sequence, request);
}
}
#pragma omp taskwait
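// Reduce the per-tile (scale, sumsq) pairs to the Frobenius norm,
// value = scale * sqrt(sumsq), in LAPACK's overflow-safe convention.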
plasma_core_omp_dgessq_aux(ldwork*A.nt, scale, sumsq,
value, sequence, request);
break;
default:
assert(0);
}
}
|
teams.c | #include <stdio.h>
#include <omp.h>
#define HOST_MAX_TEAMS 128
#define TRIALS (1)
#define N (992)
int main(void) {
int A[N], B[N], C[N], D[N], E[N];
int fail = 0;
//
// Test: num_teams and omp_get_team_num()
//
for (int i = 0; i < N; i++) {
A[i] = 0;
C[i] = 1;
D[i] = i;
E[i] = (-1)*i;
}
// omp_is_initial_device() always returns true when called from host code,
// so it cannot detect offload here; query the device count instead.
int num_teams = omp_get_num_devices() > 0 ? 512 : HOST_MAX_TEAMS;
printf("Using num_teams = %d\n", num_teams);
for (int t = 0 ; t < TRIALS ; t++) {
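// Offload once per trial: the initial thread of each team adds its team
// number to its own slot of A, so A[i] should accumulate i per trial.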
#pragma omp target
#pragma omp teams num_teams(num_teams)
{
A[omp_get_team_num()] += omp_get_team_num();
}
}
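// NOTE: the num_teams clause is an upper bound; this check assumes the
// runtime actually created num_teams teams.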
for (int i = 0 ; i < num_teams ; i++)
if (A[i] != i*TRIALS) {
printf("Error at %d, h = %d, d = %d\n", i, i*TRIALS, A[i]);
fail = 1;
}
if (fail) printf("Failed\n");
else printf("Succeeded\n");
return fail;
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
/// Keeps track of the expected type during expression parsing. The type is
/// tied to a particular token; all functions that update or consume the type
/// take the start location of the token they are looking at as a parameter.
/// This allows us to avoid updating the type on hot paths in the parser.
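/// For example (a usage sketch, not a verbatim call site): the parser can
/// call enterReturn(S, Tok.getLocation()) when it starts parsing a return
/// statement's operand, and code completion later calls
/// get(Tok.getLocation()) to recover the expected type at that token.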
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref; clients must make sure all calls to get() with the same
/// location happen while the function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for the C
/// family of languages.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
api_notes::APINotesManager APINotes;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
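// Usage sketch (illustrative, not a verbatim call site): '#pragma pack(push, 8)'
// corresponds to PackStack.Act(Loc, PSK_Push_Set, StringRef(), 8u), and
// '#pragma pack(pop)' to an Act() call with PSK_Pop.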
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of the translation unit.
///
/// This list maps class members to the locations of delete-expressions
/// whose match with the new-expression used in the field's initializer
/// could not yet be proven either way.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations for which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file-scoped decls seen so far that have not been used and
/// for which we must warn if they remain unused. Only contains the first
/// declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
ParseTypeFromStringCallback;
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
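// Usage sketch: 'ContextRAII Saved(SemaRef, NewDC);' makes NewDC the current
// declaration context (suspending delayed diagnostics) until Saved is
// destroyed or pop() is called.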
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. This is rare; such an identifier
/// may alias another identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields, e.g. the operand of a SIZE operator in MS-style
/// inline assembly.
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of each unparsed default
// argument.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
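// Typical usage (a sketch; the diagnostic ID and arguments are
// hypothetical): arguments are streamed into the builder and the
// diagnostic is emitted when the temporary is destroyed at the end of
// the statement:
//   Diag(D->getLocation(), diag::err_example_mismatch)
//       << D->getDeclName() << SomeQualType;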
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
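// For orientation, the three fragment kinds in a C++20 module unit
// (sketch):
//   module;               // Global fragment begins
//   #include <vector>
//   export module M;      // Normal fragment begins
//   export void f();
//   module :private;      // Private fragment begins
//   void f() {}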
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info, ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
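// A concrete diagnoser is expected to look like this (sketch; the
// diagnostic ID is hypothetical):
//   struct IncompleteArgDiagnoser : TypeDiagnoser {
//     void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
//       S.Diag(Loc, diag::err_example_incomplete_arg) << T;
//     }
//   };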
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange(); }
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and the function will output the number of parameter names, and whether
/// this is a single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name,
SourceLocation ArgLoc,
const IdentifierInfo *AttrName);
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking whether
/// their address is eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
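// Example of the pattern being tracked (sketch): for
//   int __attribute__((noderef)) *p;
// parsing `&*p` first records the dereference `*p` as pending; it is
// discharged without a warning once the enclosing address-of is seen.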
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
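// Typical call (a sketch; the diagnostic ID is hypothetical). The extra
// arguments are bound into the diagnostic ahead of the type itself:
//   if (RequireCompleteType(Loc, FieldTy, diag::err_example_incomplete,
//                           FD->getDeclName()))
//     return true; // incomplete; already diagnosed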
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification ContextIndependentExpr(ExprResult E) {
NameClassification Result(NC_ContextIndependentExpr);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_ContextIndependentExpr);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
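// Example (sketch): when the parser sees `x` followed by `<` in
// `x<int>(0)`, ClassifyName may return NC_TypeTemplate or
// NC_FunctionTemplate for `x`, telling the parser to commit to a
// template-id instead of a comparison expression.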
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
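// Example (sketch): for `sort<int>(v)` where lookup finds a non-template
// `sort`, the DeclRefExpr carries no explicit template arguments, so the
// name plausibly was intended as a template-name and
// diagnoseExprIntendedAsTemplateName below can point at the '<' and '>'.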
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range);
bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key);
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
SourceLocation NameLoc,
TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
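// Example of a non-trivial C union (sketch, assuming ARC): a union whose
// member is a __strong Objective-C pointer is non-trivial to
// default-initialize, copy, and destruct, so its uses are checked:
//   union U { id __strong Obj; int I; };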
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
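// The import declarations handled here look like (C++20 sketch):
//   import M;             // StartLoc == ImportLoc
//   export import M.Part; // StartLoc == ExportLoc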
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform an ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike in C++, we actually parse the body and reject or error out on a
/// structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
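// Worked example (sketch): applying an explicit attribute (AP_Explicit == 0)
// removes one previously added via '#pragma clang attribute'
// (AP_PragmaClangAttribute == 1) for the same platform; conversely, a
// pragma-applied attribute is not applied at all if an explicit one for
// that platform is already present.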
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name, bool Override);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
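// For illustration only, a minimal sketch of probing for an implicit
// conversion with the entry point above; From and ToType are assumed to be
// in scope, and the flags shown follow the parameter order:
//   ImplicitConversionSequence ICS = TryImplicitConversion(
//       From, ToType,
//       /*SuppressUserConversions=*/false, /*AllowExplicit=*/false,
//       /*InOverloadResolution=*/false, /*CStyle=*/false,
//       /*AllowObjCWritebackConversion=*/false);
//   if (!ICS.isBad()) { /* the conversion is viable */ }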
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
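// For illustration only, a minimal sketch of converting a non-type template
// argument with the overloads above; ArgExpr and ParamType are assumed to be
// in scope:
//   llvm::APSInt Value;
//   ExprResult Converted = CheckConvertedConstantExpression(
//       ArgExpr, ParamType, Value, CCEK_TemplateArg);
//   if (!Converted.isInvalid()) { /* Value holds the converted constant */ }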
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
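// For illustration only, a minimal sketch of driving the entry point above
// with an ICEConvertDiagnoser subclass; Loc and SizeExpr are assumed to be
// in scope, and the diagnostic ID is a placeholder:
//   struct SizeDiagnoser final : ICEConvertDiagnoser {
//     SizeDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
//                                          QualType T) override {
//       return S.Diag(Loc, diag::err_some_placeholder) << T;
//     }
//     // (remaining pure-virtual diagnose*/note* overrides omitted here)
//   } Diagnoser;
//   ExprResult Converted =
//       PerformContextualImplicitConversion(Loc, SizeExpr, Diagnoser);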
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as this
// ordering is relied upon by the diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
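// For illustration only, a minimal sketch of populating and resolving a
// candidate set with AddOverloadCandidate; Fns, Args, and Loc are assumed
// to be in scope, and OverloadCandidateSet/OR_Success come from Overload.h:
//   OverloadCandidateSet Candidates(Loc, OverloadCandidateSet::CSK_Normal);
//   for (FunctionDecl *FD : Fns)
//     AddOverloadCandidate(FD, DeclAccessPair::make(FD, FD->getAccess()),
//                          Args, Candidates);
//   OverloadCandidateSet::iterator Best;
//   if (Candidates.BestViableFunction(*this, Loc, Best) == OR_Success)
//     ; // call through Best->Function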
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all templates and non-templates identified by
// the expression Expr.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
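// For illustration only, a minimal sketch of resolving '&f' against a
// target function pointer type; AddrExpr and TargetType are assumed to be
// in scope (FixOverloadedFunctionReference is declared below):
//   DeclAccessPair Found;
//   if (FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(
//           AddrExpr, TargetType, /*Complain=*/true, Found))
//     AddrExpr = FixOverloadedFunctionReference(AddrExpr, Found, Fn);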
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base, Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
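// For illustration only, a minimal sketch of an unqualified lookup with the
// entry points above; Name, Loc, and a Scope *S are assumed to be in scope:
//   LookupResult R(*this, Name, Loc, LookupOrdinaryName);
//   if (LookupName(R, S) && R.isSingleResult())
//     NamedDecl *D = R.getFoundDecl();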
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Status of a function's emission with respect to CUDA/HIP/OpenMP
/// host/device attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
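// For illustration only, a minimal sketch of attempting typo correction for
// a failed lookup; R (a LookupResult), S, and SS are assumed to be in scope,
// and the diagnostic ID is a placeholder:
//   DeclFilterCCC<VarDecl> CCC;
//   if (TypoCorrection Corrected =
//           CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S, &SS,
//                       CCC, CTK_ErrorRecovery))
//     diagnoseTypo(Corrected, PDiag(diag::err_some_placeholder_suggest)
//                                 << R.getLookupName());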
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate, and return a new Expr if all
/// typos were successfully corrected, or ExprError if one or more typos
/// could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
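// For illustration only, a minimal sketch of resolving any delayed typos in
// a just-built expression; E is assumed to be in scope:
//   ExprResult Fixed = CorrectDelayedTyposInExpr(E);
//   if (Fixed.isInvalid())
//     return ExprError(); // one or more typos could not be corrected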
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
NullabilityKind nullability,
SourceLocation diagLoc,
bool allowArrayTypes,
bool overrideExisting);
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation's declaration exactly matches that of its interface
/// declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns that ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) that an atomic property must not have a user-declared setter
/// without a user-declared getter, or vice versa.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and
/// the parameter checkTheOther is set, it then checks the other kind. If no
/// such method or only one method is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
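// For illustration only, a minimal sketch of consulting the global method
// pool for a message sent to 'id'; Sel and R are assumed to be in scope:
//   if (ObjCMethodDecl *Method = LookupInstanceMethodInGlobalPool(Sel, R))
//     ; // typecheck the message send against Method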
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
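// For illustration only, a minimal sketch of wrapping a condition in a full
// expression before building a statement; CondExpr is assumed to be in
// scope:
//   FullExprArg FullCond(MakeFullExpr(CondExpr));
//   if (Expr *E = FullCond.get())
//     ; // hand E to the statement-building routine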
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
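// For illustration only, a minimal sketch of the RAII class above; SemaRef
// is assumed to be a Sema reference in scope:
//   {
//     Sema::CompoundScopeRAII BodyScope(SemaRef);
//     // build the statements of the compound body; the compound scope is
//     // popped automatically when BodyScope is destroyed
//   }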
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
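// For illustration only, a minimal sketch of the early-disable idiom with
// the RAII struct above; SemaRef is assumed to be in scope:
//   Sema::FunctionScopeRAII PopOnError(SemaRef);
//   // ... attempt to build the function body ...
//   PopOnError.disable(); // on success, keep the function scope pushed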
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
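/// For illustration, source-level captures that correspond to the
/// TryCaptureKind values (a sketch, not part of the original interface):
/// \code
///   int n = 0;
///   auto byVal = [n]  { return n; };   // TryCapture_ExplicitByVal
///   auto byRef = [&n] { ++n; };        // TryCapture_ExplicitByRef
///   auto impl  = [=]  { return n; };   // TryCapture_Implicit
/// \endcode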
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
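/// For example (a hedged sketch), a runtime-behavior warning such as an
/// out-of-bounds index is delayed and then dropped once the statement is
/// known to be unreachable:
/// \code
///   int a[4];
///   if (false)
///     a[10] = 0;   // diagnostic suppressed: statement is unreachable
/// \endcode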
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
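// Note: LLLLoc and GGGLoc below are the locations of the CUDA '<<<' and
// '>>>' tokens delimiting the kernel execution configuration.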
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an AltiVec or OpenCL vector literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
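/// For illustration, the GNU statement-expression form handled above:
/// \code
///   int y = ({ int t = 21; t * 2; });  // value is the last expression; y == 42
/// \endcode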
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
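/// For illustration, a sketch of how an argument decomposes into
/// OffsetOfComponent entries:
/// \code
///   struct S { int a; int b[10]; };
///   // __builtin_offsetof(S, b[3]) yields two components: the field 'b'
///   // (isBrackets == false) and the array index '[3]' (isBrackets == true).
/// \endcode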
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
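/// For illustration, the Microsoft extension these checks service:
/// \code
///   __if_exists(S::member) { /* compiled only if S::member exists */ }
///   __if_not_exists(S::member) { /* compiled only if it does not */ }
/// \endcode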
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
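/// A minimal usage sketch for the class above; 'SemaRef', 'Loc', and the
/// 'Dtors' range are hypothetical stand-ins:
/// \code
///   Sema::ImplicitExceptionSpecification Spec(SemaRef);
///   for (const CXXMethodDecl *Dtor : Dtors)
///     Spec.CalledDecl(Loc, Dtor);  // fold each callee's spec into the result
///   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();
/// \endcode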
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
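/// For illustration (hedged), the kind of expression that gets wrapped:
/// \code
///   std::string s = std::string("a") + "b";  // the RHS temporary has a
///                                            // non-trivial destructor
/// \endcode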
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
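/// For illustration, the C++17 fold-expression forms these handle:
/// \code
///   template <typename... Ts> auto sum(Ts... ts) {
///     return (ts + ... + 0);  // binary right fold; an empty pack yields 0
///   }
/// \endcode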
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
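/// A minimal usage sketch; 'SemaRef', 'RD', and 'Quals' are hypothetical
/// stand-ins:
/// \code
///   {
///     Sema::CXXThisScopeRAII ThisScope(SemaRef, RD, Quals);
///     // 'this' expressions are permitted here with the given qualifiers.
///   }  // the previous override is restored on scope exit
/// \endcode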
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if the capture failed, false otherwise.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
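/// For illustration (a sketch), AFS_Class matters when the allocated type
/// declares its own allocation functions:
/// \code
///   #include <cstddef>
///   struct Pool {
///     static void *operator new(std::size_t);
///     static void operator delete(void *);
///   };
///   Pool *p = new Pool;  // found in class scope, not ::operator new
/// \endcode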
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
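/// For illustration, constructing the info for a bare 'name::', where 'II',
/// 'IdLoc', and 'CCLoc' are hypothetical values from the parser:
/// \code
///   NestedNameSpecInfo IdInfo(II, IdLoc, CCLoc);  // no object type
/// \endcode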
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, no error message is emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
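/// A hedged sketch of the save/restore round trip; 'SemaRef', 'SS', and
/// 'AnnRange' are stand-ins:
/// \code
///   void *Ann = SemaRef.SaveNestedNameSpecifierAnnotation(SS);
///   CXXScopeSpec Restored;
///   SemaRef.RestoreNestedNameSpecifierAnnotation(Ann, AnnRange, Restored);
/// \endcode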
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
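///
/// For illustration (a sketch of the language rule, not from the original
/// header):
/// \code
///   struct S { typedef int T; void f(T); };
///   void S::f(T) {} // after 'S::', 'T' is looked up in S's scope
/// \endcode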
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
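///
/// For illustration (a sketch of the rule, not from the original header):
/// \code
///   struct X { static const int a = 1; static int b; };
///   int X::b = a; // 'a' is found in X's scope while parsing the initializer
/// \endcode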
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
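///
/// For illustration (a minimal C++14 sketch):
/// \code
///   int n = 0;
///   auto f = [m = n + 1] { return m; }; // 'm = n + 1' is the init-capture
/// \endcode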
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
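///
/// For illustration (a minimal C++20 sketch):
/// \code
///   auto f = []<typename T>(T t) { return t; }; // explicit template parameters
/// \endcode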
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
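///
/// For illustration (a sketch of the language behavior this implements):
/// \code
///   auto f = [](bool b) { if (b) return 1; return 2; }; // deduced: int
/// \endcode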
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; IR generation
/// emits the real body of the function pointer conversion, which forwards
/// to the lambda's static invoker function.
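///
/// For illustration (a minimal sketch): the conversion used when a
/// captureless lambda initializes a function pointer:
/// \code
///   int (*fp)(int) = [](int x) { return x + 1; };
/// \endcode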
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, and false is returned.
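///
/// For illustration (a sketch; 'Fits' is a hypothetical concept name):
/// \code
///   template<typename T> concept Fits = sizeof(T) <= 8; // constraint-expression
/// \endcode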
bool CheckConstraintExpression(Expr *CE);
bool CalculateConstraintSatisfaction(ConceptDecl *NamedConcept,
MultiLevelTemplateArgumentList &MLTAL,
Expr *ConstraintExpr,
bool &IsSatisfied);
/// Check that the associated constraints of a template declaration match the
/// associated constraints of an older declaration of which it is a
/// redeclaration.
bool CheckRedeclarationConstraintMatch(TemplateParameterList *Old,
TemplateParameterList *New);
/// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo *ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator.
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(FunctionDecl *MD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
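///
/// For illustration (a sketch of a covariant return type):
/// \code
///   struct B { virtual B *clone(); };
///   struct D : B { D *clone() override; }; // covariant: 'D*' derives from 'B*'
/// \endcode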
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
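///
/// For illustration (a sketch of the error case):
/// \code
///   struct B { virtual void f() final; };
///   struct D : B { void f() override; }; // error: overrides a 'final' function
/// \endcode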
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
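///
/// For illustration (a sketch; the guide itself is hypothetical):
/// \code
///   template<typename T> struct S { S(T); };
///   S(int) -> S<long>; // 'S' is the deduction-guide name here
/// \endcode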
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
SourceLocation ConceptNameLoc, NamedDecl *FoundDecl,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword.
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword.
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
// Concepts
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
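///
/// For illustration (a sketch of what gets diagnosed):
/// \code
///   template<typename ...Ts> void f(Ts t); // error: 'Ts' is unexpanded
/// \endcode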
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
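///
/// For illustration, a type pattern followed by an ellipsis (a sketch; the
/// exact parser path is not shown here):
/// \code
///   template<typename ...Ts> struct D : Ts... {}; // pack expansion of 'Ts'
/// \endcode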
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
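///
/// For illustration (a sketch of the error case; uses std::pair from
/// &lt;utility&gt;):
/// \code
///   template<typename ...Ts, typename ...Us>
///   void zip(std::pair<Ts, Us>...); // ill-formed if Ts and Us differ in length
/// \endcode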
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
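///
/// For illustration (a C++14 sketch; 'arity' is a hypothetical name):
/// \code
///   template<typename ...Ts> constexpr unsigned arity = sizeof...(Ts);
/// \endcode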
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
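// Illustrative sketch (not from this header): deducing 'auto' for a
// variable's declared type from its initializer. 'S', 'TSI', 'Init', and
// 'VDecl' are hypothetical inputs.
//
//   QualType Deduced;
//   DeduceAutoResult DAR = S.DeduceAutoType(TSI, Init, Deduced);
//   if (DAR == DAR_Failed)
//     S.DiagnoseAutoDeductionFailure(VDecl, Init); // emit a diagnostic
//   else if (DAR == DAR_Succeeded)
//     VDecl->setType(Deduced);                     // commit the deduced type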
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// We are checking the constraints associated with a constrained entity or
/// the constraint expression of a concept. This includes the checks that
/// atomic constraints have the type 'bool' and that they can be constant
/// evaluated.
ConstraintsCheck,
/// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
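// Illustrative sketch (not from this header): manually recording a
// synthesis context. In practice InstantiatingTemplate (below) builds and
// pushes these contexts; this only shows which fields matter. 'SemaRef',
// 'Loc', and 'D' are hypothetical.
//
//   CodeSynthesisContext Ctx;
//   Ctx.Kind = CodeSynthesisContext::TemplateInstantiation;
//   Ctx.PointOfInstantiation = Loc;  // where the use occurred
//   Ctx.Entity = D;                  // the declaration being instantiated
//   SemaRef.pushCodeSynthesisContext(Ctx);
//   // ... perform the synthesis ...
//   SemaRef.popCodeSynthesisContext();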
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
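// Illustrative sketch (not from this header): substituting the I-th
// element of each parameter pack within a scope, with the previous index
// restored automatically on scope exit. 'S' and 'NumExpansions' are
// hypothetical.
//
//   for (unsigned I = 0; I != *NumExpansions; ++I) {
//     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I);
//     // substitutions here see ArgumentPackSubstitutionIndex == I
//   }
//   // the index is back to its previous value (e.g., -1) here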
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error; the object then reports isInvalid() as true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, TemplateDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
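// Illustrative sketch (not from this header): the typical usage pattern
// around a unit of instantiation work. 'PointOfInstantiation' and 'Entity'
// are hypothetical.
//
//   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
//   if (Inst.isInvalid())
//     return true;                   // instantiation depth limit exceeded
//   if (Inst.isAlreadyInstantiating())
//     return false;                  // recursive request; nothing to do
//   // ... instantiate; the context is popped when Inst is destroyed ...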
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
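// Illustrative sketch (not from this header): trapping errors during a
// tentative substitution so they behave as SFINAE failures rather than
// emitted diagnostics. 'E' and 'TemplateArgs' are hypothetical.
//
//   SFINAETrap Trap(*this);
//   ExprResult Result = SubstExpr(E, TemplateArgs);
//   if (Trap.hasErrorOccurred())
//     return TDK_SubstitutionFailure; // errors were counted, not emitted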
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
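// Illustrative sketch (not from this header): probing whether a construct
// would be valid without committing to diagnostics or typo correction.
// 'tryBuildConstruct' is a hypothetical helper.
//
//   {
//     TentativeAnalysisScope Tentative(*this);
//     Candidate = tryBuildConstruct(/*...*/); // suppressed on failure
//   } // typo-correction and trapped diagnostic state restored here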
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
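// Illustrative sketch (not from this header): isolating the pending
// instantiations and vtable uses produced while evaluating some construct,
// then flushing them before the destructor restores the saved queues.
//
//   GlobalEagerInstantiationScope GlobalInstantiations(S, /*Enabled=*/true);
//   // ... work that may enqueue instantiations and vtable uses ...
//   GlobalInstantiations.perform();  // must run before the destructor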
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
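// Illustrative sketch (not from this header): the local analogue, used
// around a function body so members of local classes are instantiated in
// the enclosing scope.
//
//   LocalEagerInstantiationScope LocalInstantiations(S);
//   // ... instantiate the function body ...
//   LocalInstantiations.perform();   // flush before the scope exits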
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
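// Illustrative sketch (not from this header): recording one interesting
// per-parameter entry; getPointerOrNull() yields nullptr when every entry
// is the default, so callers can skip storing an array at all.
//
//   ExtParameterInfoBuilder ParamInfos;
//   ParamInfos.set(
//       0, FunctionProtoType::ExtParameterInfo().withIsNoEscape(true));
//   const FunctionProtoType::ExtParameterInfo *Array =
//       ParamInfos.getPointerOrNull(/*numParams=*/3);
//   // Array[0] is the interesting entry; entries 1..2 are defaults.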
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
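// Illustrative sketch (not from this header): substituting template
// arguments into a set of call arguments, expanding packs as needed.
// 'Args' and 'TemplateArgs' are hypothetical.
//
//   SmallVector<Expr *, 4> ConvertedArgs;
//   if (SubstExprs(Args, /*IsCall=*/true, TemplateArgs, ConvertedArgs))
//     return ExprError();            // substitution failed
//   // ConvertedArgs now holds the fully-substituted argument list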
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
ResultTypeCompatibilityKind
checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
const ObjCInterfaceDecl *CurrentClass);
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on well formed \#pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
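// Illustrative sketch (not from the Clang sources): one plausible way a
// caller routes a parsed 'co_await' operand through the hooks above;
// 'S', 'CurScope', 'KwLoc', and 'Operand' are assumed.
//
//   ExprResult Awaited = S.ActOnCoawaitExpr(CurScope, KwLoc, Operand);
//   if (Awaited.isInvalid())
//     return ExprError(); // diagnostics already emitted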
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name.
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if the declaration is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
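// Illustrative sketch (not from the Clang sources) of how the extension
// maps above might be populated and consulted; 'S', 'DoubleTy', and 'DS'
// are assumed.
//
//   // While declaring an extension-guarded type:
//   S.setCurrentOpenCLExtension("cl_khr_fp64");
//   S.setCurrentOpenCLExtensionForType(DoubleTy);
//   // Later, when a declaration specifier names that type:
//   if (S.checkOpenCLDisabledTypeDeclSpec(DS, DoubleTy))
//     return; // diagnostics already emitted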
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckForDelayedContext = true);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckCaller = true);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Finishes analysis of the deferred function calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis();
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Marks all the functions that might be required for the currently active
/// OpenMP context.
void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
FunctionDecl *Func,
bool MightBeOdrUse);
public:
/// Types used to store the context selector info for the declare variant
/// directive.
using OMPCtxStringType = SmallString<8>;
using OMPCtxSelectorData =
OpenMPCtxSelectorData<SmallVector<OMPCtxStringType, 4>, ExprResult>;
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture the lambda's captured variables in the OpenMP region
/// before the original lambda itself is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \p D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// Called on a correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the Requires directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize the declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish the current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
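// Illustrative sketch (not from the Clang sources) of the parse-time call
// sequence for '#pragma omp declare reduction(id : type : combiner)
// initializer(init)'; names other than the Sema hooks are assumed.
//
//   DeclGroupPtrTy DRD = S.ActOnOpenMPDeclareReductionDirectiveStart(
//       CurScope, DC, Name, ReductionTypes, AS);
//   Decl *D = DRD.get().getSingleDecl();
//   S.ActOnOpenMPDeclareReductionCombinerStart(CurScope, D);
//   S.ActOnOpenMPDeclareReductionCombinerEnd(D, CombinerExpr);
//   VarDecl *OmpPriv =
//       S.ActOnOpenMPDeclareReductionInitializerStart(CurScope, D);
//   S.ActOnOpenMPDeclareReductionInitializerEnd(D, InitExpr, OmpPriv);
//   return S.ActOnOpenMPDeclareReductionDirectiveEnd(CurScope, DRD,
//                                                    /*IsValid=*/true);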
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of a target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on a correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true if inside an OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true if inside an OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
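// Illustrative sketch (not from the Clang sources): after parsing a
// directive's clauses and associated statement, a caller might assemble
// the region roughly as follows; all non-Sema names are assumed.
//
//   S.StartOpenMPDSABlock(DKind, DirName, CurScope, Loc);
//   S.ActOnOpenMPRegionStart(DKind, CurScope);
//   StmtResult AStmt = ParseAssociatedStatement(); // assumed helper
//   AStmt = S.ActOnOpenMPRegionEnd(AStmt, Clauses);
//   StmtResult Dir = S.ActOnOpenMPExecutableDirective(
//       DKind, DirName, CancelRegion, Clauses, AStmt.get(), StartLoc, EndLoc);
//   S.EndOpenMPDSABlock(Dir.get());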
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \returns None if the function and the variant are not compatible with
/// the pragma; otherwise, the pair of the original function and the variant
/// ref expression.
Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(
DeclGroupPtrTy DG, Expr *VariantRef, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param Data Set of context-specific data for the specified context
/// selector.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
SourceRange SR,
ArrayRef<OMPCtxSelectorData> Data);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The result of the cast has the value kind \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
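// Illustrative sketch (not from the Clang sources): converting an integer
// expression 'E' to 'bool' via an implicit cast, assuming the usual
// 'Context' member is in scope:
//
//   ExprResult Res =
//       ImpCastExprToType(E, Context.BoolTy, CK_IntegralToBoolean);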
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
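// Illustrative sketch (not from the Clang sources): promoting the trailing
// arguments of a variadic call; 'FDecl', 'Proto', 'Fn', and 'VariadicArgs'
// are assumed.
//
//   VariadicCallType CallType = getVariadicCallType(FDecl, Proto, Fn);
//   for (Expr *&Arg : VariadicArgs) {
//     ExprResult R = DefaultVariadicArgumentPromotion(Arg, CallType, FDecl);
//     if (R.isInvalid())
//       return true; // error already diagnosed
//     Arg = R.get();
//   }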
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If the operands aren't both arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
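// Illustrative sketch (not from the Clang sources): a binary-operator
// checker typically starts with the usual arithmetic conversions:
//
//   QualType ResultTy = UsualArithmeticConversions(LHS, RHS);
//   if (LHS.isInvalid() || RHS.isInvalid())
//     return QualType();
//   if (ResultTy.isNull() || !ResultTy->isArithmeticType())
//     return InvalidOperands(Loc, LHS, RHS);
//   return ResultTy;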
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
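// Illustrative sketch (not from the Clang sources) of pairing an
// assignment-constraint check with its diagnostic; 'LHSType', 'Init',
// and 'Loc' are assumed.
//
//   ExprResult RHS = Init;
//   AssignConvertType ConvTy =
//       CheckSingleAssignmentConstraints(LHSType, RHS);
//   if (RHS.isInvalid() ||
//       DiagnoseAssignmentResult(ConvTy, Loc, LHSType,
//                                RHS.get()->getType(), RHS.get(),
//                                AA_Assigning))
//     return ExprError();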
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// Type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// Type checking for declaration initializers (C99 6.7.8).
bool CheckForConstantInitializer(Expr *e, QualType t);
// Type checking for C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
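// A rough sketch of the classification below (hypothetical types):
//   const Base &b = derivedObj; // T1=Base, T2=Derived: Ref_Compatible
//                               // (with DerivedToBase set)
//   int &i = constInt;          // same unqualified type but cv1 < cv2:
//                               // Ref_Related, not directly bindable
//   long &l = someInt;          // unrelated types: Ref_Incompatible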
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
bool &DerivedToBase, bool &ObjCConversion,
bool &ObjCLifetimeConversion,
bool &FunctionConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
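// A minimal usage sketch (assuming a Scope *S and an Expr *CondExpr are in
// hand; not a verbatim caller):
//   ConditionResult Cond =
//       ActOnCondition(S, Loc, CondExpr, ConditionKind::ConstexprIf);
//   if (Cond.isInvalid())
//     return StmtError();
//   if (llvm::Optional<bool> Known = Cond.getKnownValue())
//     ; // 'if constexpr': only the branch selected by *Known is needed.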
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
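// Usage sketch (CaseExpr is a hypothetical case-label expression):
//   llvm::APSInt Val;
//   if (VerifyIntegerConstantExpression(CaseExpr, &Val).isInvalid())
//     return StmtError(); // diagnostics have already been emitted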
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
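// A sketch of the pragma pairing these back (assuming the
// force_cuda_host_device spelling):
//   #pragma clang force_cuda_host_device begin
//   void implicitly_host_device();  // treated as __host__ __device__
//   #pragma clang force_cuda_host_device end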
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP device errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure the kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check whether a field shadows a field inherited from a base class.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the number of arguments being passed to a
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
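// Usage sketch (hypothetical Proto/Args in a call-checking routine):
//   if (TooManyArguments(Proto->getNumParams(), Args.size(),
//                        /*PartialOverloading=*/false))
//     ; // diagnose the call as passing too many arguments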
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedDllExportClasses.empty() &&
"there shouldn't be any pending delayed DLL export classes");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
decltype(DelayedDllExportClasses) SavedDllExportClasses;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
SavedDllExportClasses.swap(S.DelayedDllExportClasses);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks whether the expression is in the set of potentially
/// misaligned members and is converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes the expression from the
/// set. This is used when we do not want to diagnose such misaligned access
/// (e.g. in conversions to void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
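// A minimal usage sketch: entering an unevaluated context around the operand
// of sizeof (parseOperand is a hypothetical parse step):
//   {
//     EnterExpressionEvaluationContext Unevaluated(
//         SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
//     ExprResult Operand = parseOperand();
//   } // the context is popped when the RAII object goes out of scope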
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
omp_for_schedule_guided.c | // RUN: %libomp-compile-and-run
/* Test for guided scheduling
 * Ensure that threads first receive chunks in an interleaved fashion,
 * then verify that the chunk sizes decrease to a stable value.
 * Modified by Chunhua Liao
 * For example, 100 iterations on 2 threads, chunksize 7
* one line for each dispatch, 0/1 means thread id
* 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 24
* 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 18
* 0 0 0 0 0 0 0 0 0 0 0 0 0 0 14
* 1 1 1 1 1 1 1 1 1 1 10
* 0 0 0 0 0 0 0 0 8
* 1 1 1 1 1 1 1 7
* 0 0 0 0 0 0 0 7
* 1 1 1 1 1 1 1 7
* 0 0 0 0 0 5
*/
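/* Informally, guided scheduling hands out chunks of roughly
 * remaining_iterations / num_threads (never smaller than the requested
 * chunk size), so with T threads the k-th chunk is about
 *   chunk_k ~ (N / T) * (1 - 1/T)^k
 * (a sketch; exact sizes are implementation defined). The evaluation
 * code below fits the constant c to the first observed chunk and then
 * checks this geometric decay.
 */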
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
#define CFSMAX_SIZE 1000
#define MAX_TIME 0.005
#ifdef SLEEPTIME
#undef SLEEPTIME
#define SLEEPTIME 0.0001
#endif
int test_omp_for_schedule_guided()
{
int * tids;
int * chunksizes;
int notout;
int maxiter;
int threads;
int i;
int result;
tids = (int *) malloc (sizeof (int) * (CFSMAX_SIZE + 1));
maxiter = 0;
result = 1;
notout = 1;
/* Testing if enough threads are available for this check. */
#pragma omp parallel
{
#pragma omp single
{
threads = omp_get_num_threads();
}
}
/* ensure there are at least two threads */
if (threads < 2) {
omp_set_num_threads(2);
threads = 2;
}
/* Now the real parallel work:
* Each thread will start immediately with the first chunk.
*/
#pragma omp parallel shared(tids,maxiter)
{ /* begin of parallel */
double count;
int tid;
int j;
tid = omp_get_thread_num ();
#pragma omp for nowait schedule(guided)
for(j = 0; j < CFSMAX_SIZE; ++j) {
count = 0.;
#pragma omp flush(maxiter)
if (j > maxiter) {
#pragma omp critical
{
maxiter = j;
}
}
/*printf ("thread %d sleeping\n", tid);*/
#pragma omp flush(maxiter,notout)
while (notout && (count < MAX_TIME) && (maxiter == j)) {
#pragma omp flush(maxiter,notout)
my_sleep (SLEEPTIME);
count += SLEEPTIME;
#ifdef VERBOSE
printf(".");
#endif
}
#ifdef VERBOSE
if (count > 0.) printf(" waited %lf s\n", count);
#endif
/*printf ("thread %d awake\n", tid);*/
tids[j] = tid;
#ifdef VERBOSE
printf("%d finished by %d\n",j,tid);
#endif
} /* end of for */
notout = 0;
#pragma omp flush(maxiter,notout)
} /* end of parallel */
/*******************************************************
* evaluation of the values *
*******************************************************/
{
int determined_chunksize = 1;
int last_threadnr = tids[0];
int global_chunknr = 0;
int openwork = CFSMAX_SIZE;
int expected_chunk_size;
int* local_chunknr = (int*)malloc(threads * sizeof(int));
double c = 1;
for (i = 0; i < threads; i++)
local_chunknr[i] = 0;
tids[CFSMAX_SIZE] = -1;
/*
* determine the number of global chunks
*/
// fprintf(stderr,"# global_chunknr thread local_chunknr chunksize\n");
for(i = 1; i <= CFSMAX_SIZE; ++i) {
if (last_threadnr==tids[i]) {
determined_chunksize++;
} else {
/* fprintf(stderr, "%d\t%d\t%d\t%d\n", global_chunknr,
last_threadnr, local_chunknr[last_threadnr], determined_chunksize); */
global_chunknr++;
local_chunknr[last_threadnr]++;
last_threadnr = tids[i];
determined_chunksize = 1;
}
}
/* now allocate the memory for saving the sizes of the global chunks */
chunksizes = (int*)malloc(global_chunknr * sizeof(int));
/*
* Evaluate the sizes of the global chunks
*/
global_chunknr = 0;
determined_chunksize = 1;
last_threadnr = tids[0];
for (i = 1; i <= CFSMAX_SIZE; ++i) {
/* If the thread number is the same as before, increase the
 * detected chunk size for this chunk; otherwise reset the detected
 * chunk size to one and save the number of the next
 * thread in last_threadnr.
 */
if (last_threadnr == tids[i]) {
determined_chunksize++;
} else {
chunksizes[global_chunknr] = determined_chunksize;
global_chunknr++;
local_chunknr[last_threadnr]++;
last_threadnr = tids[i];
determined_chunksize = 1;
}
}
#ifdef VERBOSE
fprintf(stderr, "found\texpected\tconstant\n");
#endif
/* identify the constant c for the exponential
decrease of the chunksize */
expected_chunk_size = openwork / threads;
c = (double) chunksizes[0] / expected_chunk_size;
for (i = 0; i < global_chunknr; i++) {
/* calculate the new expected chunksize */
if (expected_chunk_size > 1)
expected_chunk_size = c * openwork / threads;
#ifdef VERBOSE
fprintf(stderr, "%8d\t%8d\t%lf\n", chunksizes[i],
expected_chunk_size, c * chunksizes[i]/expected_chunk_size);
#endif
/* check if the chunk size is within rounding error */
if (abs (chunksizes[i] - expected_chunk_size) >= 2) {
result = 0;
#ifndef VERBOSE
fprintf(stderr, "Chunksize differed from expected "
"value: %d instead of %d\n", chunksizes[i],
expected_chunk_size);
return 0;
#endif
} /* end if */
#ifndef VERBOSE
if (expected_chunk_size - chunksizes[i] < 0)
fprintf(stderr, "Chunksize did not decrease: %d"
" instead of %d\n", chunksizes[i],expected_chunk_size);
#endif
/* calculating the remaining amount of work */
openwork -= chunksizes[i];
}
}
return result;
}
int main()
{
int i;
int num_failed=0;
for(i = 0; i < REPETITIONS; i++) {
if(!test_omp_for_schedule_guided()) {
num_failed++;
}
}
return num_failed;
}
|
Example_teams.4.c | /*
* @@name: teams.4c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.0
*/
#define N 1024*1024
float dotprod(float B[], float C[])
{
float sum = 0.0;
int i;
#pragma omp target map(to: B[0:N], C[0:N]) map(tofrom: sum)
#pragma omp teams num_teams(8) thread_limit(16) reduction(+:sum)
#pragma omp distribute parallel for reduction(+:sum) \
dist_schedule(static, 1024) schedule(static, 64)
for (i=0; i<N; i++)
sum += B[i] * C[i];
return sum;
}
/* Note: The variable sum is now mapped with tofrom, for correct
execution with 4.5 (and pre-4.5) compliant compilers. See Devices Intro.
*/
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declarations.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
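/*
  PSDQuantum() rounds a byte count up to the next even value, matching the
  2-byte padding PSD applies to odd-length data blocks; for example,
  PSDQuantum(5) == 6 and PSDQuantum(6) == 6.
*/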
/*
Enumerated declarations.
*/
typedef enum
{
Raw = 0,
RLE = 1,
ZipWithoutPrediction = 2,
ZipWithPrediction = 3
} PSDCompressionType;
typedef enum
{
BitmapMode = 0,
GrayscaleMode = 1,
IndexedMode = 2,
RGBMode = 3,
CMYKMode = 4,
MultichannelMode = 7,
DuotoneMode = 8,
LabMode = 9
} PSDImageType;
/*
Typedef declarations.
*/
typedef struct _ChannelInfo
{
short
type;
size_t
size;
} ChannelInfo;
typedef struct _MaskInfo
{
Image
*image;
RectangleInfo
page;
unsigned char
background,
flags;
} MaskInfo;
typedef struct _LayerInfo
{
ChannelInfo
channel_info[MaxPSDChannels];
char
blendkey[4];
Image
*image;
MaskInfo
mask;
Quantum
opacity;
RectangleInfo
page;
size_t
offset_x,
offset_y;
unsigned char
clipping,
flags,
name[257],
visible;
unsigned short
channels;
StringInfo
*info;
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
if (length < 4)
return(MagickFalse);
if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
return(MagickTrue);
return(MagickFalse);
}
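/*
A minimal usage sketch (illustrative only, not part of the coder), where
"file" is assumed to be an open FILE * positioned at the start of the blob:

unsigned char header[4];

if ((fread(header,1,sizeof(header),file) == sizeof(header)) &&
(IsPSD(header,sizeof(header)) != MagickFalse))
(void) printf("8BPS signature found\n");
*/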
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
switch (image->compose)
{
case ColorBurnCompositeOp:
return(image->endian == LSBEndian ? "vidi" : "idiv");
case ColorDodgeCompositeOp:
return(image->endian == LSBEndian ? " vid" : "div ");
case ColorizeCompositeOp:
return(image->endian == LSBEndian ? "rloc" : "colr");
case DarkenCompositeOp:
return(image->endian == LSBEndian ? "krad" : "dark");
case DifferenceCompositeOp:
return(image->endian == LSBEndian ? "ffid" : "diff");
case DissolveCompositeOp:
return(image->endian == LSBEndian ? "ssid" : "diss");
case ExclusionCompositeOp:
return(image->endian == LSBEndian ? "dums" : "smud");
case HardLightCompositeOp:
return(image->endian == LSBEndian ? "tiLh" : "hLit");
case HardMixCompositeOp:
return(image->endian == LSBEndian ? "xiMh" : "hMix");
case HueCompositeOp:
return(image->endian == LSBEndian ? " euh" : "hue ");
case LightenCompositeOp:
return(image->endian == LSBEndian ? "etil" : "lite");
case LinearBurnCompositeOp:
return(image->endian == LSBEndian ? "nrbl" : "lbrn");
case LinearDodgeCompositeOp:
return(image->endian == LSBEndian ? "gddl" : "lddg");
case LinearLightCompositeOp:
return(image->endian == LSBEndian ? "tiLl" : "lLit");
case LuminizeCompositeOp:
return(image->endian == LSBEndian ? " mul" : "lum ");
case MultiplyCompositeOp:
return(image->endian == LSBEndian ? " lum" : "mul ");
case OverlayCompositeOp:
return(image->endian == LSBEndian ? "revo" : "over");
case PinLightCompositeOp:
return(image->endian == LSBEndian ? "tiLp" : "pLit");
case SaturateCompositeOp:
return(image->endian == LSBEndian ? " tas" : "sat ");
case ScreenCompositeOp:
return(image->endian == LSBEndian ? "nrcs" : "scrn");
case SoftLightCompositeOp:
return(image->endian == LSBEndian ? "tiLs" : "sLit");
case VividLightCompositeOp:
return(image->endian == LSBEndian ? "tiLv" : "vLit");
case OverCompositeOp:
default:
return(image->endian == LSBEndian ? "mron" : "norm");
}
}
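/*
PSD blend modes are stored as 4-byte ASCII keys. Each LSB-endian string
above is the big-endian key spelled backwards (e.g. MultiplyCompositeOp is
"mul " on disk, and " lum" is that key reversed), presumably so the write
path emits identical bytes regardless of host byte order.
*/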
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
Image *image,ExceptionInfo* exception)
{
const char
*option;
MagickBooleanType
status;
ssize_t
y;
if ((image->alpha_trait != BlendPixelTrait) ||
(image->colorspace != sRGBColorspace))
return(MagickTrue);
option=GetImageOption(image_info,"psd:alpha-unblend");
if (IsStringFalse(option) != MagickFalse)
return(MagickTrue);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
register ssize_t
i;
gamma=QuantumScale*GetPixelAlpha(image,q);
if ((gamma != 0.0) && (gamma != 1.0))
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
if (channel != AlphaPixelChannel)
q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
}
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
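/*
A sketch of the math behind CorrectPSDAlphaBlend() (our reading of the
code, not Adobe documentation): if each channel was composited against
white as

q' = gamma*q+(1.0-gamma)*QuantumRange, where gamma = alpha/QuantumRange,

then the original value is recovered by

q = (q'-(1.0-gamma)*QuantumRange)/gamma,

which is the expression applied above to every non-alpha channel whenever
0 < gamma < 1.
*/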
static inline CompressionType ConvertPSDCompression(
PSDCompressionType compression)
{
switch (compression)
{
case RLE:
return(RLECompression);
case ZipWithPrediction:
case ZipWithoutPrediction:
return(ZipCompression);
default:
return(NoCompression);
}
}
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
MagickBooleanType revert,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying layer opacity %.20g", (double) opacity);
if (opacity == OpaqueAlpha)
return(MagickTrue);
if (image->alpha_trait != BlendPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (revert == MagickFalse)
SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
opacity),q);
else if (opacity > 0)
SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
(MagickRealType) opacity)),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
Image
*complete_mask;
MagickBooleanType
status;
PixelInfo
color;
ssize_t
y;
if (image->alpha_trait == UndefinedPixelTrait)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying opacity mask");
complete_mask=CloneImage(image,0,0,MagickTrue,exception);
if (complete_mask == (Image *) NULL)
return(MagickFalse);
complete_mask->alpha_trait=BlendPixelTrait;
GetPixelInfo(complete_mask,&color);
color.red=(MagickRealType) background;
(void) SetImageColor(complete_mask,&color,exception);
status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
if (status == MagickFalse)
{
complete_mask=DestroyImage(complete_mask);
return(status);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register Quantum
*p;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
alpha,
intensity;
alpha=(MagickRealType) GetPixelAlpha(image,q);
intensity=GetPixelIntensity(complete_mask,p);
if (revert == MagickFalse)
SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
else if (intensity > 0)
SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
q+=GetPixelChannels(image);
p+=GetPixelChannels(complete_mask);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
complete_mask=DestroyImage(complete_mask);
return(status);
}
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
ExceptionInfo *exception)
{
char
*key;
RandomInfo
*random_info;
StringInfo
*key_info;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" preserving opacity mask");
random_info=AcquireRandomInfo();
key_info=GetRandomKey(random_info,9+1);  /* room for key[8] and the NUL at key[9] */
key=(char *) GetStringInfoDatum(key_info);
key[8]=(char) layer_info->mask.background;
key[9]='\0';
layer_info->mask.image->page.x+=layer_info->page.x;
layer_info->mask.image->page.y+=layer_info->page.y;
(void) SetImageRegistry(ImageRegistryType,(const char *) key,
layer_info->mask.image,exception);
(void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
(const char *) key);
key_info=DestroyStringInfo(key_info);
random_info=DestroyRandomInfo(random_info);
}
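/*
The preserved mask is parked in the image registry under a short random
key, and that key is recorded on the layer as the "psd:opacity-mask"
artifact. A consumer can fetch the mask back along these lines
(illustrative sketch; "layer" is assumed to be the decoded layer image and
error handling is omitted):

const char *key=GetImageArtifact(layer,"psd:opacity-mask");
Image *mask=(Image *) GetImageRegistry(ImageRegistryType,key,exception);
*/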
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
const unsigned char *compact_pixels,const ssize_t depth,
const size_t number_pixels,unsigned char *pixels)
{
#define CheckNumberCompactPixels \
if (packets == 0) \
return(i); \
packets--
#define CheckNumberPixels(count) \
if (((ssize_t) i + count) > (ssize_t) number_pixels) \
return(i); \
i+=count
int
pixel;
register ssize_t
i,
j;
size_t
length;
ssize_t
packets;
packets=(ssize_t) number_compact_pixels;
for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
{
packets--;
length=(size_t) (*compact_pixels++);
if (length == 128)
continue;
if (length > 128)
{
length=256-length+1;
CheckNumberCompactPixels;
pixel=(*compact_pixels++);
for (j=0; j < (ssize_t) length; j++)
{
switch (depth)
{
case 1:
{
CheckNumberPixels(8);
*pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
CheckNumberPixels(4);
*pixels++=(unsigned char) ((pixel >> 6) & 0x03);
*pixels++=(unsigned char) ((pixel >> 4) & 0x03);
*pixels++=(unsigned char) ((pixel >> 2) & 0x03);
*pixels++=(unsigned char) (pixel & 0x03);
break;
}
case 4:
{
CheckNumberPixels(2);
*pixels++=(unsigned char) ((pixel >> 4) & 0x0f);
*pixels++=(unsigned char) (pixel & 0x0f);
break;
}
default:
{
CheckNumberPixels(1);
*pixels++=(unsigned char) pixel;
break;
}
}
}
continue;
}
length++;
for (j=0; j < (ssize_t) length; j++)
{
CheckNumberCompactPixels;
switch (depth)
{
case 1:
{
CheckNumberPixels(8);
*pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
CheckNumberPixels(4);
*pixels++=(*compact_pixels >> 6) & 0x03;
*pixels++=(*compact_pixels >> 4) & 0x03;
*pixels++=(*compact_pixels >> 2) & 0x03;
*pixels++=(*compact_pixels & 0x03);
break;
}
case 4:
{
CheckNumberPixels(2);
*pixels++=(*compact_pixels >> 4) & 0x0f;
*pixels++=(*compact_pixels & 0x0f);
break;
}
default:
{
CheckNumberPixels(1);
*pixels++=(*compact_pixels);
break;
}
}
compact_pixels++;
}
}
return(i);
}
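/*
DecodePSDPixels() is PackBits run-length decoding, extended to expand
sub-byte depths as it copies. A worked example at depth 8, one control
byte plus its payload per line:

0xFD 0x2A            run:     256-0xFD+1 = 4 copies of 0x2A
0x02 0x10 0x20 0x30  literal: 0x02+1 = 3 bytes copied verbatim
0x80                 no-op:   skipped

decodes to 2A 2A 2A 2A 10 20 30. At depth 1 every decoded byte expands to
8 pixels, at depth 2 to 4 pixels, and at depth 4 to 2 pixels.
*/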
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
const ssize_t number_layers)
{
ssize_t
i;
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image != (Image *) NULL)
layer_info[i].image=DestroyImage(layer_info[i].image);
if (layer_info[i].mask.image != (Image *) NULL)
layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image);
if (layer_info[i].info != (StringInfo *) NULL)
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
}
return((LayerInfo *) RelinquishMagickMemory(layer_info));
}
static inline size_t GetPSDPacketSize(const Image *image)
{
if (image->storage_class == PseudoClass)
{
if (image->colors > 256)
return(2);
}
if (image->depth > 16)
return(4);
if (image->depth > 8)
return(2);
return(1);
}
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
if (psd_info->version == 1)
return((MagickSizeType) ReadBlobLong(image));
return((MagickSizeType) ReadBlobLongLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
if (image->depth == 1)
return(((image->columns+7)/8)*GetPSDPacketSize(image));
else
return(image->columns*GetPSDPacketSize(image));
}
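/*
Worked examples for the two helpers above: a 16-bit DirectClass image has
a packet size of 2, so a 100-column row occupies 100*2 = 200 bytes, while
a 1-bit image packs 8 pixels per byte, so the same row occupies
((100+7)/8)*1 = 13 bytes.
*/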
static const char *ModeToString(PSDImageType type)
{
switch (type)
{
case BitmapMode: return "Bitmap";
case GrayscaleMode: return "Grayscale";
case IndexedMode: return "Indexed";
case RGBMode: return "RGB";
case CMYKMode: return "CMYK";
case MultichannelMode: return "Multichannel";
case DuotoneMode: return "Duotone";
case LabMode: return "L*A*B";
default: return "unknown";
}
}
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
ChannelType
channel_mask;
MagickBooleanType
status;
channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
AlphaChannel));
status=NegateImage(image,MagickFalse,exception);
(void) SetImageChannelMask(image,channel_mask);
return(status);
}
static StringInfo *ParseImageResourceBlocks(Image *image,
const unsigned char *blocks,size_t length,
MagickBooleanType *has_merged_image,ExceptionInfo *exception)
{
const unsigned char
*p;
ssize_t
offset;
StringInfo
*profile;
unsigned char
name_length;
unsigned int
count;
unsigned short
id,
short_sans;
if (length < 16)
return((StringInfo *) NULL);
profile=BlobToStringInfo((const unsigned char *) NULL,length);
if (profile == (StringInfo *) NULL)
return((StringInfo *) NULL);
SetStringInfoDatum(profile,blocks);
SetStringInfoName(profile,"8bim");
for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
{
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
p+=4;
p=PushShortPixel(MSBEndian,p,&id);
p=PushCharPixel(p,&name_length);
if ((name_length % 2) == 0)
name_length++;
p+=name_length;
if (p > (blocks+length-4))
break;
p=PushLongPixel(MSBEndian,p,&count);
offset=(ssize_t) count;
if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
break;
switch (id)
{
case 0x03ed:
{
char
value[MagickPathExtent];
unsigned short
resolution;
/*
Resolution info.
*/
if (offset < 16)
break;
p=PushShortPixel(MSBEndian,p,&resolution);
image->resolution.x=(double) resolution;
(void) FormatLocaleString(value,MagickPathExtent,"%g",
image->resolution.x);
(void) SetImageProperty(image,"tiff:XResolution",value,exception);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&resolution);
image->resolution.y=(double) resolution;
(void) FormatLocaleString(value,MagickPathExtent,"%g",
image->resolution.y);
(void) SetImageProperty(image,"tiff:YResolution",value,exception);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
image->units=PixelsPerInchResolution;
break;
}
case 0x0421:
{
if ((offset > 4) && (*(p+4) == 0))
*has_merged_image=MagickFalse;
p+=offset;
break;
}
default:
{
p+=offset;
break;
}
}
if ((offset & 0x01) != 0)
p++;
}
return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
char
*q;
if (image->endian == MSBEndian)
return;
q=p+length-1;
for ( ; p < q; p++, q--)
{
char
letter;
letter=(*p);
*p=(*q);
*q=letter;
}
}
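/*
ReversePSDString() reverses a string in place so that 4-byte type keys
read on a little-endian image compare equal to their big-endian spellings;
e.g. the bytes "MIB8" become "8BIM".
*/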
static inline void SetPSDPixel(Image *image,const size_t channels,
const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
ExceptionInfo *exception)
{
if (image->storage_class == PseudoClass)
{
PixelInfo
*color;
if (type == 0)
{
if (packet_size == 1)
SetPixelIndex(image,ScaleQuantumToChar(pixel),q);
else
SetPixelIndex(image,ScaleQuantumToShort(pixel),q);
}
color=image->colormap+(ssize_t) ConstrainColormapIndex(image,
(ssize_t) GetPixelIndex(image,q),exception);
if ((type == 0) && (channels > 1))
return;
color->alpha=(MagickRealType) pixel;
SetPixelViaPixelInfo(image,color,q);
return;
}
switch (type)
{
case -1:
{
SetPixelAlpha(image,pixel,q);
break;
}
case -2:
case 0:
{
SetPixelRed(image,pixel,q);
break;
}
case -3:
case 1:
{
SetPixelGreen(image,pixel,q);
break;
}
case -4:
case 2:
{
SetPixelBlue(image,pixel,q);
break;
}
case 3:
{
if (image->colorspace == CMYKColorspace)
SetPixelBlack(image,pixel,q);
else
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
case 4:
{
if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
(channels > 3))
break;
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
}
}
static MagickBooleanType ReadPSDChannelPixels(Image *image,
const size_t channels,const ssize_t row,const ssize_t type,
const unsigned char *pixels,ExceptionInfo *exception)
{
Quantum
pixel;
register const unsigned char
*p;
register Quantum
*q;
register ssize_t
x;
size_t
packet_size;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (Quantum *) NULL)
return(MagickFalse);
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
if (packet_size == 2)
{
unsigned short
nibble;
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
else
{
MagickFloatType
nibble;
p=PushFloatPixel(MSBEndian,p,&nibble);
pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble));
}
if (image->depth > 1)
{
SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
q+=GetPixelChannels(image);
}
else
{
ssize_t
bit,
number_bits;
number_bits=(ssize_t) image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit = 0; bit < (ssize_t) number_bits; bit++)
{
SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
& (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
q+=GetPixelChannels(image);
x++;
}
if (x != (ssize_t) image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
const ssize_t type,ExceptionInfo *exception)
{
MagickBooleanType
status;
size_t
row_size;
ssize_t
count,
y;
unsigned char
*pixels;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is RAW");
row_size=GetPSDRowSize(image);
pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
(void) memset(pixels,0,row_size*sizeof(*pixels));
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=MagickFalse;
count=ReadBlob(image,row_size,pixels);
if (count != (ssize_t) row_size)
break;
status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
if (status == MagickFalse)
break;
}
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
const PSDInfo *psd_info,const size_t size)
{
MagickOffsetType
*sizes;
ssize_t
y;
sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
if (sizes != (MagickOffsetType *) NULL)
{
for (y=0; y < (ssize_t) size; y++)
{
if (psd_info->version == 1)
sizes[y]=(MagickOffsetType) ReadBlobShort(image);
else
sizes[y]=(MagickOffsetType) ReadBlobLong(image);
}
}
return(sizes);
}
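/*
The RLE scanline table holds one byte count per row per channel: 2-byte
counts in version 1 (PSD) and 4-byte counts in version 2 (PSB, the large
document variant). A version-1 file with 100 rows therefore spends
100*2 = 200 bytes on the table for each channel.
*/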
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
MagickBooleanType
status;
size_t
length,
row_size;
ssize_t
count,
y;
unsigned char
*compact_pixels,
*pixels;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is RLE compressed");
row_size=GetPSDRowSize(image);
pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
length=0;
for (y=0; y < (ssize_t) image->rows; y++)
if ((MagickOffsetType) length < sizes[y])
length=(size_t) sizes[y];
if (length > (row_size+2048)) /* arbitrary number */
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
}
compact_pixels=(unsigned char *) AcquireQuantumMemory(length,
sizeof(*compact_pixels));
if (compact_pixels == (unsigned char *) NULL)
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=MagickFalse;
count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
if (count != (ssize_t) sizes[y])
break;
/*
Pass a bogus depth for 1-bit data so DecodePSDPixels() copies the packed
bytes verbatim; row_size already accounts for the bit packing and
ReadPSDChannelPixels() expands the bits later.
*/
count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
(ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
if (count != (ssize_t) row_size)
break;
status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
exception);
if (status == MagickFalse)
break;
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
const ssize_t type,const PSDCompressionType compression,
const size_t compact_size,ExceptionInfo *exception)
{
MagickBooleanType
status;
register unsigned char
*p;
size_t
count,
length,
packet_size,
row_size;
ssize_t
y;
unsigned char
*compact_pixels,
*pixels;
z_stream
stream;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is ZIP compressed");
if ((MagickSizeType) compact_size > GetBlobSize(image))
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
sizeof(*compact_pixels));
if (compact_pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
packet_size=GetPSDPacketSize(image);
row_size=image->columns*packet_size;
count=image->rows*row_size;
pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
{
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
{
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
memset(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
stream.next_in=(Bytef *)compact_pixels;
stream.avail_in=(uInt) compact_size;
stream.next_out=(Bytef *)pixels;
stream.avail_out=(uInt) count;
if (inflateInit(&stream) == Z_OK)
{
int
ret;
while (stream.avail_out > 0)
{
ret=inflate(&stream,Z_SYNC_FLUSH);
if ((ret != Z_OK) && (ret != Z_STREAM_END))
{
(void) inflateEnd(&stream);
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(MagickFalse);
}
if (ret == Z_STREAM_END)
break;
}
(void) inflateEnd(&stream);
}
if (compression == ZipWithPrediction)
{
p=pixels;
while (count > 0)
{
length=image->columns;
while (--length)
{
if (packet_size == 2)
{
p[2]+=p[0]+((p[1]+p[3]) >> 8);
p[3]+=p[1];
}
/*
else if (packet_size == 4)
{
TODO: Figure out what to do there.
}
*/
else
*(p+1)+=*p;
p+=packet_size;
}
p+=packet_size;
count-=row_size;
}
}
status=MagickTrue;
p=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
if (status == MagickFalse)
break;
p+=row_size;
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
#endif
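/*
ZipWithPrediction stores horizontal deltas: after inflation, each byte (or
big-endian 16-bit word) is the difference from its left-hand neighbor, and
the loop above integrates every row back into absolute values. A sketch of
the 8-bit case for one row of deltas d[0..n-1]:

for (x=1; x < n; x++)
d[x]=(unsigned char) (d[x]+d[x-1]);

after which d[] holds the pixel values; the 16-bit branch performs the
same sum while carrying between the low and high bytes.
*/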
static MagickBooleanType ReadPSDChannel(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
const size_t channel,const PSDCompressionType compression,
ExceptionInfo *exception)
{
Image
*channel_image,
*mask;
MagickOffsetType
offset;
MagickBooleanType
status;
channel_image=image;
mask=(Image *) NULL;
if ((layer_info->channel_info[channel].type < -1) &&
(layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
{
const char
*option;
/*
Ignore the mask when it is not a user-supplied layer mask, when the mask
is disabled, or when the flags have unsupported values.
*/
option=GetImageOption(image_info,"psd:preserve-opacity-mask");
if ((layer_info->channel_info[channel].type != -2) ||
(layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
(IsStringTrue(option) == MagickFalse)))
{
(void) SeekBlob(image,(MagickOffsetType)
layer_info->channel_info[channel].size-2,SEEK_CUR);
return(MagickTrue);
}
mask=CloneImage(image,layer_info->mask.page.width,
layer_info->mask.page.height,MagickFalse,exception);
if (mask != (Image *) NULL)
{
(void) ResetImagePixels(mask,exception);
(void) SetImageType(mask,GrayscaleType,exception);
channel_image=mask;
}
}
offset=TellBlob(image);
status=MagickFalse;
switch(compression)
{
case Raw:
status=ReadPSDChannelRaw(channel_image,psd_info->channels,
(ssize_t) layer_info->channel_info[channel].type,exception);
break;
case RLE:
{
MagickOffsetType
*sizes;
sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
if (sizes == (MagickOffsetType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ReadPSDChannelRLE(channel_image,psd_info,
(ssize_t) layer_info->channel_info[channel].type,sizes,exception);
sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
}
break;
case ZipWithPrediction:
case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
status=ReadPSDChannelZip(channel_image,layer_info->channels,
(ssize_t) layer_info->channel_info[channel].type,compression,
layer_info->channel_info[channel].size-2,exception);
#else
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
"'%s' (ZLIB)",image->filename);
#endif
break;
default:
(void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
"CompressionNotSupported","'%.20g'",(double) compression);
break;
}
(void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
SEEK_SET);
if (status == MagickFalse)
{
if (mask != (Image *) NULL)
(void) DestroyImage(mask);
ThrowBinaryException(CoderError,"UnableToDecompressImage",
image->filename);
}
if (mask != (Image *) NULL)
{
if (layer_info->mask.image != (Image *) NULL)
layer_info->mask.image=DestroyImage(layer_info->mask.image);
layer_info->mask.image=mask;
}
return(status);
}
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
char
message[MagickPathExtent];
MagickBooleanType
status;
PSDCompressionType
compression;
ssize_t
j;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" setting up new layer image");
if (psd_info->mode != IndexedMode)
(void) SetImageBackgroundColor(layer_info->image,exception);
layer_info->image->compose=PSDBlendModeToCompositeOperator(
layer_info->blendkey);
if (layer_info->visible == MagickFalse)
layer_info->image->compose=NoCompositeOp;
/*
Set up some hidden attributes for folks that need them.
*/
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",
(double) layer_info->page.x);
(void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",
(double) layer_info->page.y);
(void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
(void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
layer_info->opacity);
(void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
(void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
exception);
status=MagickTrue;
for (j=0; j < (ssize_t) layer_info->channels; j++)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for channel %.20g",(double) j);
compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
/* TODO: Remove this when we figure out how to support this */
if ((compression == ZipWithPrediction) && (image->depth == 32))
{
(void) ThrowMagickException(exception,GetMagickModule(),
TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)");
return(MagickFalse);
}
layer_info->image->compression=ConvertPSDCompression(compression);
if (layer_info->channel_info[j].type == -1)
layer_info->image->alpha_trait=BlendPixelTrait;
status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
(size_t) j,compression,exception);
if (status == MagickFalse)
break;
}
if (status != MagickFalse)
status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
MagickFalse,exception);
if ((status != MagickFalse) &&
(layer_info->image->colorspace == CMYKColorspace))
status=NegateCMYK(layer_info->image,exception);
if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
{
const char
*option;
layer_info->mask.image->page.x=layer_info->mask.page.x;
layer_info->mask.image->page.y=layer_info->mask.page.y;
/* Do not composite the mask when it is disabled */
if ((layer_info->mask.flags & 0x02) == 0x02)
layer_info->mask.image->compose=NoCompositeOp;
else
status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
exception);
option=GetImageOption(image_info,"psd:preserve-opacity-mask");
if (IsStringTrue(option) != MagickFalse)
PreservePSDOpacityMask(image,layer_info,exception);
layer_info->mask.image=DestroyImage(layer_info->mask.image);
}
return(status);
}
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
LayerInfo *layer_info)
{
int
channel_type;
register ssize_t
i;
if (layer_info->channels < psd_info->min_channels)
return(MagickFalse);
channel_type=RedChannel;
if (psd_info->min_channels >= 3)
channel_type|=(GreenChannel | BlueChannel);
if (psd_info->min_channels >= 4)
channel_type|=BlackChannel;
for (i=0; i < (ssize_t) layer_info->channels; i++)
{
short
type;
type=layer_info->channel_info[i].type;
if (type == -1)
{
channel_type|=AlphaChannel;
continue;
}
if (type < -1)
continue;
if (type == 0)
channel_type&=~RedChannel;
else if (type == 1)
channel_type&=~GreenChannel;
else if (type == 2)
channel_type&=~BlueChannel;
else if (type == 3)
channel_type&=~BlackChannel;
}
if (channel_type == 0)
return(MagickTrue);
if ((channel_type == AlphaChannel) &&
(layer_info->channels >= psd_info->min_channels + 1))
return(MagickTrue);
return(MagickFalse);
}
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
ssize_t number_layers)
{
register ssize_t
i;
ssize_t
j;
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=i; j < number_layers - 1; j++)
layer_info[j] = layer_info[j+1];
number_layers--;
i--;
}
}
if (number_layers == 0)
{
layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
return;
}
for (i=0; i < number_layers; i++)
{
if (i > 0)
layer_info[i].image->previous=layer_info[i-1].image;
if (i < (number_layers-1))
layer_info[i].image->next=layer_info[i+1].image;
layer_info[i].image->page=layer_info[i].page;
}
image->next=layer_info[0].image;
layer_info[0].image->previous=image;
layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline MagickBooleanType PSDSkipImage(const ImageInfo *image_info,
const size_t index)
{
if (image_info->number_scenes == 0)
return(MagickFalse);
if (index < image_info->scene)
return(MagickTrue);
if (index > image_info->scene+image_info->number_scenes-1)
return(MagickTrue);
return(MagickFalse);
}
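/*
PSDSkipImage() implements the usual scene-selection semantics: with
image_info->scene == 2 and image_info->number_scenes == 3, only indexes 2,
3, and 4 are decoded; the channel data of every other layer is seeked past
without being read.
*/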
static MagickBooleanType ReadPSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,
const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
char
type[4];
LayerInfo
*layer_info;
MagickSizeType
size;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
count,
index,
j,
number_layers;
size=GetPSDSize(psd_info,image);
if (size == 0)
{
/*
Skip layers & masks.
*/
(void) ReadBlobLong(image);
count=ReadBlob(image,4,(unsigned char *) type);
if (count == 4)
ReversePSDString(image,type,(size_t) count);
if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
return(MagickTrue);
else
{
count=ReadBlob(image,4,(unsigned char *) type);
if (count == 4)
ReversePSDString(image,type,4);
if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
(LocaleNCompare(type,"Lr32",4) == 0)))
size=GetPSDSize(psd_info,image);
else
return(MagickTrue);
}
}
if (size == 0)
return(MagickTrue);
layer_info=(LayerInfo *) NULL;
number_layers=(ssize_t) ReadBlobSignedShort(image);
if (number_layers < 0)
{
/*
The first alpha channel in the merged result contains the
transparency data for the merged result.
*/
number_layers=MagickAbsoluteValue(number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" negative layer count corrected for");
image->alpha_trait=BlendPixelTrait;
}
/*
We only need to know if the image has an alpha channel.
*/
if (skip_layers != MagickFalse)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image contains %.20g layers",(double) number_layers);
if (number_layers == 0)
ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
image->filename);
layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
sizeof(*layer_info));
if (layer_info == (LayerInfo *) NULL)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of LayerInfo failed");
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
for (i=0; i < number_layers; i++)
{
ssize_t
top,
left,
bottom,
right;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading layer #%.20g",(double) i+1);
top=(ssize_t) ReadBlobSignedLong(image);
left=(ssize_t) ReadBlobSignedLong(image);
bottom=(ssize_t) ReadBlobSignedLong(image);
right=(ssize_t) ReadBlobSignedLong(image);
if ((right < left) || (bottom < top))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
layer_info[i].page.y=top;
layer_info[i].page.x=left;
layer_info[i].page.width=(size_t) (right-left);
layer_info[i].page.height=(size_t) (bottom-top);
layer_info[i].channels=ReadBlobShort(image);
if (layer_info[i].channels > MaxPSDChannels)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
image->filename);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
(double) layer_info[i].page.x,(double) layer_info[i].page.y,
(double) layer_info[i].page.width,(double)
layer_info[i].page.height,(double) layer_info[i].channels);
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
if ((layer_info[i].channel_info[j].type < -4) ||
(layer_info[i].channel_info[j].type > 4))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
image->filename);
}
layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" channel[%.20g]: type=%.20g, size=%.20g",(double) j,
(double) layer_info[i].channel_info[j].type,
(double) layer_info[i].channel_info[j].size);
}
if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
count=ReadBlob(image,4,(unsigned char *) type);
if (count == 4)
ReversePSDString(image,type,4);
if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer type was %.4s instead of 8BIM", type);
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
if (count != 4)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
ReversePSDString(image,layer_info[i].blendkey,4);
layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
layer_info[i].flags=(unsigned char) ReadBlobByte(image);
layer_info[i].visible=!(layer_info[i].flags & 0x02);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
layer_info[i].blendkey,(double) layer_info[i].opacity,
layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
layer_info[i].visible ? "true" : "false");
(void) ReadBlobByte(image); /* filler */
size=ReadBlobLong(image);
if (size != 0)
{
MagickSizeType
combined_length,
length;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer contains additional info");
length=ReadBlobLong(image);
combined_length=length+4;
if (length != 0)
{
/*
Layer mask info.
*/
layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
layer_info[i].mask.page.height=(size_t)
(ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
layer_info[i].mask.page.width=(size_t) (
ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
layer_info[i].mask.background=(unsigned char) ReadBlobByte(
image);
layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
if (!(layer_info[i].mask.flags & 0x01))
{
layer_info[i].mask.page.y=layer_info[i].mask.page.y-
layer_info[i].page.y;
layer_info[i].mask.page.x=layer_info[i].mask.page.x-
layer_info[i].page.x;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
(double) layer_info[i].mask.page.x,(double)
layer_info[i].mask.page.y,(double)
layer_info[i].mask.page.width,(double)
layer_info[i].mask.page.height,(double) ((MagickOffsetType)
length)-18);
/*
Skip over the rest of the layer mask information.
*/
if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
length=ReadBlobLong(image);
combined_length+=length+4;
if (length != 0)
{
/*
Layer blending ranges info.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer blending ranges: length=%.20g",(double)
((MagickOffsetType) length));
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
/*
Layer name.
*/
length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
combined_length+=length+1;
if (length > 0)
{
(void) ReadBlob(image,(size_t) length,layer_info[i].name);
layer_info[i].name[length]='\0';
length++;  /* the padding below also covers the length byte */
}
else
layer_info[i].name[0]='\0';
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer name: %s",layer_info[i].name);
if ((length % 4) != 0)
{
length=4-(length % 4);
combined_length+=length;
/* Skip over the padding of the layer name */
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
length=(MagickSizeType) size-combined_length;
if (length > 0)
{
unsigned char
*info;
if (length > GetBlobSize(image))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"InsufficientImageDataInFile",image->filename);
}
layer_info[i].info=AcquireStringInfo((size_t) length);
info=GetStringInfoDatum(layer_info[i].info);
(void) ReadBlob(image,(size_t) length,info);
}
}
}
for (i=0; i < number_layers; i++)
{
if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is empty");
if (layer_info[i].info != (StringInfo *) NULL)
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
continue;
}
/*
Allocate layered image.
*/
layer_info[i].image=CloneImage(image,layer_info[i].page.width,
layer_info[i].page.height,MagickFalse,exception);
if (layer_info[i].image == (Image *) NULL)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of image for layer %.20g failed",(double) i);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
if (layer_info[i].info != (StringInfo *) NULL)
{
(void) SetImageProfile(layer_info[i].image,"psd:additional-info",
layer_info[i].info,exception);
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
}
}
if (image_info->ping != MagickFalse)
{
AttachPSDLayers(image,layer_info,number_layers);
return(MagickTrue);
}
status=MagickTrue;
index=0;
for (i=0; i < number_layers; i++)
{
if ((layer_info[i].image == (Image *) NULL) ||
(PSDSkipImage(image_info,++index) != MagickFalse))
{
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
if (DiscardBlobBytes(image,(MagickSizeType)
layer_info[i].channel_info[j].size) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
continue;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for layer %.20g",(double) i);
status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
exception);
if (status == MagickFalse)
break;
status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
(MagickSizeType) number_layers);
if (status == MagickFalse)
break;
}
if (status != MagickFalse)
AttachPSDLayers(image,layer_info,number_layers);
else
layer_info=DestroyLayerInfo(layer_info,number_layers);
return(status);
}
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
PolicyDomain
domain;
PolicyRights
rights;
domain=CoderPolicyDomain;
rights=ReadPolicyRights;
if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
return(MagickTrue);
return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
exception));
}
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
MagickOffsetType
*sizes;
MagickBooleanType
status;
PSDCompressionType
compression;
register ssize_t
i;
if ((image_info->number_scenes != 0) && (image_info->scene != 0))
return(MagickTrue);
compression=(PSDCompressionType) ReadBlobMSBShort(image);
image->compression=ConvertPSDCompression(compression);
if ((compression != Raw) && (compression != RLE))
{
(void) ThrowMagickException(exception,GetMagickModule(),
TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
return(MagickFalse);
}
sizes=(MagickOffsetType *) NULL;
if (compression == RLE)
{
sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
if (sizes == (MagickOffsetType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
status=MagickTrue;
for (i=0; i < (ssize_t) psd_info->channels; i++)
{
ssize_t
type;
type=i;
if ((type == 1) && (psd_info->channels == 2))
type=-1;
if (compression == RLE)
status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
exception);
else
status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
if (status != MagickFalse)
status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
psd_info->channels);
if (status == MagickFalse)
break;
}
if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
status=NegateCMYK(image,exception);
if (status != MagickFalse)
status=CorrectPSDAlphaBlend(image_info,image,exception);
sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
return(status);
}
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
has_merged_image,
skip_layers,
status;
MagickOffsetType
offset;
MagickSizeType
length;
PSDInfo
psd_info;
register ssize_t
i;
size_t
imageListLength;
ssize_t
count;
StringInfo
*profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read image header.
*/
image->endian=MSBEndian;
count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
psd_info.version=ReadBlobMSBShort(image);
if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
((psd_info.version != 1) && (psd_info.version != 2)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlob(image,6,psd_info.reserved);
psd_info.channels=ReadBlobMSBShort(image);
if (psd_info.channels < 1)
ThrowReaderException(CorruptImageError,"MissingImageChannel");
if (psd_info.channels > MaxPSDChannels)
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
psd_info.rows=ReadBlobMSBLong(image);
psd_info.columns=ReadBlobMSBLong(image);
if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
(psd_info.columns > 30000)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.depth=ReadBlobMSBShort(image);
if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
(psd_info.depth != 16) && (psd_info.depth != 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.mode=ReadBlobMSBShort(image);
if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
(double) psd_info.columns,(double) psd_info.rows,(double)
psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
psd_info.mode));
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize image.
*/
image->depth=psd_info.depth;
image->columns=psd_info.columns;
image->rows=psd_info.rows;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
psd_info.min_channels=3;
if (psd_info.mode == LabMode)
(void) SetImageColorspace(image,LabColorspace,exception);
if (psd_info.mode == CMYKMode)
{
psd_info.min_channels=4;
(void) SetImageColorspace(image,CMYKColorspace,exception);
}
else
if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
(psd_info.mode == DuotoneMode))
{
if (psd_info.depth != 32)
{
status=AcquireImageColormap(image,(size_t) (psd_info.depth < 16 ?
256 : 65536),exception);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image colormap allocated");
}
psd_info.min_channels=1;
(void) SetImageColorspace(image,GRAYColorspace,exception);
}
else
if (psd_info.mode == IndexedMode)
psd_info.min_channels=1;
if (psd_info.channels < psd_info.min_channels)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Read PSD raster colormap only present for indexed and duotone images.
*/
length=ReadBlobMSBLong(image);
if ((psd_info.mode == IndexedMode) && (length < 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (length != 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading colormap");
if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
{
/*
Duotone image data; the format of this data is undocumented.
32 bits per pixel; the colormap is ignored.
*/
(void) SeekBlob(image,(MagickOffsetType) length,SEEK_CUR);
}
else
{
size_t
number_colors;
/*
Read PSD raster colormap.
*/
number_colors=(size_t) length/3;
if (number_colors > 65536)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
image->alpha_trait=UndefinedPixelTrait;
}
}
if ((image->depth == 1) && (image->storage_class != PseudoClass))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
has_merged_image=MagickTrue;
profile=(StringInfo *) NULL;
length=ReadBlobMSBLong(image);
if (length != 0)
{
unsigned char
*blocks;
/*
Image resources block.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading image resource blocks - %.20g bytes",(double)
((MagickOffsetType) length));
if (length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*blocks));
if (blocks == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) length,blocks);
if ((count != (ssize_t) length) || (length < 4) ||
(LocaleNCompare((char *) blocks,"8BIM",4) != 0))
{
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
profile=ParseImageResourceBlocks(image,blocks,(size_t) length,
&has_merged_image,exception);
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
}
/*
Layer and mask block.
*/
length=GetPSDSize(&psd_info,image);
if (length == 8)
{
(void) ReadBlobMSBLong(image);  /* discard the first long */
length=ReadBlobMSBLong(image);
}
offset=TellBlob(image);
skip_layers=MagickFalse;
if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
(has_merged_image != MagickFalse))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" read composite only");
skip_layers=MagickTrue;
}
if (length == 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image has no layers");
}
else
{
if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
exception) != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Skip the rest of the layer and mask information.
*/
(void) SeekBlob(image,offset+length,SEEK_SET);
}
/*
If we are only "pinging" the image, then we're done - so return.
*/
if (EOFBlob(image) != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
if (image_info->ping != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
Read the precombined layer, present for PSD < 4 compatibility.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading the precombined layer");
imageListLength=GetImageListLength(image);
if ((has_merged_image != MagickFalse) || (imageListLength == 1))
has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
&psd_info,exception);
if ((has_merged_image == MagickFalse) && (imageListLength == 1) &&
(length != 0))
{
(void) SeekBlob(image,offset,SEEK_SET);
status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
exception);
if (status != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
}
if (has_merged_image == MagickFalse)
{
Image
*merged;
if (imageListLength == 1)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
}
image->background_color.alpha=(MagickRealType) TransparentAlpha;
image->background_color.alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(image,exception);
merged=MergeImageLayers(image,FlattenLayer,exception);
ReplaceImageInList(&image,merged);
}
if (profile != (StringInfo *) NULL)
{
Image
*next;
i=0;
next=image;
while (next != (Image *) NULL)
{
if (PSDSkipImage(image_info,i++) == MagickFalse)
(void) SetImageProfile(next,GetStringInfoName(profile),profile,
exception);
next=next->next;
}
profile=DestroyStringInfo(profile);
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
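/*
Helper writers for the two on-disk field widths: version 1 (PSD) stores
RLE row byte counts in 2 bytes and section sizes in 4 bytes, while
version 2 (PSB) widens them to 4 and 8 bytes respectively.  SetPSDOffset()
and SetPSDSize() write at the current blob position; the WritePSD*
variants seek back to a previously reserved offset, patch the value, and
restore the current position.
*/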
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
const size_t offset)
{
if (psd_info->version == 1)
return(WriteBlobMSBShort(image,(unsigned short) offset));
return(WriteBlobMSBLong(image,(unsigned int) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
const MagickSizeType size,const MagickOffsetType offset)
{
MagickOffsetType
current_offset;
ssize_t
result;
current_offset=TellBlob(image);
(void) SeekBlob(image,offset,SEEK_SET);
if (psd_info->version == 1)
result=WriteBlobMSBShort(image,(unsigned short) size);
else
result=WriteBlobMSBLong(image,(unsigned int) size);
(void) SeekBlob(image,current_offset,SEEK_SET);
return(result);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
const MagickSizeType size)
{
if (psd_info->version == 1)
return(WriteBlobLong(image,(unsigned int) size));
return(WriteBlobLongLong(image,size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
const MagickSizeType size,const MagickOffsetType offset)
{
MagickOffsetType
current_offset;
ssize_t
result;
current_offset=TellBlob(image);
(void) SeekBlob(image,offset,SEEK_SET);
result=SetPSDSize(psd_info,image,size);
(void) SeekBlob(image,current_offset,SEEK_SET);
return(result);
}
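/*
PackBits run-length encoding: each packet starts with a count byte n.  A
value 0..127 means "copy the next n+1 literal bytes"; a value 129..255
means "repeat the next byte 257-n times".  For example, the run "AAAA"
encodes as the two bytes 0xFD 'A' (257-0xFD == 4 repeats), while the
literals "ABC" encode as 0x02 'A' 'B' 'C'.  The stream ends with the EOD
marker byte 128.
*/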
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
const unsigned char *pixels,unsigned char *compact_pixels,
ExceptionInfo *exception)
{
int
count;
register ssize_t
i,
j;
register unsigned char
*q;
unsigned char
*packbits;
/*
Compress pixels with Packbits encoding.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pixels != (unsigned char *) NULL);
assert(compact_pixels != (unsigned char *) NULL);
packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
if (packbits == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
q=compact_pixels;
for (i=(ssize_t) length; i != 0; )
{
switch (i)
{
case 1:
{
i--;
*q++=(unsigned char) 0;
*q++=(*pixels);
break;
}
case 2:
{
i-=2;
*q++=(unsigned char) 1;
*q++=(*pixels);
*q++=pixels[1];
break;
}
case 3:
{
i-=3;
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
*q++=(unsigned char) ((256-3)+1);
*q++=(*pixels);
break;
}
*q++=(unsigned char) 2;
*q++=(*pixels);
*q++=pixels[1];
*q++=pixels[2];
break;
}
default:
{
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
/*
Packed run.
*/
count=3;
while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
{
count++;
if (count >= 127)
break;
}
i-=count;
*q++=(unsigned char) ((256-count)+1);
*q++=(*pixels);
pixels+=count;
break;
}
/*
Literal run.
*/
count=0;
while ((*(pixels+count) != *(pixels+count+1)) ||
(*(pixels+count+1) != *(pixels+count+2)))
{
packbits[count+1]=pixels[count];
count++;
if (((ssize_t) count >= (i-3)) || (count >= 127))
break;
}
i-=count;
*packbits=(unsigned char) (count-1);
for (j=0; j <= (ssize_t) count; j++)
*q++=packbits[j];
pixels+=count;
break;
}
}
}
*q++=(unsigned char) 128; /* EOD marker */
packbits=(unsigned char *) RelinquishMagickMemory(packbits);
return((size_t) (q-compact_pixels));
}
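/*
Write the two byte compression marker that starts the channel data
(0=Raw, 1=RLE, 2=Zip without prediction).  For RLE, one byte-count slot
per row and channel is reserved as a zero placeholder here and patched
later by WritePSDOffset() once the compressed row lengths are known.
*/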
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
const Image *next_image,const CompressionType compression,
const ssize_t channels)
{
size_t
length;
ssize_t
i,
y;
if (compression == RLECompression)
{
length=(size_t) WriteBlobShort(image,RLE);
for (i=0; i < channels; i++)
for (y=0; y < (ssize_t) next_image->rows; y++)
length+=SetPSDOffset(psd_info,image,0);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
else if (compression == ZipCompression)
length=(size_t) WriteBlobShort(image,ZipWithoutPrediction);
#endif
else
length=(size_t) WriteBlobShort(image,Raw);
return(length);
}
static size_t WritePSDChannel(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
const QuantumType quantum_type, unsigned char *compact_pixels,
MagickOffsetType size_offset,const MagickBooleanType separate,
const CompressionType compression,ExceptionInfo *exception)
{
MagickBooleanType
monochrome;
QuantumInfo
*quantum_info;
register const Quantum
*p;
register ssize_t
i;
size_t
count,
length;
ssize_t
y;
unsigned char
*pixels;
#ifdef MAGICKCORE_ZLIB_DELEGATE
#define CHUNK 16384
int
flush,
level;
unsigned char
*compressed_pixels;
z_stream
stream;
compressed_pixels=(unsigned char *) NULL;
flush=Z_NO_FLUSH;
#endif
count=0;
if (separate != MagickFalse)
{
size_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
}
if (next_image->depth > 8)
next_image->depth=16;
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
quantum_info=AcquireQuantumInfo(image_info,next_image);
if (quantum_info == (QuantumInfo *) NULL)
return(0);
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (compression == ZipCompression)
{
compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK,
sizeof(*compressed_pixels));
if (compressed_pixels == (unsigned char *) NULL)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(0);
}
memset(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
level=Z_DEFAULT_COMPRESSION;
if ((image_info->quality > 0) && (image_info->quality < 10))
level=(int) image_info->quality;
if (deflateInit(&stream,level) != Z_OK)
{
quantum_info=DestroyQuantumInfo(quantum_info);
compressed_pixels=(unsigned char *) RelinquishMagickMemory(
compressed_pixels);
return(0);
}
}
#endif
for (y=0; y < (ssize_t) next_image->rows; y++)
{
p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
if (monochrome != MagickFalse)
for (i=0; i < (ssize_t) length; i++)
pixels[i]=(~pixels[i]);
if (compression == RLECompression)
{
length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
exception);
count+=WriteBlob(image,length,compact_pixels);
size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
else if (compression == ZipCompression)
{
stream.avail_in=(uInt) length;
stream.next_in=(Bytef *) pixels;
if (y == (ssize_t) next_image->rows-1)
flush=Z_FINISH;
do {
stream.avail_out=(uInt) CHUNK;
stream.next_out=(Bytef *) compressed_pixels;
if (deflate(&stream,flush) == Z_STREAM_ERROR)
break;
length=(size_t) CHUNK-stream.avail_out;
if (length > 0)
count+=WriteBlob(image,length,compressed_pixels);
} while (stream.avail_out == 0);
}
#endif
else
count+=WriteBlob(image,length,pixels);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (compression == ZipCompression)
{
(void) deflateEnd(&stream);
compressed_pixels=(unsigned char *) RelinquishMagickMemory(
compressed_pixels);
}
#endif
quantum_info=DestroyQuantumInfo(quantum_info);
return(count);
}
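/*
Allocate a scratch buffer for one row of PackBits output.  The
allocation appears to be a generous upper bound: worst-case PackBits
output only grows the input by roughly one header byte per 128 literals
plus the EOD byte, well under the nine bytes per column reserved here.
*/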
static unsigned char *AcquireCompactPixels(const Image *image,
ExceptionInfo *exception)
{
size_t
packet_size;
unsigned char
*compact_pixels;
packet_size=image->depth > 8UL ? 2UL : 1UL;
compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
image->columns)+1,packet_size*sizeof(*compact_pixels));
if (compact_pixels == (unsigned char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
}
return(compact_pixels);
}
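/*
Write the channel data for one layer (separate is MagickTrue) or for the
merged composite (separate is MagickFalse).  Channels are emitted in PSD
order: the index or gray channel for colormapped and grayscale images,
otherwise red, green, blue, plus black for CMYK, then the alpha channel
and, for layers, any opacity mask.  CMYK pixel data is negated before
writing and restored afterwards, since Photoshop stores CMYK inverted.
*/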
static size_t WritePSDChannels(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
MagickOffsetType size_offset,const MagickBooleanType separate,
ExceptionInfo *exception)
{
CompressionType
compression;
Image
*mask;
MagickOffsetType
rows_offset;
size_t
channels,
count,
length,
offset_length;
unsigned char
*compact_pixels;
count=0;
offset_length=0;
rows_offset=0;
compact_pixels=(unsigned char *) NULL;
compression=next_image->compression;
if (image_info->compression != UndefinedCompression)
compression=image_info->compression;
if (compression == RLECompression)
{
compact_pixels=AcquireCompactPixels(next_image,exception);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
channels=1;
if (separate == MagickFalse)
{
if (next_image->storage_class != PseudoClass)
{
if (IsImageGray(next_image) == MagickFalse)
channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
3);
if (next_image->alpha_trait != UndefinedPixelTrait)
channels++;
}
rows_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,compression,
(ssize_t) channels);
offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
}
size_offset+=2;
if (next_image->storage_class == PseudoClass)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
IndexQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
if (IsImageGray(next_image) != MagickFalse)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
GrayQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
else
{
if (next_image->colorspace == CMYKColorspace)
(void) NegateCMYK(next_image,exception);
length=WritePSDChannel(psd_info,image_info,image,next_image,
RedQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
GreenQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlueQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
if (next_image->colorspace == CMYKColorspace)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
BlackQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
if (next_image->alpha_trait != UndefinedPixelTrait)
{
length=WritePSDChannel(psd_info,image_info,image,next_image,
AlphaQuantum,compact_pixels,rows_offset,separate,compression,
exception);
if (separate != MagickFalse)
size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
else
rows_offset+=offset_length;
count+=length;
}
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
if (next_image->colorspace == CMYKColorspace)
(void) NegateCMYK(next_image,exception);
if (separate != MagickFalse)
{
const char
*property;
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,
exception);
if (mask != (Image *) NULL)
{
if (compression == RLECompression)
{
compact_pixels=AcquireCompactPixels(mask,exception);
if (compact_pixels == (unsigned char *) NULL)
return(0);
}
length=WritePSDChannel(psd_info,image_info,image,mask,
RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
exception);
(void) WritePSDSize(psd_info,image,length,size_offset);
count+=length;
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
}
}
}
return(count);
}
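/*
Write a Pascal string: a length byte followed by at most 255 characters,
zero padded so that the total number of bytes written is a multiple of
the requested padding (4 for layer names).
*/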
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
size_t
count,
length;
register ssize_t
i;
/*
Max length is 255.
*/
count=0;
length=(strlen(value) > 255UL) ? 255UL : strlen(value);
if (length == 0)
count+=WriteBlobByte(image,0);
else
{
count+=WriteBlobByte(image,(unsigned char) length);
count+=WriteBlob(image,length,(const unsigned char *) value);
}
length++;
if ((length % padding) == 0)
return(count);
for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
count+=WriteBlobByte(image,0);
return(count);
}
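/*
Write the 0x03ED resolution resource.  Resolutions are stored as 16.16
fixed point values, e.g. 72 DPI is written as 72*65536 == 0x00480000;
the unit code is 1 for pixels/inch and 2 for pixels/centimeter.
*/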
static void WriteResolutionResourceBlock(Image *image)
{
double
x_resolution,
y_resolution;
unsigned short
units;
if (image->units == PixelsPerCentimeterResolution)
{
x_resolution=2.54*65536.0*image->resolution.x+0.5;
y_resolution=2.54*65536.0*image->resolution.y+0.5;
units=2;
}
else
{
x_resolution=65536.0*image->resolution.x+0.5;
y_resolution=65536.0*image->resolution.y+0.5;
units=1;
}
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x03ED);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,16); /* resource size */
(void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
(void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
(void) WriteBlobMSBShort(image,units); /* width unit */
(void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
(void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
(void) WriteBlobMSBShort(image,units); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
const signed short channel)
{
size_t
count;
count=(size_t) WriteBlobShort(image,(const unsigned short) channel);
count+=SetPSDSize(psd_info,image,0);
return(count);
}
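/*
The 8BIM profile is a sequence of image resource blocks: a 4 byte "8BIM"
signature, a 2 byte resource id, a 2 byte Pascal name (assumed empty
here), and a 4 byte data length followed by the data, padded to an even
size.  The two helpers below excise the ICC profile (id 0x040F) and
resolution (id 0x03ED) blocks from a cloned profile so they are not
written twice, as this coder emits its own copies.
*/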
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
if (id == 0x0000040f)
{
ssize_t
quantum;
quantum=PSDQuantum(count)+12;
if ((quantum >= 12) && (quantum < (ssize_t) length))
{
if ((q+quantum < (datum+length-16)))
(void) memmove(q,q+quantum,length-quantum-(q-datum));
SetStringInfoLength(bim_profile,length-quantum);
}
break;
}
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
ssize_t
cnt;
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
return;
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
cnt=PSDQuantum(count);
if (cnt < 0)
return;
if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
((ssize_t) length-(cnt+12)-(q-datum)) > 0)
{
(void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
SetStringInfoLength(bim_profile,length-(cnt+12));
break;
}
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
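/*
Filter the "psd:additional-info" profile according to the image option
of the same name: "all" preserves every block, "selective" keeps only
the whitelisted keys below, and any other value drops the profile
entirely.
*/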
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36
char
key[PSDKeySize];
/* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
const char
allowed[PSDAllowedLength][PSDKeySize] = {
"blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
"GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
"lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
"post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
},
*option;
const StringInfo
*info;
MagickBooleanType
found;
register size_t
i;
size_t
remaining_length,
length;
StringInfo
*profile;
unsigned char
*p;
unsigned int
size;
info=GetImageProfile(image,"psd:additional-info");
if (info == (const StringInfo *) NULL)
return((const StringInfo *) NULL);
option=GetImageOption(image_info,"psd:additional-info");
if (LocaleCompare(option,"all") == 0)
return(info);
if (LocaleCompare(option,"selective") != 0)
{
profile=RemoveImageProfile(image,"psd:additional-info");
return(DestroyStringInfo(profile));
}
length=GetStringInfoLength(info);
p=GetStringInfoDatum(info);
remaining_length=length;
length=0;
while (remaining_length >= 12)
{
/* skip over signature */
p+=4;
key[0]=(char) (*p++);
key[1]=(char) (*p++);
key[2]=(char) (*p++);
key[3]=(char) (*p++);
key[4]='\0';
size=(unsigned int) (*p++) << 24;
size|=(unsigned int) (*p++) << 16;
size|=(unsigned int) (*p++) << 8;
size|=(unsigned int) (*p++);
size=size & 0xffffffff;
remaining_length-=12;
if ((size_t) size > remaining_length)
return((const StringInfo *) NULL);
found=MagickFalse;
for (i=0; i < PSDAllowedLength; i++)
{
if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
continue;
found=MagickTrue;
break;
}
remaining_length-=(size_t) size;
if (found == MagickFalse)
{
if (remaining_length > 0)
p=(unsigned char *) memmove(p-12,p+size,remaining_length);
continue;
}
length+=(size_t) size+12;
p+=size;
}
profile=RemoveImageProfile(image,"psd:additional-info");
if (length == 0)
return(DestroyStringInfo(profile));
SetStringInfoLength(profile,(const size_t) length);
(void) SetImageProfile(image,"psd:additional-info",info,exception);
return(profile);
}
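/*
Write the layer info section.  Each layer record consists of the layer
rectangle (top, left, bottom, right), the channel count with one
(id, size) pair per channel, the "8BIM" blend signature and blend mode,
the opacity, clipping, and flag bytes, and an extra data block holding
the optional mask record and the Pascal layer name.  Channel sizes are
written as zero placeholders first and patched by WritePSDSize() once
the pixel data has been compressed.
*/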
static MagickBooleanType WritePSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
ExceptionInfo *exception)
{
char
layer_name[MagickPathExtent];
const char
*property;
const StringInfo
*info;
Image
*base_image,
*next_image;
MagickBooleanType
status;
MagickOffsetType
*layer_size_offsets,
size_offset;
register ssize_t
i;
size_t
layer_count,
layer_index,
length,
name_length,
rounded_size,
size;
status=MagickTrue;
base_image=GetNextImageInList(image);
if (base_image == (Image *) NULL)
base_image=image;
size=0;
size_offset=TellBlob(image);
(void) SetPSDSize(psd_info,image,0);
layer_count=0;
for (next_image=base_image; next_image != NULL; )
{
layer_count++;
next_image=GetNextImageInList(next_image);
}
if (image->alpha_trait != UndefinedPixelTrait)
size+=WriteBlobShort(image,-(unsigned short) layer_count);
else
size+=WriteBlobShort(image,(unsigned short) layer_count);
layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
(size_t) layer_count,sizeof(MagickOffsetType));
if (layer_size_offsets == (MagickOffsetType *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
layer_index=0;
for (next_image=base_image; next_image != NULL; )
{
Image
*mask;
unsigned char
default_color;
unsigned short
channels,
total_channels;
mask=(Image *) NULL;
property=GetImageArtifact(next_image,"psd:opacity-mask");
default_color=0;
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
}
size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
next_image->rows));
size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
next_image->columns));
channels=1;
if ((next_image->storage_class != PseudoClass) &&
(IsImageGray(next_image) == MagickFalse))
channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
3);
total_channels=channels;
if (next_image->alpha_trait != UndefinedPixelTrait)
total_channels++;
if (mask != (Image *) NULL)
total_channels++;
size+=WriteBlobShort(image,total_channels);
layer_size_offsets[layer_index++]=TellBlob(image);
for (i=0; i < (ssize_t) channels; i++)
size+=WriteChannelSize(psd_info,image,(signed short) i);
if (next_image->alpha_trait != UndefinedPixelTrait)
size+=WriteChannelSize(psd_info,image,-1);
if (mask != (Image *) NULL)
size+=WriteChannelSize(psd_info,image,-2);
size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" : "8BIM");
size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
property=GetImageArtifact(next_image,"psd:layer.opacity");
if (property != (const char *) NULL)
{
Quantum
opacity;
opacity=(Quantum) StringToInteger(property);
size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
(void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
}
else
size+=WriteBlobByte(image,255);
size+=WriteBlobByte(image,0);
size+=WriteBlobByte(image,(const unsigned char)
(next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
size+=WriteBlobByte(image,0);
info=GetAdditionalInformation(image_info,next_image,exception);
property=(const char *) GetImageProperty(next_image,"label",exception);
if (property == (const char *) NULL)
{
(void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
(double) layer_index);
property=layer_name;
}
name_length=strlen(property)+1;
if ((name_length % 4) != 0)
name_length+=(4-(name_length % 4));
if (info != (const StringInfo *) NULL)
name_length+=GetStringInfoLength(info);
name_length+=8;
if (mask != (Image *) NULL)
name_length+=20;
size+=WriteBlobLong(image,(unsigned int) name_length);
if (mask == (Image *) NULL)
size+=WriteBlobLong(image,0);
else
{
if (mask->compose != NoCompositeOp)
(void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
default_color),MagickTrue,exception);
mask->page.y+=image->page.y;
mask->page.x+=image->page.x;
size+=WriteBlobLong(image,20);
size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
mask->page.y));
size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
mask->page.x));
size+=WriteBlobByte(image,default_color);
size+=WriteBlobByte(image,(const unsigned char)
(mask->compose == NoCompositeOp ? 2 : 0));
size+=WriteBlobMSBShort(image,0);
}
size+=WriteBlobLong(image,0);
size+=WritePascalString(image,property,4);
if (info != (const StringInfo *) NULL)
size+=WriteBlob(image,GetStringInfoLength(info),
GetStringInfoDatum(info));
next_image=GetNextImageInList(next_image);
}
/*
Now the image data!
*/
next_image=base_image;
layer_index=0;
while (next_image != NULL)
{
length=WritePSDChannels(psd_info,image_info,image,next_image,
layer_size_offsets[layer_index++],MagickTrue,exception);
if (length == 0)
{
status=MagickFalse;
break;
}
size+=length;
next_image=GetNextImageInList(next_image);
}
/*
Write the total size.
*/
if (layers_size != (size_t*) NULL)
*layers_size=size;
if ((size/2) != ((size+1)/2))
rounded_size=size+1;
else
rounded_size=size;
(void) WritePSDSize(psd_info,image,rounded_size,size_offset);
layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
layer_size_offsets);
/*
Remove the opacity mask from the registry.
*/
next_image=base_image;
while (next_image != (Image *) NULL)
{
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
(void) DeleteImageRegistry(property);
next_image=GetNextImageInList(next_image);
}
return(status);
}
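/*
WritePSDLayers() is the exported entry point for writing just the layer
section on behalf of another coder; it checks the coder write policy
before delegating to the internal writer.
*/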
ModuleExport MagickBooleanType WritePSDLayers(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
PolicyDomain
domain;
PolicyRights
rights;
domain=CoderPolicyDomain;
rights=WritePolicyRights;
if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
return(MagickTrue);
return(WritePSDLayersInternal(image,image_info,psd_info,(size_t *) NULL,
exception));
}
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
const StringInfo
*icc_profile;
MagickBooleanType
status;
PSDInfo
psd_info;
register ssize_t
i;
size_t
length,
num_channels,
packet_size;
StringInfo
*bim_profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
packet_size=(size_t) (image->depth > 8 ? 6 : 3);
if (image->alpha_trait != UndefinedPixelTrait)
packet_size+=image->depth > 8 ? 2 : 1;
psd_info.version=1;
if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
(image->columns > 30000) || (image->rows > 30000))
psd_info.version=2;
(void) WriteBlob(image,4,(const unsigned char *) "8BPS");
(void) WriteBlobMSBShort(image,psd_info.version); /* version */
for (i=1; i <= 6; i++)
(void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
/* When the image has a color profile it won't be converted to gray scale */
if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
(SetImageGray(image,exception) != MagickFalse))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
if ((image_info->type != TrueColorType) && (image_info->type !=
TrueColorAlphaType) && (image->storage_class == PseudoClass))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
{
if (image->storage_class == PseudoClass)
(void) SetImageStorageClass(image,DirectClass,exception);
if (image->colorspace != CMYKColorspace)
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
else
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
}
(void) WriteBlobMSBShort(image,(unsigned short) num_channels);
(void) WriteBlobMSBLong(image,(unsigned int) image->rows);
(void) WriteBlobMSBLong(image,(unsigned int) image->columns);
if (IsImageGray(image) != MagickFalse)
{
MagickBooleanType
monochrome;
/*
Write depth & mode.
*/
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
}
else
{
(void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
if (((image_info->colorspace != UndefinedColorspace) ||
(image->colorspace != CMYKColorspace)) &&
(image_info->colorspace != CMYKColorspace))
{
(void) TransformImageColorspace(image,sRGBColorspace,exception);
(void) WriteBlobMSBShort(image,(unsigned short)
(image->storage_class == PseudoClass ? IndexedMode : RGBMode));
}
else
{
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,CMYKColorspace,exception);
(void) WriteBlobMSBShort(image,CMYKMode);
}
}
if ((IsImageGray(image) != MagickFalse) ||
(image->storage_class == DirectClass) || (image->colors > 256))
(void) WriteBlobMSBLong(image,0);
else
{
/*
Write PSD raster colormap.
*/
(void) WriteBlobMSBLong(image,768);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].red)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].green)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].blue)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
}
/*
Image resource block.
*/
length=28; /* 0x03ED resolution resource: 12 byte header+16 bytes of data */
bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
icc_profile=GetImageProfile(image,"icc");
if (bim_profile != (StringInfo *) NULL)
{
bim_profile=CloneStringInfo(bim_profile);
if (icc_profile != (StringInfo *) NULL)
RemoveICCProfileFromResourceBlock(bim_profile);
RemoveResolutionFromResourceBlock(bim_profile);
length+=PSDQuantum(GetStringInfoLength(bim_profile));
}
if (icc_profile != (const StringInfo *) NULL)
length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
(void) WriteBlobMSBLong(image,(unsigned int) length);
WriteResolutionResourceBlock(image);
if (bim_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,GetStringInfoLength(bim_profile),
GetStringInfoDatum(bim_profile));
bim_profile=DestroyStringInfo(bim_profile);
}
if (icc_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x0000040F);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
icc_profile));
(void) WriteBlob(image,GetStringInfoLength(icc_profile),
GetStringInfoDatum(icc_profile));
if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
(void) WriteBlobByte(image,0);
}
if (status != MagickFalse)
{
MagickOffsetType
size_offset;
size_t
size;
size_offset=TellBlob(image);
(void) SetPSDSize(&psd_info,image,0);
status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
exception);
size_offset+=WritePSDSize(&psd_info,image,size+
(psd_info.version == 1 ? 8 : 12),size_offset);
}
(void) WriteBlobMSBLong(image,0); /* user mask data */
/*
Write composite image.
*/
if (status != MagickFalse)
{
CompressionType
compression;
compression=image->compression;
if (image->compression == ZipCompression)
image->compression=RLECompression;
if (image_info->compression != UndefinedCompression)
image->compression=image_info->compression;
if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
exception) == 0)
status=MagickFalse;
image->compression=compression;
}
(void) CloseBlob(image);
return(status);
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
// TODO SYCL Integration header approach relies on an assumption that kernel
// lambda objects created by the host compiler and any of the device compilers
// will be identical with respect to field types, order and offsets. Some
// verification
// mechanism should be developed to enforce that.
// TODO FIXME SYCL Support for SYCL in FE should be refactored:
// - kernel identification and generation should be made a separate pass over
// AST. RecursiveASTVisitor + VisitFunctionTemplateDecl +
// FunctionTemplateDecl::getSpecializations() mechanism could be used for that.
// - All SYCL stuff on Sema level should be encapsulated into a single Sema
// field
// - Move SYCL stuff into a separate header
// Represents contents of a SYCL integration header file produced by a SYCL
// device compiler and used by SYCL host compiler (via forced inclusion into
// compiled SYCL source):
// - SYCL kernel names
// - SYCL kernel parameters and offsets of corresponding actual arguments
class SYCLIntegrationHeader {
public:
// Kind of kernel's parameters as captured by the compiler in the
// kernel lambda or function object
enum kernel_param_kind_t {
kind_first,
kind_accessor = kind_first,
kind_std_layout,
kind_sampler,
kind_pointer,
kind_last = kind_pointer
};
public:
SYCLIntegrationHeader(DiagnosticsEngine &Diag, bool UnnamedLambdaSupport);
/// Emits contents of the header into given stream.
void emit(raw_ostream &Out);
/// Emits contents of the header into a file with given name.
/// Returns true/false on success/failure.
bool emit(const StringRef &MainSrc);
/// Signals that subsequent parameter descriptor additions will go to
/// the kernel with given name. Starts new kernel invocation descriptor.
void startKernel(StringRef KernelName, QualType KernelNameType,
StringRef KernelStableName);
/// Adds a kernel parameter descriptor to current kernel invocation
/// descriptor.
void addParamDesc(kernel_param_kind_t Kind, int Info, unsigned Offset);
/// Signals that addition of parameter descriptors to current kernel
/// invocation descriptor has finished.
void endKernel();
private:
// Kernel actual parameter descriptor.
struct KernelParamDesc {
// Represents a parameter kind.
kernel_param_kind_t Kind = kind_last;
// If Kind is kind_std_layout (a plain scalar or struct), then
// denotes parameter size in bytes (includes padding for structs)
// If Kind is kind_accessor
// denotes access target; possible access targets are defined in
// access/access.hpp
int Info = 0;
// Offset of the captured parameter value in the lambda or function object.
unsigned Offset = 0;
KernelParamDesc() = default;
};
// Kernel invocation descriptor
struct KernelDesc {
/// Kernel name.
std::string Name;
/// Kernel name type.
QualType NameType;
/// Kernel name with stable lambda name mangling
std::string StableName;
/// Descriptor of kernel actual parameters.
SmallVector<KernelParamDesc, 8> Params;
KernelDesc() = default;
};
/// Returns the latest invocation descriptor started by
/// SYCLIntegrationHeader::startKernel
KernelDesc *getCurKernelDesc() {
return KernelDescs.size() > 0 ? &KernelDescs[KernelDescs.size() - 1]
: nullptr;
}
/// Emits a forward declaration for given declaration.
void emitFwdDecl(raw_ostream &O, const Decl *D);
/// Emits forward declarations of classes and template classes on which
/// declaration of given type depends. See example in the comments for the
/// implementation.
/// \param O
/// stream to emit to
/// \param T
/// type to emit forward declarations for
/// \param Emitted
/// a set of declarations for which forward declarations have already been
/// emitted
void emitForwardClassDecls(raw_ostream &O, QualType T,
llvm::SmallPtrSetImpl<const void*> &Emitted);
private:
/// Keeps invocation descriptors for each kernel invocation started by
/// SYCLIntegrationHeader::startKernel
SmallVector<KernelDesc, 4> KernelDescs;
/// Used for emitting diagnostics.
DiagnosticsEngine &Diag;
/// Whether header is generated with unnamed lambda support
bool UnnamedLambdaSupport;
};
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This
/// allows us to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref, clients should make sure all calls to get() with the same
/// location happen while function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
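/// A purely illustrative sketch (not part of the class): when the parser is
/// about to parse the operand of `return`, it can call enterReturn(S, Tok);
/// code completion triggered inside that operand then calls get(Tok) and can
/// rank candidate results by the enclosing function's return type.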
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
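// Usage sketch (illustrative; 'TheParser' and the callback names below are
// assumptions, not part of this interface). The parser registers its
// late-template-parsing hooks once, before any templated function needs to
// be parsed on demand:
//
//   static void LateParseCB(void *P, LateParsedTemplate &LPT) { /* ... */ }
//   static void LateCleanupCB(void *P) { /* ... */ }
//   S.SetLateTemplateParser(LateParseCB, LateCleanupCB, &TheParser);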
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
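// Usage sketch (illustrative; 'Pool' and 'State' are local assumptions, and
// the DelayedDiagnosticPool constructor signature is assumed): callers
// bracket a region of parsing with a pool so that access and deprecation
// diagnostics can be attached to the right declaration later.
//
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   Sema::ParsingDeclState State = S.DelayedDiagnostics.push(Pool);
//   /* ... parse; delayed diagnostics accumulate in Pool ... */
//   S.DelayedDiagnostics.popWithoutEmitting(State);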
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
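// Usage sketch (illustrative): enter 'DC' for the duration of a scope and
// let the destructor (or an early pop()) restore the previous declaration
// context, delayed-diagnostics state, and 'this' type override.
//
//   {
//     Sema::ContextRAII SavedContext(S, DC);
//     /* ... here S.CurContext == DC ... */
//   } // previous state restored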
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
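// Usage sketch (illustrative): used when Sema must fabricate a body, for
// example for an implicitly-defined special member 'FD':
//
//   SynthesizedFunctionScope Scope(S, FD);
//   Scope.addContextNote(UseLoc); // note the use that forced the synthesis
//   /* ... build and attach the body ... */
//
// Destruction pops the expression evaluation context and function scope.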
/// WeakUndeclaredIdentifiers - Identifiers contained in a \#pragma weak
/// before being declared. These are rare and may alias another identifier,
/// declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// Will hold the 'respondsToSelector:' selector.
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields.
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
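// Illustrative mapping from source constructs to the contexts above, drawn
// from the enumerator comments (a sketch, not an exhaustive list):
//   sizeof(e), decltype(e) operand      -> Unevaluated
//   braced-init-list in such an operand -> UnevaluatedList
//   not-taken branch of 'if constexpr'  -> DiscardedStatement
//   SIZE in MS-style inline assembly    -> UnevaluatedAbstract
//   case labels, array bounds           -> ConstantEvaluated
//   ordinary expression statements      -> PotentiallyEvaluated
//   default function arguments          -> PotentiallyEvaluatedIfUsed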
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
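// Usage sketch (illustrative): constructing from a deleted method yields
// NoMemberOrDeleted, so callers check the kind before using the method.
//
//   SpecialMemberOverloadResult Result(MD);
//   if (Result.getKind() == SpecialMemberOverloadResult::Success) {
//     CXXMethodDecl *Chosen = Result.getMethod();
//     /* ... */
//   }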
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of each unparsed default
// argument.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
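// Usage sketch (illustrative; 'instantiateDeeply' is a hypothetical helper):
//
//   S.runWithSufficientStackSpace(Loc, [&] { instantiateDeeply(TL); });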
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
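// Usage sketch (illustrative; the diagnostic ID is only an example):
// arguments chain through the friend operator<< defined above.
//
//   S.Diag(Loc, diag::err_typecheck_decl_incomplete_type) << T;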
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
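// Usage sketch (illustrative): the custom deleter keeps the popped scope
// alive briefly, so the caller may still inspect it after popping.
//
//   Sema::PoppedFunctionScopePtr Popped = S.PopFunctionScopeInfo(nullptr, FD);
//   /* ... examine *Popped; it is released when 'Popped' goes out of scope */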
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
SYCLIntelFPGAIVDepAttr *
BuildSYCLIntelFPGAIVDepAttr(const AttributeCommonInfo &CI, Expr *Expr1,
Expr *Expr2);
template <typename FPGALoopAttrT>
FPGALoopAttrT *BuildSYCLIntelFPGALoopAttr(const AttributeCommonInfo &A,
Expr *E);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
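// Usage sketch (illustrative): building 'void(int)' from already-checked
// component types; 'EPI' would normally be copied from an existing prototype.
//
//   FunctionProtoType::ExtProtoInfo EPI;
//   SmallVector<QualType, 1> Params = {S.Context.IntTy};
//   QualType FnTy = S.BuildFunctionType(S.Context.VoidTy, Params, Loc,
//                                       DeclarationName(), EPI);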
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
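// Usage sketch (illustrative; the diagnostic ID is only an example): a true
// result means the type was incomplete and a diagnostic was already emitted.
//
//   if (S.RequireCompleteType(Loc, T, diag::err_typecheck_decl_incomplete_type))
//     return ExprError();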
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it appeared in an
/// evaluated context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification ContextIndependentExpr(ExprResult E) {
NameClassification Result(NC_ContextIndependentExpr);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_ContextIndependentExpr);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
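// Usage sketch (illustrative; 'CurScope', 'SS', 'Name', 'NameLoc', and
// 'NextToken' are assumed parser-side locals): the parser dispatches on the
// resulting classification.
//
//   Sema::NameClassification C =
//       S.ClassifyName(CurScope, SS, Name, NameLoc, NextToken);
//   switch (C.getKind()) {
//   case Sema::NC_Type:         /* use C.getType() */         break;
//   case Sema::NC_TypeTemplate: /* use C.getTemplateName() */ break;
//   default: break;
//   }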
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
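///
/// For example, for the declaration 'export import A.B;', \p StartLoc and
/// \p ExportLoc point at 'export', \p ImportLoc points at 'import', and
/// \p Path holds the identifiers A and B.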
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
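// For illustration only -- a hypothetical call site for the simple form,
// assuming a visibility check such as Sema::isVisible() is available:
//
//   if (!S.isVisible(D))
//     S.diagnoseMissingImport(UseLoc, D, Sema::MissingImportKind::Declaration);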
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
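// For illustration only -- a hypothetical call site classifying a defaulted
// function and dispatching on the result (handleSpecialMember and
// handleComparison are made-up helpers):
//
//   Sema::DefaultedFunctionKind DFK = S.getDefaultedFunctionKind(FD);
//   if (DFK.isSpecialMember())
//     handleSpecialMember(DFK.asSpecialMember());
//   else if (DFK.isComparison())
//     handleComparison(DFK.asComparison());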
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform an ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike C++, we actually parse the body and reject / error out in case of
/// a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
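// A worked example of the priority arithmetic described above: an attribute
// applied via '#pragma clang attribute' and then inferred for another
// platform has final priority AP_PragmaClangAttribute +
// AP_InferredFromOtherPlatform = 1 + 2 = 3, so it will not replace an
// explicitly written attribute (final priority 0) already present for that
// platform.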
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
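// For illustration only -- a hypothetical call site acting on the result:
//
//   NamedDecl *OldDecl = nullptr;
//   switch (S.CheckOverload(CurScope, NewFD, Previous, OldDecl,
//                           /*IsForUsingDecl=*/false)) {
//   case Sema::Ovl_Overload:    /* add NewFD to the overload set */ break;
//   case Sema::Ovl_Match:       /* merge NewFD with OldDecl */ break;
//   case Sema::Ovl_NonFunction: /* diagnose the conflict with OldDecl */ break;
//   }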
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
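// For illustration only -- a hypothetical call site converting a case label
// to the switch condition's type:
//
//   llvm::APSInt Value;
//   ExprResult Converted = S.CheckConvertedConstantExpression(
//       CaseExpr, CondType, Value, Sema::CCEK_CaseValue);
//   if (Converted.isInvalid())
//     return StmtError();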
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
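// For illustration only -- a condensed sketch of a concrete diagnoser passed
// to PerformContextualImplicitConversion(). SomeDiagID is a placeholder; the
// remaining pure virtuals are overridden in the same pattern:
//
//   struct IntConvertDiagnoser : ICEConvertDiagnoser {
//     IntConvertDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
//                                          QualType T) override {
//       return S.Diag(Loc, SomeDiagID) << T;
//     }
//     // ... diagnoseIncomplete, diagnoseExplicitConv, noteExplicitConv,
//     // diagnoseAmbiguous, noteAmbiguous, diagnoseConversion ...
//   };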
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all templates and non-templates identified by
// the expression Expr.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
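// For example, for a user-defined literal 12_km: a lookup that finds
// 'operator""_km(unsigned long long)' yields LOLR_Cooked, one that finds
// 'operator""_km(const char *)' yields LOLR_Raw, and one that finds only
// 'template <char...> ... operator""_km()' yields LOLR_Template.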
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
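// For illustration only -- a hypothetical call site performing unqualified
// lookup for an ordinary name:
//
//   LookupResult R(SemaRef, Name, NameLoc, Sema::LookupOrdinaryName);
//   if (SemaRef.LookupName(R, CurScope) && R.isSingleResult()) {
//     NamedDecl *D = R.getFoundDecl();
//     // ... use D ...
//   }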
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Status of function emission, based on the CUDA/HIP/OpenMP host/device
/// attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
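// For illustration only -- a hypothetical filter that accepts a corrected
// expression only if it has pointer type:
//
//   ExprResult Res = S.CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, [](Expr *Rebuilt) -> ExprResult {
//         return Rebuilt->getType()->isPointerType() ? ExprResult(Rebuilt)
//                                                    : ExprError();
//       });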
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
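// Illustrative example (not from this file): given
//   typedef void (__stdcall *StdcallFn)();
//   StdcallFn F1;           // calling convention comes from the typedef
//   void (__stdcall *F2)(); // calling convention written on the declarator
// hasExplicitCallingConv is true only for F2's type.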
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) that an atomic property must have either both or neither of a
/// user-declared setter and getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns the default address space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in the global method pool for a
/// given selector. It checks the desired kind first; if none is found and
/// the CheckTheOther parameter is set, it then checks the other kind. If no
/// such method or only one method is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
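// A minimal usage sketch (hypothetical caller): the RAII object brackets the
// compound-statement callbacks so every return path stays balanced, e.g.:
//   {
//     Sema::CompoundScopeRAII BodyScope(SemaRef); // ActOnStartOfCompoundStmt
//     // ... act on the statements of the block ...
//   } // destructor runs ActOnFinishOfCompoundStmt, even on early returns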
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
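// Usage sketch (hypothetical): disable() keeps the function scope alive on
// the success path while the destructor still pops it on error paths, e.g.:
//   FunctionScopeRAII PopScopeOnError(SemaRef);
//   if (/* all checks passed */ true)
//     PopScopeOnError.disable(); // success: leave the scope pushed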
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
bool CheckRebuiltAttributedStmtAttributes(ArrayRef<const Attr *> Attrs);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
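// Illustrative trigger (not from this file):
//   x = std::move(x); // warns under -Wself-move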
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
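// Illustrative trigger (not from this file):
//   int *p = 0; // warns under -Wzero-as-null-pointer-constant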
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
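// For example, in C++ (illustrative, not from this file):
//   struct B { virtual void f(); };
//   void g(B &b) {
//     b.f();    // virtual call without a nested-name-specifier:
//               // MightBeOdrUse would be false here
//     b.B::f(); // qualified: a direct call that may be an odr-use
//   }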
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur, without performing the capture itself or
/// complaining if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
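// A minimal caller sketch (hypothetical): probe whether a capture is possible
// without performing it, then capture for real on success:
//   QualType CaptureType, DeclRefType;
//   if (!SemaRef.tryCaptureVariable(Var, Loc, Sema::TryCapture_Implicit,
//                                   SourceLocation(),
//                                   /*BuildAndDiagnose=*/false, CaptureType,
//                                   DeclRefType, nullptr))
//     SemaRef.tryCaptureVariable(Var, Loc); // adds the capture and diagnoses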
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but the diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildUniqueStableName(SourceLocation OpLoc,
TypeSourceInfo *Operand);
ExprResult BuildUniqueStableName(SourceLocation OpLoc, Expr *E);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation L,
SourceLocation R, ParsedType Ty);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation L,
SourceLocation R, Expr *Operand);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an AltiVec or OpenCL vector literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expression extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
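// For reference, the GNU extension mentioned above omits the middle operand,
// reusing the condition as the result (illustrative):
//   p = q ?: r; // equivalent to: p = q ? q : r, with q evaluated once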
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
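// Illustrative statement expression handled by the callbacks above:
//   int x = ({ int tmp = f(); tmp * 2; }); // f is any int-returning
//                                          // function; x gets the value of
//                                          // the final expression statement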
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
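// Illustrative decomposition (not from this file): for
//   __builtin_offsetof(struct S, a.b[2])
// the components are 'a' and '.b' with isBrackets false (U.IdentInfo set),
// followed by '[2]' with isBrackets true (U.E set).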
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
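// Illustrative use of the Microsoft extension these results describe:
//   __if_exists(N::foo) {
//     // entered only when N::foo exists (IER_Exists)
//   }
// A dependent name such as T::foo inside a template yields IER_Dependent.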
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal has been successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison
// category types stored in ASTContext. The bit-index corresponds to the
// integer value of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling the destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions).
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
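// A minimal usage sketch (hypothetical; Callees stands in for whatever the
// caller enumerates): accumulate the specification of each called member.
//   ImplicitExceptionSpecification Spec(SemaRef);
//   for (const CXXMethodDecl *Callee : Callees)
//     Spec.CalledDecl(Loc, Callee);
//   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();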
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
/// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
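// Illustrative sketch (example code, not part of this header): the
// fold-expression forms these entry points handle, e.g. a binary right fold
// over a parameter pack:
//
//   template <typename... Ts>
//   auto sum(Ts... ts) { return (ts + ... + 0); }  // ( expr op ... op expr )
//
// A unary fold over an empty pack is where BuildEmptyCXXFoldExpr applies;
// for example, (ts && ...) yields true when the pack is empty.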
/// ActOnCXXThis - Parse the 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
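// Hypothetical usage sketch (the names SemaRef, RD, and Loc are assumed, not
// from this header): allow 'this' while processing a context that is not a
// non-static member function body, e.g. a member's trailing return type:
//
//   CXXThisScopeRAII ThisScope(SemaRef, RD, Qualifiers(), /*Enabled=*/true);
//   ExprResult ThisExpr = SemaRef.ActOnCXXThis(Loc);  // permitted here
//   // 'this' reverts to its previous state when ThisScope is destroyed.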
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if an error occurred, false otherwise.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
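// Illustrative sketch (example code, not part of this header): the three
// source forms the comment above distinguishes:
//
//   struct Point { Point(int, int); };
//   int a = int(3.5);        // function-style cast
//   Point p = Point(1, 2);   // class type construction
//   int z = int();           // value-initialization; z == 0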
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
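// Illustrative sketch (example code, not part of this header): how the
// scope choice maps to C++ source:
//
//   #include <cstddef>
//   struct S {
//     static void *operator new(std::size_t);  // class-scope allocation
//     static void operator delete(void *);
//   };
//   S *a = new S;    // class-scope operator new is found (AFS_Class/AFS_Both)
//   S *b = ::new S;  // '::new' considers only the global scope (AFS_Global)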
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the array type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the expression trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
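// Illustrative sketch (example code, not part of this header): a
// pseudo-destructor call on a scalar type, which parses like a member
// access but destroys nothing:
//
//   typedef int Int;
//   void destroy(Int *p) { p->Int::~Int(); }  // no-op pseudo-destructor call
//   void destroy2(Int &r) { r.~Int(); }       // same, via '.' syntax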
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates an info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
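// Illustrative sketch (example code, not part of this header): for a
// nested-name-specifier such as
//
//   A::B::member
//
// the parser reports one 'identifier::' step at a time, so a
// NestedNameSpecInfo is built first with Identifier = A (CCLoc at the first
// '::'), then with Identifier = B. ObjectType is set only when the
// nested-name-specifier appears in a member access such as 'obj.A::f()'.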
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, do not emit an error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture, applying any
/// implicit conversions such as an lvalue-to-rvalue conversion if the
/// initializer is not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
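// Illustrative sketch (example code, not part of this header): the
// init-captures these entry points analyze, including the by-copy form
// whose initializer undergoes an lvalue-to-rvalue conversion:
//
//   int n = 0;
//   auto byCopy = [m = n + 1] { return m; };  // copy-init; DirectInit=false
//   auto byRef  = [&r = n] { return r; };     // reference init-capture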
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the expression needed to produce the function pointer, and IR
/// generation actually generates the real body of the function pointer
/// conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
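// Illustrative sketch (example code, not part of this header): the
// conversion whose "body" the routines above synthesize, available for
// captureless lambdas:
//
//   auto l = [](int x) { return x + 1; };
//   int (*fp)(int) = l;  // implicit conversion to function pointer
//   int r = fp(41);      // r == 42; IR generation emits the real body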
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, and false is returned.
bool CheckConstraintExpression(Expr *CE);
/// \brief Check whether the given list of constraint expressions is
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if false is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(TemplateDecl *Template,
ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange,
ConstraintSatisfaction &Satisfaction);
bool CheckConstraintSatisfaction(ClassTemplatePartialSpecializationDecl *TD,
ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange,
ConstraintSatisfaction &Satisfaction);
bool CheckConstraintSatisfaction(VarTemplatePartialSpecializationDecl *TD,
ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange,
ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful; emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check that the associated constraints of a template declaration match the
/// associated constraints of an older declaration of which it is a
/// redeclaration.
bool CheckRedeclarationConstraintMatch(TemplateParameterList *Old,
TemplateParameterList *New);
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
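// Illustrative sketch (example code, not part of this header): a constraint
// check of the kind these entry points perform, here with a C++20 concept
// (sizes assume a typical target):
//
//   template <typename T>
//   concept Small = sizeof(T) <= 4;
//
//   template <Small T> struct Box { T value; };
//   Box<int> ok;           // constraint satisfied
//   // Box<double[8]> bad; // unsatisfied; diagnosed via
//   //                     // DiagnoseUnsatisfiedConstraint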
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction& Satisfaction);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction& Satisfaction);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
StringRef Diagnostic);
/// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
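// Illustrative sketch (example code, not part of this header): the
// mem-initializers the Build*Initializer entry points above classify:
//
//   struct Base { Base(int); };
//   struct Derived : Base {
//     int m;
//     Derived(int v) : Base(v), m(v) {}  // base, then member initializer
//     Derived() : Derived(0) {}          // delegating initializer
//   };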
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point
/// later when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator.
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annoated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// CheckBaseSpecifier - Check the validity of a parsed base specifier.
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
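// Illustrative sketch (example code, not part of this header): a covariant
// return type permitted by C++ [class.virtual]p5:
//
//   struct B { virtual B *clone() const; };
//   struct D : B { D *clone() const override; };  // D* is covariant with B*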
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if the 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
/// function overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
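// Illustrative sketch (example code, not part of this header): a C++17
// deduction-guide whose name this predicate recognizes:
//
//   #include <string>
//   template <typename T> struct Wrap { Wrap(T); };
//   Wrap(const char *) -> Wrap<std::string>;  // user-defined deduction guide
//   Wrap w("hi");  // deduces Wrap<std::string> via the guide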
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
SourceLocation ConceptNameLoc, NamedDecl *FoundDecl,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
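///
/// For example (an illustrative sketch, not tied to any particular caller):
/// \code
/// template<typename T, int N> void f(T (&)[N]);
/// int a[4];
/// f(a); // T = int is CTAK_Deduced; N = 4 is CTAK_DeducedFromArrayBound
/// \endcode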
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword.
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword.
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
// Concepts
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
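///
/// For example (illustrative; the exact context value depends on where the
/// pack appears):
/// \code
/// template<typename ...Ts> struct X {
///   Ts member; // error: unexpanded parameter pack 'Ts' in a data member type
/// };
/// \endcode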
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
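///
/// For example (illustrative):
/// \code
/// template<typename ...Ts> struct D : Ts... {}; // 'Ts...' expands the pack
/// \endcode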
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// to determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
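///
/// For example (illustrative):
/// \code
/// template<typename ...T> void f(T...); // unnamed function parameter pack
/// void g(int...);                       // variadic function
/// \endcode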
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
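///
/// For example, deduction fails with \c TDK_Inconsistent here (illustrative):
/// \code
/// template<typename T> void f(T, T);
/// void g() { f(1, 2.0); } // T deduced as both 'int' and 'double'
/// \endcode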
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute \p Replacement for \c auto in \p TypeWithAuto.
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute \p Replacement for \c auto in \p TypeWithAuto.
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
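///
/// For example (illustrative):
/// \code
/// auto x = 1;        // deduction succeeds: 'int'
/// auto y = {1, 2.0}; // fails: conflicting types in the initializer list
/// \endcode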
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing.
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArgs provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// We are checking the constraints associated with a constrained entity or
/// the constraint expression of a concept. This includes the checks that
/// atomic constraints have the type 'bool' and that they can be constant
/// evaluated.
ConstraintsCheck,
/// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
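///
/// A minimal usage sketch (assuming this runs inside a Sema member function
/// and \c I names the pack element being substituted):
/// \code
/// ArgumentPackSubstitutionIndexRAII SubstIndex(*this, I);
/// // ... substitute the pattern using the I-th pack argument ...
/// \endcode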
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
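///
/// A typical usage sketch (illustrative; the exact constructor and names
/// depend on what is being instantiated):
/// \code
/// InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
/// if (Inst.isInvalid())
///   return true; // too deep; an error has already been produced
/// \endcode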
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum number of
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-null, points to the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
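///
/// For example, the operand of \c sizeof is unevaluated:
/// \code
/// int f();
/// int n = sizeof(f()); // 'f' is never called and need not be defined
/// \endcode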
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
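///
/// A minimal usage sketch (assuming a substitution is performed between
/// construction and the check, inside a Sema member function):
/// \code
/// SFINAETrap Trap(*this);
/// // ... perform a substitution that may fail ...
/// if (Trap.hasErrorOccurred())
///   return TDK_SubstitutionFailure;
/// \endcode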
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
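/// RAII scope that buffers pending instantiations and vtable uses and, when
/// \c perform() is called, defines the used vtables and performs the pending
/// instantiations.
///
/// A usage sketch (illustrative, inside a Sema member function):
/// \code
/// GlobalEagerInstantiationScope GlobalInstantiations(*this, /*Enabled=*/true);
/// // ... process code that may require instantiations ...
/// GlobalInstantiations.perform();
/// \endcode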
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
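/// RAII scope that buffers local implicit instantiations and performs them
/// via \c perform(); the local analogue of \c GlobalEagerInstantiationScope.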
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
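///
/// A minimal usage sketch (names and values are illustrative):
/// \code
/// ExtParameterInfoBuilder ParamInfos;
/// ParamInfos.set(/*index=*/2, Info); // entries 0 and 1 get default values
/// const auto *Array = ParamInfos.getPointerOrNull(/*numParams=*/4);
/// \endcode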
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
///
/// Indices must be set in strictly increasing order; skipped entries are
/// filled with default-constructed ExtParameterInfo values.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
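// A call sketch (illustrative; Args and TemplateArgs are hypothetical
// locals): substituting into a call's argument list, where default
// arguments are dropped because IsCall is true.
//
//   SmallVector<Expr *, 8> ConvertedArgs;
//   if (SubstExprs(Args, /*IsCall=*/true, TemplateArgs, ConvertedArgs))
//     return ExprError();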
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
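// An illustrative mapping from source forms to the kinds above (the
// Objective-C snippets are examples, not an exhaustive classification):
//   [super init]     -> ObjCSuperMessage
//   [myObject foo]   -> ObjCInstanceMessage
//   [NSString alloc] -> ObjCClassMessage (the identifier names a type)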
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on \#pragma clang __debug dump II.
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch.
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS.
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
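// A sketch of the invalid-location convention above (illustrative only):
//
//   if (getOptimizeOffPragmaLocation().isValid())
//     ; // a "#pragma clang optimize off" region is active here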
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
template <typename AttrType>
bool checkRangedIntegralArgument(Expr *E, const AttrType *TmpAttr,
ExprResult &Result);
template <typename AttrType>
void AddOneConstantValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
template <typename AttrType>
void AddOneConstantPowerTwoValueAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
void AddIntelFPGABankBitsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr **Exprs, unsigned Size);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD is associated with any
/// extensions present in OpenCLDeclExtMap and, if so, return the
/// extension name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT is associated with any
/// extensions present in OpenCLTypeExtMap and, if so, return the
/// extension name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name.
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
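// A registration sketch (illustrative; DoubleTy and FD are hypothetical
// locals and the extension names are examples): extension lists are
// space-separated, and a guarded type or declaration is later rejected by
// the check* functions above unless every listed extension is enabled.
//
//   setOpenCLExtensionForType(DoubleTy, "cl_khr_fp64");
//   setOpenCLExtensionForDecl(FD, "cl_khr_subgroups cl_khr_fp16");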
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function
/// during device compilation.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckForDelayedContext = true);
/// Check whether we're allowed to call Callee from the current function
/// during host compilation.
void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckCaller = true);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis();
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Marks all the functions that might be required for the currently active
/// OpenMP context.
void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
FunctionDecl *Func,
bool MightBeOdrUse);
public:
/// Types used to store the context selector info for the declare variant
/// directive.
using OMPCtxStringType = SmallString<8>;
using OMPCtxSelectorData =
OpenMPCtxSelectorData<SmallVector<OMPCtxStringType, 4>, ExprResult>;
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture a lambda's captured variables in the OpenMP region
/// before the lambda itself is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on a '#pragma omp requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
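// The expected call sequence for '#pragma omp declare reduction' (a sketch
// of the protocol implied by the declarations above, not additional API;
// the arguments are placeholders):
//
//   DeclGroupPtrTy DRD = ActOnOpenMPDeclareReductionDirectiveStart(...);
//   ActOnOpenMPDeclareReductionCombinerStart(S, D);
//   ActOnOpenMPDeclareReductionCombinerEnd(D, Combiner);
//   VarDecl *OmpPrivParm =
//       ActOnOpenMPDeclareReductionInitializerStart(S, D);      // optional
//   ActOnOpenMPDeclareReductionInitializerEnd(D, Init, OmpPrivParm);
//   ActOnOpenMPDeclareReductionDirectiveEnd(S, DRD, IsValid);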
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
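// The analogous sequence for '#pragma omp declare mapper' (sketch only;
// arguments are placeholders):
//
//   OMPDeclareMapperDecl *DMD = ActOnOpenMPDeclareMapperDirectiveStart(...);
//   ActOnOpenMPDeclareMapperDirectiveVarDecl(DMD, S, MapperType, StartLoc, VN);
//   // ... map clauses are parsed and collected into ClauseList ...
//   ActOnOpenMPDeclareMapperDirectiveEnd(DMD, S, ClauseList);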
/// Called on the start of a declare target region, i.e. '#pragma omp declare
/// target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a declare target region, i.e. '#pragma omp end
/// declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true if inside an OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true if inside an OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of the captured region for an OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \returns None if the function and the variant function are not compatible
/// with the pragma; otherwise, the pair of the original function and the
/// variant ref expression.
Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(
DeclGroupPtrTy DG, Expr *VariantRef, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param Data Set of context-specific data for the specified context
/// selector.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
SourceRange SR,
ArrayRef<OMPCtxSelectorData> Data);
/// Generic handler for OpenMP clauses that take a single expression
/// argument.
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Generic handler for OpenMP clauses that take a single enumeration
/// argument.
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Generic handler for OpenMP clauses that take an expression plus
/// additional enumeration-style arguments.
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
/// Generic handler for OpenMP clauses that take no arguments.
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Generic handler for OpenMP clauses that take a list of variables.
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
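/// Illustrative mapping from source syntax to kind (a sketch for
/// orientation only, assuming a hypothetical variable 'double x'):
///
///   int i = x;             // CCK_ImplicitConversion
///   (int)x                 // CCK_CStyleCast
///   int(x)                 // CCK_FunctionalCast
///   static_cast<int>(x)    // CCK_OtherCast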
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The result of the cast has the value kind \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
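/// For orientation (illustrative, not exhaustive): a call to a C variadic
/// function such as printf is classified as VariadicFunction, a variadic
/// block call as VariadicBlock, a variadic Objective-C message send as
/// VariadicMethod, and a call through a fixed prototype as
/// VariadicDoesNotApply.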
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
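/// For example, passing an int or a double through '...' is VAK_Valid,
/// while passing a class type with a non-trivial copy constructor is
/// conditionally supported in C++11 (VAK_ValidInCXX11) and undefined
/// behavior before that (VAK_Undefined).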
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If the operands aren't both arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
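/// A few illustrative C assignments and their classification (a sketch for
/// orientation; the authoritative logic is in CheckAssignmentConstraints
/// below):
///
///   int *p = 42;            // IntToPointer (accepted as an extension)
///   int n = p;              // PointerToInt (accepted as an extension)
///   const char **q = cpp;   // IncompatibleNestedPointerQualifiers,
///                           // given 'char **cpp'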
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
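/// Illustrative bindings (a sketch, assuming hypothetical declarations
/// 'struct Derived : Base', 'Derived d', 'const int ci', and 'long l'):
///
///   const Base &b = d;   // Ref_Compatible: Base is a base class of Derived
///   int &r = ci;         // Ref_Related: same unqualified type, but the
///                        // binding would drop 'const'
///   int &r2 = l;         // Ref_Incompatible: unrelated types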
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
bool &DerivedToBase, bool &ObjCConversion,
bool &ObjCLifetimeConversion,
bool &FunctionConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expr.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
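/// The result of checking a condition: the condition variable (if any), the
/// converted condition expression, and, for conditions whose value is known
/// at compile time (e.g. 'if constexpr'), that known value.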
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
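/// For reference, the construct each kind corresponds to:
///
///   if (x) ...              // ConditionKind::Boolean
///   if constexpr (x) ...    // ConditionKind::ConstexprIf
///   switch (x) ...          // ConditionKind::Switch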
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the converted condition expression, or an invalid result if
/// there were any errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Converts the condition expression to bool;
/// returns an invalid result if the conversion fails.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns an invalid result on
/// failure. Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit-field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns an invalid result on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
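/// Illustrative caller -> callee combinations (a sketch; the authoritative
/// logic is in IdentifyCUDAPreference below):
///
///   __device__ -> __device__                        : CFP_Native
///   any caller -> __host__ __device__               : CFP_HostDevice
///   __host__ __device__ -> __device__ (device mode) : CFP_SameSide
///   __host__ __device__ -> __host__   (device mode) : CFP_WrongSide
///   __host__   -> __device__                        : CFP_Never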
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
/// Checks that initializers of \p Var satisfy CUDA restrictions. In
/// case of error emits appropriate diagnostic and invalidates \p Var.
///
/// CUDA allows only empty constructors as initializers for global
/// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
/// __shared__ variables whether they are local or not (they all are implicitly
/// static in CUDA). One exception is that CUDA allows constant initializers
/// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure a kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDecl.
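// As an illustrative sketch (the function below is hypothetical), for
//   void log(int level, const char *fmt, ...)
//       __attribute__((format(printf, 2, 3)));
// the attribute yields the index of the format string and of the first
// variadic data argument; IsCXXMember adjusts these indices to account for
// the implicit 'this' parameter of non-static member functions.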
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckIntelFPGABuiltinFunctionCall(unsigned BuiltinID, CallExpr *Call);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
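// For example (an illustrative declaration, not part of this header),
//   extern int my_printf(const char *fmt, ...)
//       __attribute__((format(printf, 1, 2)));
// carries a FormatAttr that GetFormatStringType() classifies as FST_Printf,
// while __attribute__((format(strftime, 1, 0))) yields FST_Strftime.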
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check whether a declaration shadows a field inherited from a base class.
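/// A minimal sketch of the diagnosed pattern (illustrative types, guarded
/// by -Wshadow-field):
/// \code
///   struct A { int x; };
///   struct B : A { int x; }; // B::x shadows A::x
/// \endcode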
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
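/// The classic instance (an illustrative Objective-C snippet with a
/// hypothetical variable name):
/// \code
///   [mutableArray addObject:mutableArray]; // container would retain itself
/// \endcode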
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
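/// A sketch of the user-level pairing that feeds this registration (the MPI
/// names are illustrative, following the documented attribute syntax):
/// \code
///   typedef int MPI_Datatype;
///   int MPI_Send(void *buf, int count, MPI_Datatype type)
///       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
///   static const MPI_Datatype mpi_int
///       __attribute__((type_tag_for_datatype(mpi, int))) = 42;
/// \endcode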
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. Together they
/// uniquely identify a registered type tag.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
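/// A minimal sketch of the flagged pattern (illustrative declarations,
/// diagnosed under -Waddress-of-packed-member):
/// \code
///   struct __attribute__((packed)) S { char c; int i; } s;
///   int *p = &s.i; // taking the address drops alignment from 4 to 1
/// \endcode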
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the arguments being passed to a
/// function exceed the number of parameters expected for it.
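/// For instance (an illustrative completion scenario): completing inside
/// \code
///   void f(int, int);
///   f(a, b, /*completion point*/
/// \endcode
/// arrives here with NumParams == 2, NumArgs == 2 and PartialOverloading
/// set; the pending argument after the comma is counted, so the call is
/// treated as having too many arguments.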
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, treat it as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members, and if it is converted to some pointer type T with
/// lower or equal alignment requirements. If so, it removes the expression
/// from the set. This is used when we do not want to diagnose such
/// misaligned access (e.g. in conversions to void*).
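/// For example (illustrative, mirroring -Waddress-of-packed-member):
/// \code
///   struct __attribute__((packed)) S { char c; int i; } s;
///   void *p = &s.i; // not diagnosed: void* makes no alignment promise
/// \endcode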
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
private:
// We store SYCL kernels here and handle them separately -- which is a hack.
// FIXME: It would be best to refactor this.
SmallVector<Decl*, 4> SyclDeviceDecls;
// The SYCL integration header instance for the current compilation unit
// this Sema is associated with.
std::unique_ptr<SYCLIntegrationHeader> SyclIntHeader;
public:
void addSyclDeviceDecl(Decl *d) { SyclDeviceDecls.push_back(d); }
SmallVectorImpl<Decl *> &syclDeviceDecls() { return SyclDeviceDecls; }
/// Lazily creates and returns the SYCL integration header instance.
SYCLIntegrationHeader &getSyclIntegrationHeader() {
if (SyclIntHeader == nullptr)
SyclIntHeader = std::make_unique<SYCLIntegrationHeader>(
getDiagnostics(), getLangOpts().SYCLUnnamedLambda);
return *SyclIntHeader.get();
}
enum SYCLRestrictKind {
KernelGlobalVariable,
KernelRTTI,
KernelNonConstStaticDataVariable,
KernelCallVirtualFunction,
KernelUseExceptions,
KernelCallRecursiveFunction,
KernelCallFunctionPointer,
KernelAllocateStorage,
KernelUseAssembly,
KernelHavePolymorphicClass,
KernelCallDllimportFunction,
KernelCallVariadicFunction
};
DeviceDiagBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
void ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc, MangleContext &MC);
void MarkDevice(void);
bool CheckSYCLCall(SourceLocation Loc, FunctionDecl *Callee);
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
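// A minimal usage sketch (illustrative; SemaRef and the enclosing scope are
// hypothetical): the context is pushed on construction and popped when the
// object is destroyed, e.g.
//
//   {
//     EnterExpressionEvaluationContext Unevaluated(
//         SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
//     // ... build or parse an unevaluated operand here ...
//   } // PopExpressionEvaluationContext() runs here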
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
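// Providing this specialization lets FunctionDeclAndLoc serve as a key in
// llvm::DenseMap/DenseSet, e.g. (illustrative)
// llvm::DenseSet<clang::Sema::FunctionDeclAndLoc>.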
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif