source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
decorate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE %
% D D E C O O R R A A T E %
% D D EEE C O O RRRR AAAAA T EEE %
% D D E C O O R R A A T E %
% DDDD EEEEE CCCC OOO R R A A T EEEEE %
% %
% %
% MagickCore Image Decoration Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
/*
Define declarations.
*/
#define AccentuateModulate ScaleCharToQuantum(80)
#define HighlightModulate ScaleCharToQuantum(125)
#define ShadowModulate ScaleCharToQuantum(135)
#define DepthModulate ScaleCharToQuantum(185)
#define TroughModulate ScaleCharToQuantum(110)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B o r d e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BorderImage() surrounds the image with a border of the color defined by
% the bordercolor member of the image structure. The width and height
% of the border are defined by the corresponding members of the border_info
% structure.
%
% The format of the BorderImage method is:
%
% Image *BorderImage(const Image *image,const RectangleInfo *border_info,
% const CompositeOperator compose,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o border_info: define the width and height of the border.
%
% o compose: the composite operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BorderImage(const Image *image,
  const RectangleInfo *border_info,const CompositeOperator compose,
  ExceptionInfo *exception)
{
  FrameInfo
    frame_geometry;

  Image
    *canvas,
    *framed;

  /*
    Validate arguments.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(border_info != (RectangleInfo *) NULL);
  /*
    Express the border as a bevel-free frame: the requested border width and
    height are added on both sides of the image.
  */
  frame_geometry.width=image->columns+2*border_info->width;
  frame_geometry.height=image->rows+2*border_info->height;
  frame_geometry.x=(ssize_t) border_info->width;
  frame_geometry.y=(ssize_t) border_info->height;
  frame_geometry.inner_bevel=0;
  frame_geometry.outer_bevel=0;
  /*
    FrameImage() paints with the matte color, so substitute the border color
    on a clone and restore the original matte color afterwards.
  */
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  canvas->matte_color=image->border_color;
  framed=FrameImage(canvas,&frame_geometry,compose,exception);
  canvas=DestroyImage(canvas);
  if (framed != (Image *) NULL)
    framed->matte_color=image->matte_color;
  return(framed);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F r a m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FrameImage() adds a simulated three-dimensional border around the image.
% The color of the border is defined by the matte_color member of image.
% Members width and height of frame_info specify the border width of the
% vertical and horizontal sides of the frame. Members inner and outer
% indicate the width of the inner and outer shadows of the frame.
%
% The format of the FrameImage method is:
%
% Image *FrameImage(const Image *image,const FrameInfo *frame_info,
% const CompositeOperator compose,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o frame_info: Define the width and height of the frame and its bevels.
%
% o compose: the composite operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info,
  const CompositeOperator compose,ExceptionInfo *exception)
{
#define FrameImageTag "Frame/Image"

  CacheView
    *image_view,
    *frame_view;

  Image
    *frame_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  /*
    Shading palette for the simulated 3-D bevels: accentuate/highlight are
    the matte color tinted towards white, shadow/trough are the matte color
    shaded towards black.
  */
  PixelInfo
    accentuate,
    highlight,
    matte,
    shadow,
    trough;

  ssize_t
    x_offset,
    y_offset;

  size_t
    bevel_width,
    height;

  ssize_t
    y;

  /*
    Check frame geometry.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(frame_info != (FrameInfo *) NULL);
  if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel);
  /*
    Interior space remaining for the image once the left/top offset and the
    bevels are subtracted; it must hold the whole image.
  */
  x_offset=(ssize_t) frame_info->width-frame_info->x-bevel_width;
  y_offset=(ssize_t) frame_info->height-frame_info->y-bevel_width;
  if ((x_offset < (ssize_t) image->columns) ||
      (y_offset < (ssize_t) image->rows))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  /*
    Initialize framed image attributes.
  */
  frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
    exception);
  if (frame_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(frame_image,DirectClass,exception) == MagickFalse)
    {
      frame_image=DestroyImage(frame_image);
      return((Image *) NULL);
    }
  /*
    A non-gray border color cannot be represented in a gray colorspace;
    promote to sRGB in that case.
  */
  if ((IsPixelInfoGray(&frame_image->border_color) == MagickFalse) &&
      (IsGrayColorspace(frame_image->colorspace) != MagickFalse))
    (void) SetImageColorspace(frame_image,sRGBColorspace,exception);
  if ((frame_image->matte_color.alpha_trait != UndefinedPixelTrait) &&
      (frame_image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlpha(frame_image,OpaqueAlpha,exception);
  /*
    Grow the page geometry along with the enlarged canvas.
  */
  frame_image->page=image->page;
  if ((image->page.width != 0) && (image->page.height != 0))
    {
      frame_image->page.width+=frame_image->columns-image->columns;
      frame_image->page.height+=frame_image->rows-image->rows;
    }
  /*
    Initialize 3D effects color.  Each tint blends the matte color with white
    by the given modulation factor; each shade scales it towards black.
  */
  matte=image->matte_color;
  accentuate=matte;
  accentuate.red=(QuantumScale*((QuantumRange-(double) AccentuateModulate)*
    matte.red+(QuantumRange*(double) AccentuateModulate)));
  accentuate.green=(QuantumScale*((QuantumRange-(double) AccentuateModulate)*
    matte.green+(QuantumRange*(double) AccentuateModulate)));
  accentuate.blue=(QuantumScale*((QuantumRange-(double) AccentuateModulate)*
    matte.blue+(QuantumRange*(double) AccentuateModulate)));
  accentuate.black=(QuantumScale*((QuantumRange-(double) AccentuateModulate)*
    matte.black+(QuantumRange*(double) AccentuateModulate)));
  accentuate.alpha=matte.alpha;
  highlight=matte;
  highlight.red=(QuantumScale*((QuantumRange-(double) HighlightModulate)*
    matte.red+(QuantumRange*(double) HighlightModulate)));
  highlight.green=(QuantumScale*((QuantumRange-(double) HighlightModulate)*
    matte.green+(QuantumRange*(double) HighlightModulate)));
  highlight.blue=(QuantumScale*((QuantumRange-(double) HighlightModulate)*
    matte.blue+(QuantumRange*(double) HighlightModulate)));
  highlight.black=(QuantumScale*((QuantumRange-(double) HighlightModulate)*
    matte.black+(QuantumRange*(double) HighlightModulate)));
  highlight.alpha=matte.alpha;
  shadow=matte;
  shadow.red=QuantumScale*matte.red*ShadowModulate;
  shadow.green=QuantumScale*matte.green*ShadowModulate;
  shadow.blue=QuantumScale*matte.blue*ShadowModulate;
  shadow.black=QuantumScale*matte.black*ShadowModulate;
  shadow.alpha=matte.alpha;
  trough=matte;
  trough.red=QuantumScale*matte.red*TroughModulate;
  trough.green=QuantumScale*matte.green*TroughModulate;
  trough.blue=QuantumScale*matte.blue*TroughModulate;
  trough.black=QuantumScale*matte.black*TroughModulate;
  trough.alpha=matte.alpha;
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  frame_view=AcquireAuthenticCacheView(frame_image,exception);
  /*
    Height of the band above the image: outer bevel + top border + inner
    bevel.
  */
  height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
    frame_info->inner_bevel);
  if (height != 0)
    {
      size_t
        width;

      ssize_t
        x;

      Quantum
        *magick_restrict q;

      /*
        Draw top of ornamental border.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
        height,exception);
      if (q != (Quantum *) NULL)
        {
          /*
            Draw top of ornamental border.
          */
          for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
          {
            /* Outer-bevel top face: highlight in the mitered left corner,
               accentuate across the top, shadow on the right edge. */
            for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
            {
              if (x < y)
                SetPixelViaPixelInfo(frame_image,&highlight,q);
              else
                SetPixelViaPixelInfo(frame_image,&accentuate,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          /* Flat top border between the outer and inner bevels. */
          for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          /* Inner-bevel top face, sloping inwards towards the image. */
          for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            width=image->columns+((size_t) frame_info->inner_bevel << 1)-
              y;
            for (x=0; x < (ssize_t) width; x++)
            {
              if (x < y)
                SetPixelViaPixelInfo(frame_image,&shadow,q);
              else
                SetPixelViaPixelInfo(frame_image,&trough,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  /*
    Draw sides of ornamental border.  Each scanline alongside the image is
    independent, so rows are processed in parallel.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,frame_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    Quantum
      *magick_restrict q;

    size_t
      width;

    /*
      Initialize scanline with matte color.
    */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
      frame_image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Left side: outer bevel, border, inner bevel. */
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&highlight,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
    {
      SetPixelViaPixelInfo(frame_image,&matte,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&shadow,q);
      q+=GetPixelChannels(frame_image);
    }
    /*
      Set frame interior pixels.  These are overwritten by the final
      CompositeImage() call below.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(frame_image,&frame_image->border_color,q);
      q+=GetPixelChannels(frame_image);
    }
    /* Right side: inner bevel, border, outer bevel. */
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&highlight,q);
      q+=GetPixelChannels(frame_image);
    }
    width=frame_info->width-frame_info->x-image->columns-bevel_width;
    for (x=0; x < (ssize_t) width; x++)
    {
      SetPixelViaPixelInfo(frame_image,&matte,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&shadow,q);
      q+=GetPixelChannels(frame_image);
    }
    if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FrameImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Height of the band below the image: inner bevel + bottom border + outer
    bevel.
  */
  height=(size_t) (frame_info->inner_bevel+frame_info->height-
    frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
  if (height != 0)
    {
      size_t
        width;

      ssize_t
        x;

      Quantum
        *magick_restrict q;

      /*
        Draw bottom of ornamental border.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
        height),frame_image->columns,height,exception);
      if (q != (Quantum *) NULL)
        {
          /*
            Draw bottom of ornamental border.
          */
          for (y=frame_info->inner_bevel-1; y >= 0; y--)
          {
            /* Inner-bevel bottom face (mirrors the top inner bevel). */
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < y; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
                SetPixelViaPixelInfo(frame_image,&highlight,q);
              else
                SetPixelViaPixelInfo(frame_image,&accentuate,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          /* Flat bottom border between the bevels. */
          height=frame_info->height-frame_info->y-image->rows-bevel_width;
          for (y=0; y < (ssize_t) height; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          /* Outer-bevel bottom face. */
          for (y=frame_info->outer_bevel-1; y >= 0; y--)
          {
            for (x=0; x < y; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              if (x >= (ssize_t) (frame_image->columns-y))
                SetPixelViaPixelInfo(frame_image,&shadow,q);
              else
                SetPixelViaPixelInfo(frame_image,&trough,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  frame_view=DestroyCacheView(frame_view);
  image_view=DestroyCacheView(image_view);
  /*
    Composite the source image into the frame interior.
  */
  x_offset=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
    frame_info->inner_bevel);
  y_offset=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
    frame_info->inner_bevel);
  if (status != MagickFalse)
    status=CompositeImage(frame_image,image,compose,MagickTrue,x_offset,
      y_offset,exception);
  if (status == MagickFalse)
    frame_image=DestroyImage(frame_image);
  return(frame_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RaiseImage() creates a simulated three-dimensional button-like effect
% by lightening and darkening the edges of the image. Members width and
% height of raise_info define the width of the vertical and horizontal
% edge of the effect.
%
% The format of the RaiseImage method is:
%
% MagickBooleanType RaiseImage(const Image *image,
% const RectangleInfo *raise_info,const MagickBooleanType raise,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o raise_info: Define the width and height of the raise area.
%
% o raise: A value other than zero creates a 3-D raise effect,
% otherwise it has a lowered effect.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RaiseImage(Image *image,
  const RectangleInfo *raise_info,const MagickBooleanType raise,
  ExceptionInfo *exception)
{
#define AccentuateFactor ScaleCharToQuantum(135)
#define HighlightFactor ScaleCharToQuantum(190)
#define ShadowFactor ScaleCharToQuantum(190)
#define RaiseImageTag "Raise/Image"
#define TroughFactor ScaleCharToQuantum(135)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  /*
    Blend targets: pixels on lightened edges blend towards 'foreground',
    darkened edges towards 'background'.  The 'raise' flag swaps them.
  */
  Quantum
    foreground,
    background;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(raise_info != (RectangleInfo *) NULL);
  /* The bevel must fit: the image must exceed twice the bevel size. */
  if ((image->columns <= (raise_info->width << 1)) ||
      (image->rows <= (raise_info->height << 1)))
    ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth",
      image->filename);
  foreground=QuantumRange;
  background=(Quantum) 0;
  if (raise == MagickFalse)
    {
      foreground=(Quantum) 0;
      background=QuantumRange;
    }
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Raise image.  The image is processed in three horizontal bands: top edge,
    middle (left/right edges only), and bottom edge.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,raise_info->height,1)
#endif
  for (y=0; y < (ssize_t) raise_info->height; y++)
  {
    ssize_t
      i,
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Top band: left corner wedge (x < y) gets the highlight factor... */
    for (x=0; x < y; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    /* ...the top run the accentuate factor... */
    for ( ; x < (ssize_t) (image->columns-y); x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*AccentuateFactor+
          (double) foreground*(QuantumRange-AccentuateFactor)));
      }
      q+=GetPixelChannels(image);
    }
    /* ...and the right corner wedge the shadow factor. */
    for ( ; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,RaiseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows-2*raise_info->height,1)
#endif
  for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++)
  {
    ssize_t
      i,
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Middle band: highlight the left edge... */
    for (x=0; x < (ssize_t) raise_info->width; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    /* ...leave the interior untouched (pointer is only advanced)... */
    for ( ; x < (ssize_t) (image->columns-raise_info->width); x++)
      q+=GetPixelChannels(image);
    /* ...and shadow the right edge. */
    for ( ; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,RaiseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows-raise_info->height,1)
#endif
  for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++)
  {
    ssize_t
      i,
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Bottom band: left corner wedge (width shrinks towards the last row)
       gets the highlight factor... */
    for (x=0; x < (ssize_t) (image->rows-y); x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    /* ...the bottom run the trough factor... */
    for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*TroughFactor+
          (double) background*(QuantumRange-TroughFactor)));
      }
      q+=GetPixelChannels(image);
    }
    /* ...and the right corner wedge the shadow factor. */
    for ( ; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,RaiseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
GB_unaryop__ainv_int8_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int8_fp64
// op(A') function: GB_tran__ainv_int8_fp64
// C type: int8_t
// A type: double
// cast: int8_t cij ; GB_CAST_SIGNED(cij,aij,8)
// unaryop: cij = -aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
int8_t z ; GB_CAST_SIGNED(z,aij,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = op (cast (Ax)): cast each double Ax [p] to int8_t, then negate.
   Auto-generated kernel; the per-entry work is defined by the GB_* macros
   above.  Returns GrB_NO_VALUE when the operator/type combination has been
   compiled out via GB_DISABLE. */
GrB_Info GB_unop__ainv_int8_fp64
(
    int8_t *Cx,                 /* Cx and Ax may be aliased */
    double *Ax,
    int64_t anz,                /* number of entries to process */
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    /* entries are independent, so a static schedule suffices */
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose, typecast, and apply the unary operator.
   The actual kernel body is shared by all unary ops and pulled in from
   GB_unaryop_transpose.c, specialized via the GB_* macros defined above. */
GrB_Info GB_tran__ainv_int8_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
omp_taskloop_grainsize.c | // RUN: %libomp-compile-and-run
// RUN: %libomp-compile && env KMP_TASKLOOP_MIN_TASKS=1 %libomp-run
// These compilers don't support the taskloop construct
// UNSUPPORTED: gcc-4, gcc-5, icc-16
// GCC 6 has support for taskloops, but at least 6.3.0 is crashing on this test
// UNSUPPORTED: gcc-6
/*
* Test for taskloop
* Method: calculate how many times the iteration space is dispatched
* and judge if each dispatch has the requested grainsize
* It is possible for two adjacent chunks to be executed by the same thread
*/
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include "omp_testsuite.h"
#define CFDMAX_SIZE 1120
/* Runs a taskloop for every grainsize in [1, 48) and verifies that
   (a) every iteration was executed and (b) each dispatched chunk has a
   size consistent with the requested grainsize.  Returns nonzero (true)
   on success, zero on failure. */
int test_omp_taskloop_grainsize()
{
  int result = 0;
  int i, grainsize, count, tmp_count, num_off;
  int *tmp, *tids, *tidsArray;

  tidsArray = (int *)malloc(sizeof(int) * CFDMAX_SIZE);
  tids = tidsArray;

  for (grainsize = 1; grainsize < 48; ++grainsize) {
    fprintf(stderr, "Grainsize %d\n", grainsize);
    count = tmp_count = num_off = 0;

    /* -1 marks an untouched iteration. */
    for (i = 0; i < CFDMAX_SIZE; ++i) {
      tids[i] = -1;
    }

    #pragma omp parallel shared(tids)
    {
      #pragma omp master
      #pragma omp taskloop grainsize(grainsize)
      for (i = 0; i < CFDMAX_SIZE; i++) {
        tids[i] = omp_get_thread_num();
      }
    }

    /* Every iteration must have recorded an executing thread. */
    for (i = 0; i < CFDMAX_SIZE; ++i) {
      if (tids[i] == -1) {
        fprintf(stderr, "  Iteration %d not touched!\n", i);
        result++;
      }
    }

    /* Count chunk boundaries: a boundary is where the executing thread
       changes between adjacent iterations. */
    for (i = 0; i < CFDMAX_SIZE - 1; ++i) {
      if (tids[i] != tids[i + 1]) {
        count++;
      }
    }

    /* tmp[j] accumulates the length of the j-th observed chunk. */
    tmp = (int *)malloc(sizeof(int) * (count + 1));
    tmp[0] = 1;
    for (i = 0; i < CFDMAX_SIZE - 1; ++i) {
      if (tmp_count > count) {
        printf("--------------------\nTestinternal Error: List too "
               "small!!!\n--------------------\n");
        break;
      }
      if (tids[i] != tids[i + 1]) {
        tmp_count++;
        tmp[tmp_count] = 1;
      } else {
        tmp[tmp_count]++;
      }
    }

    // is grainsize statement working?
    /* Chunks are expected to be a multiple of either floor or ceil of the
       average chunk size (adjacent chunks run by the same thread merge
       into one observed chunk, hence "multiple of" rather than equality). */
    int num_tasks = CFDMAX_SIZE / grainsize;
    int multiple1 = CFDMAX_SIZE / num_tasks;
    int multiple2 = CFDMAX_SIZE / num_tasks + 1;
    for (i = 0; i < count; i++) {
      // it is possible for 2 adjacent chunks to be assigned to the same thread
      if (tmp[i] % multiple1 != 0 && tmp[i] % multiple2 != 0) {
        num_off++;
      }
    }
    /* One irregular chunk (the last, partial one) is tolerated. */
    if (num_off > 1) {
      fprintf(stderr, "  The number of bad chunks is %d\n", num_off);
      result++;
    } else {
      fprintf(stderr, "  Everything ok\n");
    }

    free(tmp);
  }

  free(tidsArray);
  return (result==0);
}
/* Driver: repeat the taskloop grainsize test and report the number of
   failing repetitions as the exit status (0 == all passed). */
int main()
{
  int failures = 0;
  for (int rep = 0; rep < REPETITIONS; rep++) {
    failures += test_omp_taskloop_grainsize() ? 0 : 1;
  }
  return failures;
}
|
poismf.c | /*
Poisson Factorization for sparse matrices
Based on alternating proximal gradient iteration or conjugate gradient.
Variables must be initialized from outside the main function ('run_poismf').
Written for the C99 standard and OpenMP 2.0 or later.
Reference paper is:
Cortes, David.
"Fast Non-Bayesian Poisson Factorization for Implicit-Feedback Recommendations."
arXiv preprint arXiv:1811.01908 (2018).
BSD 2-Clause License
Copyright (c) 2018-2021, David Cortes
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "poismf.h"
/* Interrupt handler */
bool should_stop_procedure = false;
bool handle_is_locked = false;
/* Signal callback: flags the running procedure to stop at its next
   check of 'should_stop_procedure'.  The typo in the name ("interrup")
   is preserved as-is since it is part of the public interface.
   NOTE(review): fprintf and '#pragma omp critical' are not
   async-signal-safe; presumably acceptable for this library's usage —
   confirm against how the handler is installed by callers. */
void set_interrup_global_variable(int s)
{
    #pragma omp critical
    {
        fprintf(stderr, "Error: procedure was interrupted\n");
        should_stop_procedure = true;
    }
}
/* For making sure that it's multi-threaded */
/* Reports whether this translation unit was compiled with OpenMP support
   (lets callers verify that multi-threading is actually available). */
bool get_has_openmp(void)
{
#if defined(_OPENMP)
    const bool compiled_with_openmp = true;
#else
    const bool compiled_with_openmp = false;
#endif
    return compiled_with_openmp;
}
/* Helper functions */
#define nonneg(x) (((x) > 0.)? (x) : 0.)
/* Scales x (length n) by alpha.  BLAS uses 'int' lengths, so arrays with
   n >= INT_MAX fall back to a plain loop instead of cblas_tscal. */
void dscal_large(size_t n, real_t alpha, real_t *restrict x)
{
    if (n >= (size_t)INT_MAX) {
        for (size_t ix = 0; ix < n; ix++)
            x[ix] *= alpha;
        return;
    }
    cblas_tscal((int)n, alpha, x, 1);
}
/* Writes the column sums of the row-major nrow-by-ncol matrix M into
   out (length ncol), overwriting any previous contents of out. */
void sum_by_cols(real_t *restrict out, real_t *restrict M, size_t nrow, size_t ncol)
{
    memset(out, 0, ncol * sizeof(real_t));
    for (size_t row = 0; row < nrow; row++)
    {
        real_t *restrict M_row = M + row * ncol;
        for (size_t col = 0; col < ncol; col++)
            out[col] += M_row[col];
    }
}
/* Per-row weighted penalty vectors for the case w_mult != 1:
     Bsum_user[row,:] = Bsum[:] + (w_mult - 1) * sum_{j in X[row,:]} B[j,:]
   B          : fixed factor matrix (row-major, k columns)
   Bsum       : column sums of B (length k), shared by all rows
   Bsum_user  : output buffer (dimA x k), fully overwritten
   Xr_indices, Xr_indptr : CSR pattern of the non-zero entries per row */
void adjustment_Bsum
(
    real_t *restrict B,
    real_t *restrict Bsum,
    real_t *restrict Bsum_user,
    sparse_ix Xr_indices[],
    sparse_ix Xr_indptr[],
    size_t dimA, size_t k,
    real_t w_mult, int nthreads
)
{
    /* OpenMP < 3.0 requires signed loop counters in 'omp for'. */
    #if defined(_OPENMP) && ((_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64)) /* OpenMP < 3.0 */
    long long ix = 0;
    long long row = 0;
    #else
    size_t ix = 0;
    size_t row = 0;
    #endif
    int k_int = (int) k;
    memset(Bsum_user, 0, dimA*k*sizeof(real_t));
    /* Step 1: accumulate the sum of B rows referenced by each row of X.
       The inner 'ix' intentionally shadows the outer declaration. */
    #pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
            shared(dimA, Xr_indptr, Xr_indices, B, Bsum_user, k_int)
    for (row = 0; row < dimA; row++)
        for (size_t ix = Xr_indptr[row]; ix < Xr_indptr[row + 1]; ix++)
            cblas_taxpy(k_int, 1., B + Xr_indices[ix]*k, 1, Bsum_user + row*k, 1);
    size_t n = dimA * k;
    real_t new_w = w_mult - 1.;
    /* Step 2: scale everything by (w_mult - 1). */
    /* Note: don't use daxpy here as 'n' might be larger than INT_MAX */
    #pragma omp parallel for schedule(static) num_threads(nthreads) \
            shared(n, new_w, Bsum_user)
    for (ix = 0; ix < n; ix++)
        Bsum_user[ix] *= new_w;
    /* Step 3: add the shared column-sum vector to every row. */
    #pragma omp parallel for schedule(static) num_threads(nthreads) \
            shared(dimA, k, k_int, Bsum, Bsum_user)
    for (row = 0; row < dimA; row++)
        cblas_taxpy(k_int, 1., Bsum, 1, Bsum_user + row*k, 1);
}
/* Functions for Proximal Gradient */
/* Gradient of the Poisson log-likelihood term used by the PGD solver:
     out = sum_i ( X[i] / <F[Xind[i],:], curr> ) * F[Xind[i],:]
   'out' (length k) is overwritten. */
void calc_grad_pgd(real_t *out, real_t *curr, real_t *F, real_t *X, sparse_ix *Xind, sparse_ix nnz_this, int k)
{
    size_t kk = (size_t)k;
    memset(out, 0, kk * sizeof(real_t));
    for (sparse_ix el = 0; el < nnz_this; el++)
    {
        real_t *f_row = F + (size_t)Xind[el] * kk;
        real_t coef = X[el] / cblas_tdot(k, f_row, 1, curr, 1);
        cblas_taxpy(k, coef, f_row, 1, out, 1);
    }
}
/* This function is written having in mind the A matrix being optimized,
   with the B matrix being fixed, and the data passed in row-sparse format.
   For optimizing B, swap any mention of A and B, and pass the data in
   column-sparse format */
/* One proximal-gradient pass over all rows of A.
   cnst_div   : 1 / (1 + 2*l2_reg*step_size), applied after each update
   cnst_sum   : shared penalty vector (already scaled by -step_size)
   Bsum_user  : per-row penalty vectors, used instead when w_mult != 1
   maxupd     : number of PGD updates applied to each row
   buffer_arr : scratch of k reals per thread */
void pg_iteration
(
    real_t *A, real_t *B,
    real_t *Xr, sparse_ix *Xr_indptr, sparse_ix *Xr_indices,
    size_t dimA, size_t k,
    real_t cnst_div, real_t *cnst_sum, real_t *Bsum_user,
    real_t step_size, real_t w_mult, size_t maxupd,
    real_t *buffer_arr, int nthreads
)
{
    int k_int = (int) k;
    sparse_ix nnz_this;
    step_size *= w_mult;  /* likelihood term carries the positive-entry weight */
    real_t *Bsum = cnst_sum;
    #if defined(_OPENMP) && ((_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64)) /* OpenMP < 3.0 */
    long long ia;
    #endif
    #pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
            firstprivate(Bsum) private(nnz_this) \
            shared(A, B, k, k_int, cnst_div, Bsum_user, maxupd, Xr, Xr_indptr, Xr_indices)
    for (size_t_for ia = 0; ia < dimA; ia++)
    {
        nnz_this = Xr_indptr[ia + 1] - Xr_indptr[ia];
        /* Rows with no observed entries are optimal at zero under the penalty. */
        if (nnz_this == 0) {
            memset(A + ia*k, 0, k*sizeof(real_t));
            continue;
        }
        if (w_mult != 1.) Bsum = Bsum_user + ia*k;
        for (size_t p = 0; p < maxupd; p++)
        {
            /* Likelihood gradient into this thread's scratch buffer. */
            calc_grad_pgd(buffer_arr + k*omp_get_thread_num(),
                          A + ia*k, B, Xr + Xr_indptr[ia],
                          Xr_indices + Xr_indptr[ia], nnz_this, k_int);
            /* Gradient step, penalty shift, proximal scaling... */
            cblas_taxpy(k_int, step_size,
                        buffer_arr + k*omp_get_thread_num(), 1,
                        A + ia*k, 1);
            cblas_taxpy(k_int, 1., Bsum, 1, A + ia*k, 1);
            cblas_tscal(k_int, cnst_div, A + ia*k, 1);
            /* ...then projection onto the non-negative orthant. */
            for (size_t ix = 0; ix < k; ix++)
                A[ia*k + ix] = nonneg(A[ia*k + ix]);
        }
    }
}
/* Functions for Conjugate Gradient */
/* TODO: when doing line searches without evaluating gradient (e.g. for CG),
this could be computed faster by keeping pt1=B*a_vec; pt2=B*alpha*grad,
and then evaluating only loss(pt1+step*pt2). */
/* Objective value (penalties minus weighted Poisson log-likelihood) for one
   row vector 'a_row'; used by the CG minimizer. Result goes into '*f'. */
void calc_fun_single(real_t a_row[], int k_int, real_t *f, void *data)
{
    fdata *info = (fdata*) data;
    size_t k = (size_t)k_int;
    /* Linear (Bsum, includes any L1 shift) plus quadratic (L2) penalties. */
    real_t penalty = cblas_tdot(k_int, info->Bsum, 1, a_row, 1);
    penalty += info->l2_reg * cblas_tdot(k_int, a_row, 1, a_row, 1);
    /* Log-likelihood over the observed (non-zero) entries of this row. */
    real_t loglik = 0.;
    for (size_t ix = 0; ix < info->nnz_this; ix++)
    {
        real_t pred = cblas_tdot(k_int, a_row, 1,
                                 info->B + info->X_ind[ix]*k, 1);
        loglik += info->Xr[ix] * log(pred);
    }
    *f = penalty - loglik * info->w_mult;
}
/* Gradient of calc_fun_single w.r.t. 'a_row' for the unweighted case
   (w_mult == 1): grad = Bsum + 2*l2_reg*a - sum_i (X[i]/pred_i) * B_i. */
void calc_grad_single(real_t a_row[], int k_int, real_t grad[], void *data)
{
    fdata *info = (fdata*) data;
    size_t k = (size_t)k_int;
    /* Start from the penalty gradient: Bsum + 2*l2_reg*a. */
    memcpy(grad, info->Bsum, k * sizeof(real_t));
    cblas_taxpy(k_int, 2. * info->l2_reg, a_row, 1, grad, 1);
    /* Subtract the likelihood part, one observed entry at a time. */
    for (size_t ix = 0; ix < info->nnz_this; ix++)
    {
        real_t *b_row = info->B + info->X_ind[ix]*k;
        real_t pred = cblas_tdot(k_int, a_row, 1, b_row, 1);
        cblas_taxpy(k_int, -info->Xr[ix] / pred, b_row, 1, grad, 1);
    }
}
/* Gradient of calc_fun_single for the weighted case (w_mult != 1): the
   likelihood part is accumulated first, scaled by w_mult, and only then
   are the penalty gradients added on top. */
void calc_grad_single_w(real_t a_row[], int k_int, real_t grad[], void *data)
{
    fdata *info = (fdata*) data;
    size_t k = (size_t)k_int;
    /* Accumulate the (negative) likelihood gradient... */
    memset(grad, 0, k*sizeof(real_t));
    for (size_t ix = 0; ix < info->nnz_this; ix++)
    {
        real_t *b_row = info->B + info->X_ind[ix]*k;
        real_t pred = cblas_tdot(k_int, a_row, 1, b_row, 1);
        cblas_taxpy(k_int, -info->Xr[ix] / pred, b_row, 1, grad, 1);
    }
    /* ...scale it by the positive-entry weight... */
    cblas_tscal(k_int, info->w_mult, grad, 1);
    /* ...then add the penalty gradients. */
    cblas_taxpy(k_int, 1., info->Bsum, 1, grad, 1);
    cblas_taxpy(k_int, 2. * info->l2_reg, a_row, 1, grad, 1);
}
/* Computes the objective value (into '*f') and its gradient (into 'grad')
   for one row in a single pass; used by the TNC solver.
   Bugfix: the L2 quadratic term l2_reg*||a_row||^2 was missing from '*f'
   even though its derivative (2*l2_reg*a_row) is included in 'grad' --
   an inconsistent function/gradient pair breaks the TNC line search.
   The form now matches calc_fun_single.
   Note: the likelihood part of the gradient is scaled by w_mult BEFORE
   the penalty terms are added, so the statement order below matters.
   Always returns 0 (the TNC callback interface expects an error code). */
int calc_fun_and_grad
(
    real_t *restrict a_row,
    real_t *restrict f,
    real_t *restrict grad,
    void *data
)
{
    fdata *fun_data = (fdata*)data;
    int k_int = fun_data->k;
    size_t k = (size_t)k_int;
    real_t pred;
    real_t lsum = 0;
    memset(grad, 0, k*sizeof(real_t));
    /* Likelihood term and its gradient over the observed entries. */
    for (size_t ix = 0; ix < fun_data->nnz_this; ix++)
    {
        pred = cblas_tdot(k_int, a_row, 1, fun_data->B + fun_data->X_ind[ix]*k, 1);
        cblas_taxpy(k_int, - fun_data->Xr[ix] / pred,
                    fun_data->B + fun_data->X_ind[ix]*k, 1, grad, 1);
        lsum += fun_data->Xr[ix] * log(pred);
    }
    /* Weight only the likelihood part of the gradient. */
    if (fun_data->w_mult != 1.)
        cblas_tscal(k_int, fun_data->w_mult, grad, 1);
    /* Penalty gradients: Bsum + 2*l2_reg*a_row. */
    cblas_taxpy(k_int, 1., fun_data->Bsum, 1, grad, 1);
    real_t reg_term = cblas_tdot(k_int, fun_data->Bsum, 1, a_row, 1);
    reg_term += fun_data->l2_reg * cblas_tdot(k_int, a_row, 1, a_row, 1);
    cblas_taxpy(k_int, 2. * fun_data->l2_reg, a_row, 1, grad, 1);
    *f = reg_term - lsum * fun_data->w_mult;
    return 0;
}
/* One conjugate-gradient pass over all rows of A (see pg_iteration for the
   A/B swap convention). Each row with observed entries is minimized with up
   to 'maxupd' non-negative CG updates; empty rows are zeroed out.
   buffer_arr holds 5*k reals of workspace per thread. */
void cg_iteration
(
    real_t *A, real_t *B,
    real_t *Xr, sparse_ix *Xr_indptr, sparse_ix *Xr_indices,
    size_t dimA, size_t k, bool limit_step,
    real_t *Bsum, real_t l2_reg, real_t w_mult, size_t maxupd,
    real_t *buffer_arr, real_t *Bsum_w, int nthreads
)
{
    int k_int = (int) k;
    /* Per-thread copy of the problem data (firstprivate below). */
    fdata data = { B, Bsum, NULL, NULL, 0, l2_reg, w_mult, k_int };
    real_t fun_val;
    size_t niter;
    size_t nfeval;
    /* The weighted case needs the gradient variant that rescales the
       likelihood term before adding the penalties. */
    grad_eval *grad_fun = (w_mult == 1.)? calc_grad_single : calc_grad_single_w;
    #if defined(_OPENMP) && ((_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64))
    long long ia;
    #endif
    #pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
            private(fun_val, niter, nfeval) firstprivate(data) \
            shared(dimA, Xr, Xr_indptr, Xr_indices, A, k, k_int, grad_fun)
    for (size_t_for ia = 0; ia < dimA; ia++)
    {
        /* Honor a pending SIGINT without aborting the parallel loop. */
        if (should_stop_procedure)
            continue;
        data.Xr = Xr + Xr_indptr[ia];
        data.X_ind = Xr_indices + Xr_indptr[ia];
        data.nnz_this = Xr_indptr[ia + 1] - Xr_indptr[ia];
        /* Rows with no observed entries are optimal at zero. */
        if (data.nnz_this == 0) {
            memset(A + ia*k, 0, k*sizeof(real_t));
            continue;
        }
        if (w_mult != 1.) data.Bsum = Bsum_w + ia*k;
        minimize_nonneg_cg(
            A + ia*k, k_int, &fun_val,
            calc_fun_single, grad_fun, NULL, (void*) &data,
            1e-2, 150, maxupd, &niter, &nfeval,
            0.25, 0.01, 20, limit_step,
            buffer_arr + 5*k*omp_get_thread_num(), 1, 0);
    }
}
/* One truncated-Newton (TNC) pass over all rows of A.
   reuse_prev       : if false, rows are re-initialized to 1e-3 before optimizing
   buffer_arr       : 22*k reals per thread (TNC workspace; last k = gradient)
   buffer_int       : k ints per thread
   buffer_unchanged : if non-NULL, per-thread snapshot of the previous row
                      values, used to count rows that barely moved
   has_converged    : set to true when >= 95% of rows changed by <= 1e-4
                      (squared L2 distance) during this pass
   zeros_tncg / inf_tncg : lower/upper variable bounds handed to 'tnc'
   Remaining arguments as in cg_iteration. */
void tncg_iteration
(
    real_t *A, real_t *B, bool reuse_prev,
    real_t *Xr, sparse_ix *Xr_indptr, sparse_ix *Xr_indices,
    size_t dimA, size_t k,
    real_t *Bsum, real_t l2_reg, real_t w_mult, int maxupd,
    real_t *buffer_arr, int *buffer_int,
    real_t *restrict buffer_unchanged, bool *has_converged,
    real_t *zeros_tncg, real_t *inf_tncg,
    real_t *Bsum_w, int nthreads
)
{
    int k_int = (int) k;
    fdata data = { B, Bsum, NULL, NULL, 0, l2_reg, w_mult, k_int };
    real_t fun_val = 0;
    int niter = 0;
    int nfeval = 0;
    /* Number of inner CG iterations for TNC, clipped to [1, 50]. */
    int maxCGit = (int) fmax(1., fmin(50., (real_t)k/2.));
    #if defined(_OPENMP) && ((_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64))
    long long ia;
    #endif
    *has_converged = false;
    size_t n_unchanged = 0;
    real_t *prev_values;
    #pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
            firstprivate(data) private(niter, nfeval, fun_val, prev_values) \
            shared(A, dimA, Bsum_w, k, k_int, zeros_tncg, inf_tncg, \
                   buffer_arr, buffer_int, Xr, Xr_indices, Xr_indptr, \
                   maxupd, w_mult) \
            reduction(+:n_unchanged)
    for (size_t_for ia = 0; ia < dimA; ia++)
    {
        /* Honor a pending SIGINT without aborting the parallel loop. */
        if (should_stop_procedure)
            continue;
        data.Xr = Xr + Xr_indptr[ia];
        data.X_ind = Xr_indices + Xr_indptr[ia];
        data.nnz_this = Xr_indptr[ia + 1] - Xr_indptr[ia];
        /* Rows with no observed entries are optimal at zero. */
        if (data.nnz_this == 0) {
            memset(A + ia*k, 0, k*sizeof(real_t));
            continue;
        }
        if (w_mult != 1.) data.Bsum = Bsum_w + ia*k;
        if (buffer_unchanged != NULL) {
            /* Snapshot the row so the change can be measured afterwards. */
            prev_values = buffer_unchanged + k*(size_t)omp_get_thread_num();
            memcpy(prev_values, A + ia*k, k*sizeof(real_t));
        }
        if (!reuse_prev)
            for (size_t ix = 0; ix < k; ix++)
                A[ia*k + ix] = 1e-3;
        tnc(k_int, A + ia*k, &fun_val,
            buffer_arr + (size_t)omp_get_thread_num()*(size_t)22*k + (size_t)21*k,
            calc_fun_and_grad, (void*) &data,
            zeros_tncg, inf_tncg, NULL, NULL,
            0, maxCGit, maxupd, 0.25, 10.,
            0., 0., 1e-4, -1., -1.,
            1.3, &nfeval, &niter,
            buffer_arr + (size_t)omp_get_thread_num()*(size_t)22*k,
            buffer_int + (size_t)omp_get_thread_num()*k);
        if (buffer_unchanged != NULL) {
            /* prev_values -= new row; small squared norm => "unchanged". */
            cblas_taxpy(k_int, -1., A + ia*k, 1, prev_values, 1);
            n_unchanged += cblas_tdot(k_int, prev_values, 1, prev_values, 1) <= 1e-4;
        }
    }
    /* TODO: better keep an entry-by-entry array of whether they've changed,
       then examine skipping them individually instead. */
    if (buffer_unchanged != NULL) {
        *has_converged = ((double)n_unchanged / (double)dimA) >= .95;
    }
}
/* Main function for Proximal Gradient and Conjugate Gradient solvers
A : Pointer to the already-initialized A matrix
(user factors)
Xr, Xr_indptr, Xr_indices : Pointers to the X matrix in row-sparse format
B : Pointer to the already-initialized B matrix
(item factors)
Xc, Xc_indptr, Xc_indices : Pointers to the X matrix in column-sparse format
dimA : Number of rows in the A matrix
dimB : Number of rows in the B matrix
k : Dimensionality for the factorizing matrices
(number of columns of A and B matrices)
l2_reg : Regularization parameter for the L2 norm of the A and B matrices
l1_reg : Regularization parameter for the L1 norm of the A and B matrices
w_mult : Weight multiplier for the positive entries in X
step_size : Initial step size for PGD updates
(will be decreased by 1/2 every iteration - ignored for CG)
method : Which optimization method to use (tncg, cg, pg).
limit_step : Whether to limit CG step sizes to zero-out one variable per step
numiter : Number of iterations for which to run the procedure
maxupd : Number of updates to the same vector per iteration
early_stop : Whether to stop early if the values do not change much after an iteration (TNCG)
reuse_prev : Whether to re-use previous values as starting point (TNCG)
handle_interrupt : Whether to stop gracefully after a SIGINT, returning code 2 instead.
nthreads : Number of threads to use
Matrices A and B are optimized in-place,
and are assumed to be in row-major order.
Returns 0 if it succeeds, 1 if it runs out of memory, 2 if it gets interrupted.
*/
/* Alternating-optimization driver (see the block comment above for the full
   parameter list). Updates B then A once per outer iteration using the
   selected solver. Returns 0 on success, 1 on out-of-memory, 2 when
   interrupted (with 'handle_interrupt'); re-raises SIGINT otherwise.
   Bugfix: the PG branch for the A-matrix update contained a stray second
   'cblas_tscal(k_int, neg_step_sz, cnst_sum, 1)' after the if/else, which
   (when w_mult == 1) flipped the penalty vector's sign back to positive and
   scaled it by step_size^2. It is removed so the branch mirrors the
   B-matrix update above. */
int run_poismf(
    real_t *restrict A, real_t *restrict Xr, sparse_ix *restrict Xr_indptr, sparse_ix *restrict Xr_indices,
    real_t *restrict B, real_t *restrict Xc, sparse_ix *restrict Xc_indptr, sparse_ix *restrict Xc_indices,
    const size_t dimA, const size_t dimB, const size_t k,
    const real_t l2_reg, const real_t l1_reg, const real_t w_mult, real_t step_size,
    const Method method, const bool limit_step, const size_t numiter, const size_t maxupd,
    const bool early_stop, const bool reuse_prev,
    const bool handle_interrupt, const int nthreads)
{
    /* Install the SIGINT handler only if no other concurrent call owns it. */
    sig_t_ old_interrupt_handle = NULL;
    bool has_lock_on_handle = false;
    #pragma omp critical
    {
        if (!handle_is_locked)
        {
            handle_is_locked = true;
            has_lock_on_handle = true;
            should_stop_procedure = false;
            old_interrupt_handle = signal(SIGINT, set_interrup_global_variable);
        }
    }
    real_t *cnst_sum = (real_t*) malloc(sizeof(real_t) * k);
    real_t cnst_div;
    int k_int = (int) k;
    real_t neg_step_sz = -step_size;
    /* Per-thread scratch requirements differ by solver. */
    size_t size_buffer = 1;
    switch(method) {
        case pg:   {size_buffer = 1;  break;}
        case cg:   {size_buffer = 5;  break;}
        case tncg: {size_buffer = 22; break;}
    }
    size_buffer *= (k * (size_t)nthreads);
    real_t *buffer_arr = (real_t*) malloc(sizeof(real_t) * size_buffer);
    real_t *Bsum_w = NULL;
    int *buffer_int = NULL;
    real_t *zeros_tncg = NULL;
    real_t *inf_tncg = NULL;
    real_t *buffer_unchanged = NULL;
    bool stopped_earlyA = false, stopped_earlyB = false;
    int ret_code = 0;
    if (w_mult != 1.) {
        /* Per-row penalty vectors; sized for the larger dimension because
           the same buffer serves both the A and B updates. */
        Bsum_w = (real_t*)malloc(sizeof(real_t) * k * ((dimA > dimB)? dimA : dimB));
        if (Bsum_w == NULL) goto throw_oom;
    }
    if (method == tncg) {
        buffer_int = (int*)malloc(sizeof(int) * k * (size_t)nthreads);
        zeros_tncg = (real_t*)calloc(k, sizeof(real_t));  /* lower bounds (all zero) */
        inf_tncg = (real_t*)malloc(sizeof(real_t) * k);   /* upper bounds (all +inf) */
        if (buffer_int == NULL || zeros_tncg == NULL || inf_tncg == NULL)
            goto throw_oom;
        if (early_stop) {
            buffer_unchanged = (real_t*)malloc((size_t)nthreads*k*sizeof(real_t));
            if (buffer_unchanged == NULL)
                goto throw_oom;
        }
        for (size_t ix = 0; ix < k; ix++)
            inf_tncg[ix] = HUGE_VAL;
    }
    if (buffer_arr == NULL || cnst_sum == NULL)
    {
        throw_oom:
        fprintf(stderr, "Error: out of memory.\n");
        ret_code = 1;
        goto cleanup;
    }
    for (size_t fulliter = 0; fulliter < numiter; fulliter++){
        if (should_stop_procedure) goto cleanup;
        /* Constants to use later */
        cnst_div = 1. / (1. + 2. * l2_reg * step_size);
        /* --- B-matrix update (column-sparse data) --- */
        sum_by_cols(cnst_sum, A, dimA, k);
        if (l1_reg > 0.)
            for (size_t kk = 0; kk < k; kk++) cnst_sum[kk] += l1_reg;
        if (w_mult != 1.)
            adjustment_Bsum(A, cnst_sum, Bsum_w,
                            Xc_indices, Xc_indptr, dimB, k,
                            w_mult, nthreads);
        switch(method) {
            case pg:
            {
                if (w_mult == 1.)
                    cblas_tscal(k_int, neg_step_sz, cnst_sum, 1);
                else
                    dscal_large(dimB*k, neg_step_sz, Bsum_w);
                pg_iteration(B, A, Xc, Xc_indptr, Xc_indices,
                             dimB, k, cnst_div, cnst_sum, Bsum_w, step_size,
                             w_mult, maxupd, buffer_arr, nthreads);
                /* Decrease step size after taking PGD steps in both matrices */
                step_size *= 0.5;
                neg_step_sz = -step_size;
                break;
            }
            case cg:
            {
                cg_iteration(B, A, Xc, Xc_indptr, Xc_indices,
                             dimB, k, limit_step, cnst_sum,
                             l2_reg, w_mult, maxupd,
                             buffer_arr, Bsum_w, nthreads);
                break;
            }
            case tncg:
            {
                if (!stopped_earlyB)
                    tncg_iteration(B, A, reuse_prev, Xc, Xc_indptr, Xc_indices,
                                   dimB, k, cnst_sum, l2_reg, w_mult, maxupd,
                                   buffer_arr, buffer_int,
                                   buffer_unchanged, &stopped_earlyB,
                                   zeros_tncg, inf_tncg,
                                   Bsum_w, nthreads);
                break;
            }
        }
        if (should_stop_procedure) goto cleanup;
        /* Same procedure repeated for the A matrix */
        sum_by_cols(cnst_sum, B, dimB, k);
        if (l1_reg > 0.)
            for (size_t kk = 0; kk < k; kk++) cnst_sum[kk] += l1_reg;
        if (w_mult != 1.)
            adjustment_Bsum(B, cnst_sum, Bsum_w,
                            Xr_indices, Xr_indptr, dimA, k,
                            w_mult, nthreads);
        switch (method) {
            case pg:
            {
                if (w_mult == 1.)
                    cblas_tscal(k_int, neg_step_sz, cnst_sum, 1);
                else
                    dscal_large(dimA*k, neg_step_sz, Bsum_w);
                pg_iteration(A, B, Xr, Xr_indptr, Xr_indices,
                             dimA, k, cnst_div, cnst_sum, Bsum_w, step_size,
                             w_mult, maxupd, buffer_arr, nthreads);
                break;
            }
            case cg:
            {
                cg_iteration(A, B, Xr, Xr_indptr, Xr_indices,
                             dimA, k, limit_step, cnst_sum,
                             l2_reg, w_mult, maxupd,
                             buffer_arr, Bsum_w, nthreads);
                break;
            }
            case tncg:
            {
                if (!stopped_earlyA)
                    tncg_iteration(A, B, reuse_prev, Xr, Xr_indptr, Xr_indices,
                                   dimA, k, cnst_sum, l2_reg, w_mult, maxupd,
                                   buffer_arr, buffer_int,
                                   buffer_unchanged, &stopped_earlyA,
                                   zeros_tncg, inf_tncg,
                                   Bsum_w, nthreads);
                break;
            }
        }
        /* TNCG early stopping: quit once both matrices have converged. */
        if (stopped_earlyA && stopped_earlyB)
            break;
    }
    cleanup:
    free(cnst_sum);
    free(buffer_arr);
    free(buffer_int);
    free(Bsum_w);
    free(buffer_unchanged);
    free(zeros_tncg);
    free(inf_tncg);
    /* Restore the previous SIGINT handler (if this call installed one) and
       translate an interrupt into return code 2, or re-raise it when the
       caller asked not to handle it. */
    #pragma omp critical
    {
        bool should_stop_procedure_local = should_stop_procedure;
        if (should_stop_procedure_local && ret_code != 1)
            ret_code = 2;
        if (has_lock_on_handle) {
            signal(SIGINT, old_interrupt_handle);
            handle_is_locked = false;
            should_stop_procedure = false;
        }
        if (should_stop_procedure_local && !handle_interrupt)
            raise(SIGINT);
    }
    return ret_code;
}
|
spotfit.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "pix.h"
/*-------------------------------------------------------------------------
*
* Functions to fit the image pixels to locate spot coordinate.
*
*------------------------------------------------------------------------*/
typedef struct {
double *x_fit;
double *y_fit;
int *intensity;
} fitIntensity_t;
//
// r0: full width of the spots
//
//
// Residuals and Jacobian for a symmetric 2-D Gaussian model:
//   model(x,y) = I0 * exp(-2*((x-x0)^2+(y-y0)^2)/r0^2) + B0
// Parameters a[] = { I0, x0, y0, r0, B0 }; r0 is the full width of the spot.
// f[i] holds (model - data) for pixel i; dfda[i*5+j] holds df[i]/da[j].
//
static void
fit_gaussian2D(int n_data, void *data, double *a, double *f, double *dfda) {
    fitIntensity_t *pix = (fitIntensity_t *)data;
    const double I0 = a[0];
    const double x0 = a[1];
    const double y0 = a[2];
    const double r0 = a[3];
    const double B0 = a[4];
    const double *xs = pix->x_fit;
    const double *ys = pix->y_fit;
    const int *vals = pix->intensity;
    // The weighting "sig" is fixed at 1 and therefore omitted here.
    for (int i = 0; i < n_data; i++) {
        const double dx = xs[i]-x0;
        const double dy = ys[i]-y0;
        const double ee = exp(-2.0*(dx*dx+dy*dy)/(r0*r0));
        f[i] = I0*ee+B0 - (double)vals[i];
        dfda[i*5+0] = ee;
        dfda[i*5+1] = I0*ee*(4.0*dx/(r0*r0));
        dfda[i*5+2] = I0*ee*(4.0*dy/(r0*r0));
        dfda[i*5+3] = I0*ee*(4.0*(dx*dx+dy*dy)/(r0*r0*r0));
        dfda[i*5+4] = 1.0;
    }
}
//
// wx, wy: full width of the spots
//
//
// Residuals and Jacobian for an elliptical 2-D Gaussian model:
//   model(x,y) = I0 * exp(-2*(x-x0)^2/wx^2 - 2*(y-y0)^2/wy^2) + B0
// Parameters a[] = { I0, x0, y0, wx, wy, B0 }; wx, wy are the full widths.
// f[i] holds (model - data) for pixel i; dfda[i*6+j] holds df[i]/da[j].
//
static void
fit_gaussian3D(int n_data, void *data, double *a, double *f, double *dfda) {
    fitIntensity_t *pix = (fitIntensity_t *)data;
    const double I0 = a[0];
    const double x0 = a[1];
    const double y0 = a[2];
    const double wx = a[3];
    const double wy = a[4];
    const double B0 = a[5];
    const double *xs = pix->x_fit;
    const double *ys = pix->y_fit;
    const int *vals = pix->intensity;
    // The weighting "sig" is fixed at 1 and therefore omitted here.
    for (int i = 0; i < n_data; i++) {
        const double dx = xs[i]-x0;
        const double dy = ys[i]-y0;
        const double ee = exp(-(2.0*dx*dx)/(wx*wx) - (2.0*dy*dy)/(wy*wy));
        f[i] = I0*ee+B0 - (double)vals[i];
        dfda[i*6+0] = ee;
        dfda[i*6+1] = I0*ee*(4.0*dx/(wx*wx));
        dfda[i*6+2] = I0*ee*(4.0*dy/(wy*wy));
        dfda[i*6+3] = I0*ee*((4.0*dx*dx)/(wx*wx*wx));
        dfda[i*6+4] = I0*ee*((4.0*dy*dy)/(wy*wy*wy));
        dfda[i*6+5] = 1.0;
    }
}
/*-------------------------------------------------------------------------
*
* Collect the fitting results.
*
*------------------------------------------------------------------------*/
/*
 * Store the fitting results into a freshly allocated sp->res record:
 * the first 6 parameter values, their uncertainties, and chi-square.
 * Aborts via pstop() on allocation failure.
 * NOTE(review): 6 entries are copied even in 2-D mode (na == 5), so the
 * sixth slot then carries an uninitialized value from the caller's a[10]
 * buffer -- confirm whether anything reads res->a[5] in that mode.
 */
static void fit_result(sp_t *sp, double *a, double *da, double chisq) {
    int i;
    /* calloc(nmemb, size): one zero-initialized result record. */
    if ((sp->res = calloc(1, sizeof(fitres_t))) == NULL)
        pstop("!!! fit_result: not enough memory.\n");
    for (i=0; i < 6; i++) {
        sp->res->a[i] = a[i];
        sp->res->da[i] = da[i];
    }
    /* chisq is a scalar; assign once instead of on every loop pass. */
    sp->res->chisq = chisq;
}
/*-------------------------------------------------------------------------
*
* Fit the Spots coordinates.
*
*------------------------------------------------------------------------*/
/*
 * Fit one spot image to a Gaussian model -- symmetric 2-D when p->mode == 0
 * (5 parameters), elliptical otherwise (6 parameters) -- then validate the
 * fitted parameters against the acceptance limits stored in 'p'.
 * x_fit, y_fit : pixel coordinates used by the model (relative to the spot
 *                position guess; offsets are added back after fitting)
 * Returns 0 on success (results stored into sp->res via fit_result),
 * -1 when fitting fails or any sanity check rejects the result.
 */
int SpotFit(para_t *p, double *x_fit, double *y_fit, sp_t *sp) {
    int na, mloop, imglen;
    int *intensity, imax, imin;
    double a[10], da[10], chisq, tol;
    fitIntensity_t fdata;
    void (*fitfunc)(int, void *, double *, double *, double *);
    if (sp == NULL) return -1;
    sp->res = NULL;
    // Initial parameters for fitting.
    intensity = sp->img;
    imglen = p->x_find_pixels * p->y_find_pixels;
    mloop = 100; // maxloop for fitting.
    tol = 1.E-5; // stopping criteria for fitting.
    if (p->mode == 0) {
        na = 5;                   // { I0, x0, y0, r0, B0 }
        fitfunc = fit_gaussian2D;
    }else {
        na = 6;                   // { I0, x0, y0, wx, wy, B0 }
        fitfunc = fit_gaussian3D;
    }
    // Prepare initial parameters for fitting.
    vmaxmin_i(imglen, intensity, &imax, NULL, &imin, NULL);
    if (p->mode == 0) {
        a[0] = (double)(imax-imin); // initial parameter: intensity.
        a[1] = 0.0;                 // initial parameter: x0.
        a[2] = 0.0;                 // initial parameter: y0.
        a[3] = 1.0;                 // initial parameter: Gaussian width.
        a[4] = (double)imin;        // initial parameter: background.
    }else {
        a[0] = (double)(imax-imin); // initial parameter: intensity.
        a[1] = 0.0;                 // initial parameter: x0.
        a[2] = 0.0;                 // initial parameter: y0.
        a[3] = 1.0;                 // initial parameter: wx.
        a[4] = 1.0;                 // initial parameter: wy.
        a[5] = (double)imin;        // initial parameter: background.
    }
    fdata.x_fit = x_fit;
    fdata.y_fit = y_fit;
    fdata.intensity = intensity;
    if (nlinfit(imglen, fitfunc, &fdata, na, a, da, &chisq, tol,
                mloop, p->verb) != 0)
        return -1;
    // Adjust the spot coordinates and parameters from the fitting results.
    a[1] = (double)(sp->x) + a[1];
    a[2] = (double)(sp->y) + a[2];
    // Reject fits with negative amplitude/position/background, positions
    // outside the image, or uncertainties/S-N ratios beyond the limits.
    if (p->mode == 0) {
        if (a[0]<0.0 || a[1]<0.0 || a[2]<0.0 || a[4]<0.0) return -1;
        if (a[1] > p->max_x || a[2] > p->max_y) return -1;
        if (da[1] > p->max_dx || da[2] > p->max_dy || da[3] > p->max_dwx ||
            a[0]/a[4] < p->min_SN || da[0]/a[0] > p->max_dI_I) return -1;
        a[3] = fabs(a[3]); // width enters the model squared; sign is arbitrary.
    } else {
        if (a[0]<0.0 || a[1]<0.0 || a[2]<0.0 || a[5]<0.0) return -1;
        if (a[1] > p->max_x || a[2] > p->max_y) return -1;
        if (da[1] > p->max_dx || da[2] > p->max_dy ||
            da[3] > p->max_dwx || da[4] > p->max_dwy ||
            a[0]/a[5] < p->min_SN || da[0]/a[0] > p->max_dI_I) return -1;
        a[3] = fabs(a[3]);
        a[4] = fabs(a[4]);
    }
    fit_result(sp, a, da, chisq);
    return 0;
}
|
GB_unaryop__identity_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint8_uint8
// op(A') function: GB_tran__identity_uint8_uint8
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator entry-wise: Cx [p] = (uint8_t) Ax [p].
// Cx, Ax   : output and input value arrays, both of length anz
// anz      : number of entries
// nthreads : number of OpenMP threads to use
// Returns GrB_NO_VALUE when this operator/type pair is compiled out.
GrB_Info GB_unop__identity_uint8_uint8
(
    uint8_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;     // Cx [p] = (uint8_t) Ax [p]
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A, typecast, and apply the identity operator into C. The loop
// body lives in the included template GB_unaryop_transpose.c (phase 2 of 2),
// which expands using the GB_* macros defined above.
// Rowcounts / Iter / A_slice / naslice describe how A is sliced across
// threads by the caller.
GrB_Info GB_tran__identity_uint8_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
image_random-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file image_random-inl.h
* \brief
* \author
*/
#ifndef MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#define MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#include <algorithm>
#include <cmath>
#include <limits>
#include <tuple>
#include <utility>
#include <vector>
#include "mxnet/base.h"
#include "../mxnet_op.h"
#include "../operator_common.h"
#if MXNET_USE_OPENCV
#include <opencv2/opencv.hpp>
#endif // MXNET_USE_OPENCV
namespace mxnet {
namespace op {
namespace image {
// There are no parameters for this operator.
// Hence, no parameter registration.
// Shape and Type inference for image to tensor operator
// Shape inference for to_tensor: (H, W, C) -> (C, H, W), or
// (N, H, W, C) -> (N, C, H, W). Returns false until the input shape is known.
inline bool ToTensorShape(const nnvm::NodeAttrs& attrs,
                          std::vector<TShape> *in_attrs,
                          std::vector<TShape> *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TShape &ishape = (*in_attrs)[0];
  if (!ishape.ndim()) return false;
  CHECK((ishape.ndim() == 3) || (ishape.ndim() == 4))
      << "Input image must have shape (height, width, channels), or "
      << "(N, height, width, channels) but got " << ishape;
  if (ishape.ndim() == 4) {
    // Batched input: move the channel axis just after the batch axis.
    SHAPE_ASSIGN_CHECK(*out_attrs, 0,
                       TShape({ishape[0], ishape[3], ishape[1], ishape[2]}));
  } else {
    // Single image: channel axis becomes the leading axis.
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, TShape({ishape[2], ishape[0], ishape[1]}));
  }
  return true;
}
// Type inference for to_tensor: the output dtype is always float32;
// inference succeeds once the input dtype is known.
inline bool ToTensorType(const nnvm::NodeAttrs& attrs,
                         std::vector<int> *in_attrs,
                         std::vector<int> *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat32);
  return in_attrs->at(0) != -1;
}
// Operator Implementation
// Kernel for to_tensor: for one channel index 'c', copies the interleaved
// HWC input into planar CHW output, dividing by normalize_factor
// (255 by default, mapping 8-bit pixel values into [0, 1]).
// length  : pixels per channel (H*W); step : flat offset of this image.
template<int req>
struct totensor_forward {
  template<typename DType>
  MSHADOW_XINLINE static void Map(uint32_t c, float* out_data, const DType* in_data,
                                  const int length, const int channel, const int step,
                                  const float normalize_factor = 255.0f) {
    // NOTE(review): Kernel<>::Launch below already parallelizes over
    // channels, so this inner pragma nests parallel regions on CPU --
    // confirm intended.
    #pragma omp parallel for
    for (int i = 0; i < length; ++i) {
      KERNEL_ASSIGN(out_data[step + c*length + i], req,
                    (in_data[step + i*channel + c]) / normalize_factor);
    }
  }
};
// Launches the to_tensor kernel over all channels of one image.
// length  : pixels per channel (H*W)
// channel : number of channels
// step    : flat offset of this image within a batched tensor (0 for 3-D)
template<typename xpu>
void ToTensorImpl(const OpContext &ctx,
                  const std::vector<TBlob> &inputs,
                  const std::vector<TBlob> &outputs,
                  const std::vector<OpReqType> &req,
                  const int length,
                  const uint32_t channel,
                  const int step = 0) {
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      float* output = outputs[0].dptr<float>();
      DType* input = inputs[0].dptr<DType>();
      // One kernel work-item per channel; see totensor_forward::Map.
      mxnet_op::Kernel<totensor_forward<req_type>, xpu>::Launch(
          s, channel, output, input, length, channel, step);
    });
  });
}
// Forward pass for to_tensor: converts HWC images into CHW float tensors
// scaled by 1/255. Handles both a single image (3-D) and a batch (4-D).
template<typename xpu>
void ToTensorOpForward(const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  CHECK_EQ(req[0], kWriteTo)
    << "`to_tensor` does not support inplace updates";
  // 3D Input - (h, w, c)
  if (inputs[0].ndim() == 3) {
    const int length = inputs[0].shape_[0] * inputs[0].shape_[1];
    const uint32_t channel = inputs[0].shape_[2];
    ToTensorImpl<xpu>(ctx, inputs, outputs, req, length, channel);
  } else if (inputs[0].ndim() == 4) {
    // 4D input (n, h, w, c): process each image at its batch offset.
    const int batch_size = inputs[0].shape_[0];
    const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
    const uint32_t channel = inputs[0].shape_[3];
    const int step = channel * length;
    // NOTE(review): parallelizing over batch items while each call also
    // launches a parallel kernel may nest parallel regions -- confirm safe.
    #pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      ToTensorImpl<xpu>(ctx, inputs, outputs, req, length, channel, n*step);
    }
  }
}
// Parameters of the Normalize operator: per-channel mean and standard
// deviation. Each tuple may hold a single value (broadcast to all
// channels) or one value per channel.
struct NormalizeParam : public dmlc::Parameter<NormalizeParam> {
  nnvm::Tuple<float> mean;  // subtracted from each channel
  nnvm::Tuple<float> std;   // divisor for each channel
  DMLC_DECLARE_PARAMETER(NormalizeParam) {
    DMLC_DECLARE_FIELD(mean)
    .set_default(nnvm::Tuple<float> {0.0f, 0.0f, 0.0f, 0.0f})
    .describe("Sequence of means for each channel. "
              "Default value is 0.");
    DMLC_DECLARE_FIELD(std)
    .set_default(nnvm::Tuple<float> {1.0f, 1.0f, 1.0f, 1.0f})
    .describe("Sequence of standard deviations for each channel. "
              "Default value is 1.");
  }
};
// Shape and Type inference for image Normalize operator
// Shape inference
// Shape inference for Normalize: validates that the input is (C, H, W) or
// (N, C, H, W) with 1 or 3 channels, and that mean/std each have either a
// single element or one per channel. The output shape equals the input's.
inline bool NormalizeOpShape(const nnvm::NodeAttrs& attrs,
                             std::vector<TShape> *in_attrs,
                             std::vector<TShape> *out_attrs) {
  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
  const auto& dshape = (*in_attrs)[0];
  if (!dshape.ndim()) return false;
  CHECK((dshape.ndim() == 3) || (dshape.ndim() == 4))
      << "Input tensor must have shape (channels, height, width), or "
      << "(N, channels, height, width), but got " << dshape;
  uint32_t nchannels;
  if (dshape.ndim() == 3) {
    nchannels = dshape[0];
    CHECK(nchannels == 3 || nchannels == 1)
      << "The first dimension of input tensor must be the channel dimension with "
      << "either 1 or 3 elements, but got input with shape " << dshape;
  } else if (dshape.ndim() == 4) {
    nchannels = dshape[1];
    CHECK(nchannels == 3 || nchannels == 1)
      << "The second dimension of input tensor must be the channel dimension with "
      << "either 1 or 3 elements, but got input with shape " << dshape;
  }
  CHECK((param.mean.ndim() == 1) || (param.mean.ndim() == nchannels))
      << "Invalid mean for input with shape " << dshape
      << ". mean must have either 1 or " << nchannels
      << " elements, but got " << param.mean;
  CHECK(param.std.ndim() == 1 || param.std.ndim() == nchannels)
      << "Invalid std for input with shape " << dshape
      << ". std must have either 1 or " << nchannels
      << " elements, but got " << param.std;
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
// Type Inference
// Type inference for Normalize: output dtype equals input dtype. The
// assignment is made in both directions so that a known output type can
// also back-propagate to an unknown input type.
inline bool NormalizeOpType(const nnvm::NodeAttrs& attrs,
                            std::vector<int>* in_attrs,
                            std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return out_attrs->at(0) != -1;
}
// Forward kernel for Normalize: for channel plane 'i',
//   out[i][j] = (in[i][j] - mean) / std_dev.
// length : pixels per channel; step : flat offset of this image.
template<int req>
struct normalize_forward {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int j, DType* out_data, const DType* in_data,
                                  const int i, const int length, const int step,
                                  const DType mean, const DType std_dev) {
    KERNEL_ASSIGN(out_data[step + i*length + j], req,
                  (in_data[step + i*length + j] - mean) / std_dev);
  }
};
// Launches the normalize kernel once per channel of one image.
// length : pixels per channel (H*W)
// step   : flat offset of this image within a batched tensor (0 for 3-D)
template<typename xpu>
void NormalizeImpl(const OpContext &ctx,
                   const std::vector<TBlob> &inputs,
                   const std::vector<TBlob> &outputs,
                   const std::vector<OpReqType> &req,
                   const NormalizeParam &param,
                   const int length,
                   const uint32_t channel,
                   const int step = 0) {
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      DType* input = inputs[0].dptr<DType>();
      DType* output = outputs[0].dptr<DType>();
      for (uint32_t i = 0; i < channel; ++i) {
        // Broadcast a single mean/std value when only one was given.
        DType mean = param.mean[param.mean.ndim() > i ? i : 0];
        DType std_dev = param.std[param.std.ndim() > i ? i : 0];
        mxnet_op::Kernel<normalize_forward<req_type>, xpu>::Launch(
            s, length, output, input,
            i, length, step, mean, std_dev);
      }
    });
  });
}
// Forward pass for Normalize: per-channel (x - mean) / std over a single
// (C, H, W) image or a (N, C, H, W) batch.
template<typename xpu>
void NormalizeOpForward(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<TBlob> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
  // 3D input (c, h, w)
  if (inputs[0].ndim() == 3) {
    const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
    const uint32_t channel = inputs[0].shape_[0];
    NormalizeImpl<xpu>(ctx, inputs, outputs, req, param, length, channel);
  } else if (inputs[0].ndim() == 4) {
    // 4D input (n, c, h, w): process each image at its batch offset.
    const int batch_size = inputs[0].shape_[0];
    const int length = inputs[0].shape_[2] * inputs[0].shape_[3];
    const uint32_t channel = inputs[0].shape_[1];
    const int step = channel * length;
    // NOTE(review): parallelizing over batch items while each call also
    // launches a parallel kernel may nest parallel regions -- confirm safe.
    #pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      NormalizeImpl<xpu>(ctx, inputs, outputs, req, param, length, channel, n*step);
    }
  }
}
// Backward function
// Backward kernel for Normalize: since d/dx[(x - mean)/std] = 1/std, the
// input gradient is the output gradient scaled by 1/std_dev for plane 'i'.
template<int req>
struct normalize_backward {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int j, DType* in_grad, const DType* out_grad,
                                  const int i, const int length,
                                  const int step, const DType std_dev) {
    // d/dx{(x - mean) / std_dev} => (1 / std_dev)
    KERNEL_ASSIGN(in_grad[step + i*length + j], req,
                  out_grad[step + i*length + j] * (1.0 / std_dev));
  }
};
// Backward counterpart of NormalizeImpl for one 3-D image: scales the
// incoming gradient of every channel by 1/std[c]. Parameters mirror
// NormalizeImpl (`length` = h*w per channel, `step` = per-sample offset
// within a batch; 0 for unbatched input). inputs[0] is out_grad,
// outputs[0] is in_grad.
template<typename xpu>
void NormalizeBackwardImpl(const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<TBlob> &outputs,
                           const std::vector<OpReqType> &req,
                           const NormalizeParam &param,
                           const int length,
                           const uint32_t channel,
                           const int step = 0) {
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& out_grad = inputs[0];
  const TBlob& in_grad = outputs[0];
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      for (uint32_t i = 0; i < channel; ++i) {
        // std broadcasts like the forward pass: entry 0 is reused when the
        // tuple has fewer entries than there are channels.
        DType std_dev = param.std[param.std.ndim() > i ? i : 0];
        mxnet_op::Kernel<normalize_backward<req_type>, xpu>::Launch(
            s, length, in_grad.dptr<DType>(), out_grad.dptr<DType>(),
            i, length, step, std_dev);
      }
    });
  });
}
// Backward entry point for the Normalize operator. inputs[0] is the output
// gradient; inputs[1] is the original forward input and is consulted only
// for its shape. 4-D batches dispatch one sample per OpenMP iteration.
template<typename xpu>
void NormalizeOpBackward(const nnvm::NodeAttrs &attrs,
                         const OpContext &ctx,
                         const std::vector<TBlob> &inputs,
                         const std::vector<OpReqType> &req,
                         const std::vector<TBlob> &outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
  // Note: inputs[0] is out_grad
  const TBlob& in_data = inputs[1];
  if (in_data.ndim() == 3) {
    // (c, h, w)
    const uint32_t nchannel = in_data.shape_[0];
    const int spatial = in_data.shape_[1] * in_data.shape_[2];
    NormalizeBackwardImpl<xpu>(ctx, inputs, outputs, req, param, spatial, nchannel);
  } else if (in_data.ndim() == 4) {
    // (n, c, h, w): offset each sample by n * (c * h * w).
    const int nbatch = in_data.shape_[0];
    const uint32_t nchannel = in_data.shape_[1];
    const int spatial = in_data.shape_[2] * in_data.shape_[3];
    const int step = nchannel * spatial;
    #pragma omp parallel for
    for (auto n = 0; n < nbatch; ++n) {
      NormalizeBackwardImpl<xpu>(ctx, inputs, outputs, req, param, spatial, nchannel, n*step);
    }
  }
}
// Convert a float to DType. The generic version is a plain static_cast;
// the uint8_t specialization clamps to [0, 255] first so out-of-range
// values saturate at the bounds instead of wrapping around.
template<typename DType>
inline DType saturate_cast(const float& src) {
  return static_cast<DType>(src);
}
template<>
inline uint8_t saturate_cast(const float& src) {
  const float clamped = std::max(std::min(src, 255.f), 0.f);
  return clamped;
}
// Shape inference for the image augmentation operators: the input must be
// a 3-D HWC image whose last dimension holds 1 or 3 channels; the output
// shape is identical to the input shape.
inline bool ImageShape(const nnvm::NodeAttrs& attrs,
                       std::vector<TShape> *in_attrs,
                       std::vector<TShape> *out_attrs) {
  TShape& shape = (*in_attrs)[0];
  CHECK_EQ(shape.ndim(), 3)
    << "Input image must have shape (height, width, channels), but got " << shape;
  const auto channels = shape[shape.ndim()-1];
  CHECK(channels == 3 || channels == 1)
    << "The last dimension of input image must be the channel dimension with "
    << "either 1 or 3 elements, but got input with shape " << shape;
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  return true;
}
// Reverse `shape[axis]` of a tensor: view the data as (head, mid, tail)
// with mid = shape[axis], and swap slice j with slice mid-1-j. Works
// in place (src == dst) or out of place.
//
// Fix: when mid is odd and dst is a different buffer than src, the swap
// loop (j < mid/2) never touches the middle slice, leaving stale data in
// dst. Copy that slice explicitly for the out-of-place case.
template<typename DType, int axis>
void FlipImpl(const TShape &shape, DType *src, DType *dst) {
  int head = 1, mid = shape[axis], tail = 1;
  for (int i = 0; i < axis; ++i) head *= shape[i];
  for (uint32_t i = axis+1; i < shape.ndim(); ++i) tail *= shape[i];
  for (int i = 0; i < head; ++i) {
    for (int j = 0; j < (mid >> 1); ++j) {
      int idx1 = (i*mid + j) * tail;
      int idx2 = idx1 + (mid-(j << 1)-1) * tail;
      for (int k = 0; k < tail; ++k, ++idx1, ++idx2) {
        DType tmp = src[idx1];
        dst[idx1] = src[idx2];
        dst[idx2] = tmp;
      }
    }
    if (((mid & 1) != 0) && (dst != src)) {
      // Middle slice maps onto itself; copy it through for out-of-place use.
      int idx = (i*mid + (mid >> 1)) * tail;
      for (int k = 0; k < tail; ++k) dst[idx + k] = src[idx + k];
    }
  }
}
// Mirror an HWC image horizontally (reverse the width axis, axis 1).
inline void FlipLeftRight(const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const std::vector<TBlob> &inputs,
                          const std::vector<OpReqType> &req,
                          const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType *src = inputs[0].dptr<DType>();
    DType *dst = outputs[0].dptr<DType>();
    FlipImpl<DType, 1>(inputs[0].shape_, src, dst);
  });
}
// Mirror an HWC image vertically (reverse the height axis, axis 0).
inline void FlipTopBottom(const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const std::vector<TBlob> &inputs,
                          const std::vector<OpReqType> &req,
                          const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType *src = inputs[0].dptr<DType>();
    DType *dst = outputs[0].dptr<DType>();
    FlipImpl<DType, 0>(inputs[0].shape_, src, dst);
  });
}
// With probability 1/2 flip the image horizontally; otherwise pass it
// through unchanged (copying only when the output buffer differs from the
// input buffer). Exactly one Bernoulli draw is consumed per call.
inline void RandomFlipLeftRight(
    const nnvm::NodeAttrs &attrs,
    const OpContext &ctx,
    const std::vector<TBlob> &inputs,
    const std::vector<OpReqType> &req,
    const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    const bool pass_through = std::bernoulli_distribution()(prnd->GetRndEngine());
    if (!pass_through) {
      FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(),
                         outputs[0].dptr<DType>());
    } else if (outputs[0].dptr_ != inputs[0].dptr_) {
      std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
    }
  });
}
// With probability 1/2 flip the image vertically; otherwise pass it
// through unchanged (copying only when the output buffer differs from the
// input buffer). Exactly one Bernoulli draw is consumed per call.
inline void RandomFlipTopBottom(
    const nnvm::NodeAttrs &attrs,
    const OpContext &ctx,
    const std::vector<TBlob> &inputs,
    const std::vector<OpReqType> &req,
    const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    const bool pass_through = std::bernoulli_distribution()(prnd->GetRndEngine());
    if (!pass_through) {
      FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(),
                         outputs[0].dptr<DType>());
    } else if (outputs[0].dptr_ != inputs[0].dptr_) {
      std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
    }
  });
}
// Parameter struct shared by the random enhancement operators
// (brightness/contrast/saturation/hue): the enhancement factor is drawn
// uniformly from [min_factor, max_factor]. Both bounds must be >= 0.
struct RandomEnhanceParam : public dmlc::Parameter<RandomEnhanceParam> {
  float min_factor;   // lower bound of the sampled factor
  float max_factor;   // upper bound of the sampled factor
  DMLC_DECLARE_PARAMETER(RandomEnhanceParam) {
    DMLC_DECLARE_FIELD(min_factor)
    .set_lower_bound(0.0)
    .describe("Minimum factor.");
    DMLC_DECLARE_FIELD(max_factor)
    .set_lower_bound(0.0)
    .describe("Maximum factor.");
  }
};
inline void AdjustBrightnessImpl(const float& alpha_b,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
int length = inputs[0].Size();
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
for (int l = 0; l < length; ++l) {
float val = static_cast<float>(input[l]) * alpha_b;
output[l] = saturate_cast<DType>(val);
}
});
}
// Draw a brightness factor uniformly from [min_factor, max_factor] and
// apply it via AdjustBrightnessImpl.
inline void RandomBrightness(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  std::uniform_real_distribution<float> sample(param.min_factor, param.max_factor);
  const float factor = sample(prnd->GetRndEngine());
  AdjustBrightnessImpl(factor, ctx, inputs, req, outputs);
}
inline void AdjustContrastImpl(const float& alpha_c,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
static const float coef[] = { 0.299f, 0.587f, 0.114f };
int length = inputs[0].shape_[0] * inputs[0].shape_[1];
int nchannels = inputs[0].shape_[2];
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
float sum = 0.f;
if (nchannels > 1) {
for (int l = 0; l < length; ++l) {
for (int c = 0; c < 3; ++c) sum += input[l*3 + c] * coef[c];
}
} else {
for (int l = 0; l < length; ++l) sum += input[l];
}
float gray_mean = sum / static_cast<float>(length);
float beta = (1 - alpha_c) * gray_mean;
for (int l = 0; l < length * nchannels; ++l) {
float val = input[l] * alpha_c + beta;
output[l] = saturate_cast<DType>(val);
}
});
}
// Draw a contrast factor uniformly from [min_factor, max_factor] and apply
// it via AdjustContrastImpl.
inline void RandomContrast(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  std::uniform_real_distribution<float> sample(param.min_factor, param.max_factor);
  const float factor = sample(prnd->GetRndEngine());
  AdjustContrastImpl(factor, ctx, inputs, req, outputs);
}
// Interpolate each pixel between itself and its grayscale (luma) value:
//   out = saturate(gray * (1 - alpha_s) + in * alpha_s)
// alpha_s = 1 leaves the image unchanged, alpha_s = 0 fully desaturates.
// Grayscale input is copied through untouched.
//
// Fix: the luma accumulation used `gray = ...` instead of `gray += ...`,
// so only the blue channel's contribution (the last loop iteration) was
// kept, producing a wrong gray reference for the blend.
inline void AdjustSaturationImpl(const float& alpha_s,
                                 const OpContext &ctx,
                                 const std::vector<TBlob> &inputs,
                                 const std::vector<OpReqType> &req,
                                 const std::vector<TBlob> &outputs) {
  static const float coef[] = { 0.299f, 0.587f, 0.114f };
  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  int nchannels = inputs[0].shape_[2];
  float alpha_o = 1.f - alpha_s;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* output = outputs[0].dptr<DType>();
    DType* input = inputs[0].dptr<DType>();
    if (nchannels == 1) {
      // Saturation is undefined for grayscale; copy through unchanged.
      for (int l = 0; l < length; ++l) output[l] = input[l];
      return;
    }
    for (int l = 0; l < length; ++l) {
      // Per-pixel luma with ITU-R 601 weights.
      float gray = 0.f;
      for (int c = 0; c < 3; ++c) {
        gray += input[l*3 + c] * coef[c];
      }
      gray *= alpha_o;
      for (int c = 0; c < 3; ++c) {
        float val = gray + input[l*3 + c] * alpha_s;
        output[l*3 + c] = saturate_cast<DType>(val);
      }
    }
  });
}
// Draw a saturation factor uniformly from [min_factor, max_factor] and
// apply it via AdjustSaturationImpl.
inline void RandomSaturation(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  std::uniform_real_distribution<float> sample(param.min_factor, param.max_factor);
  const float factor = sample(prnd->GetRndEngine());
  AdjustSaturationImpl(factor, ctx, inputs, req, outputs);
}
// Convert one RGB pixel (components as floats in 0..255) to HLS with
// H in [0, 360), L and S in [0, 1]. (Near-)achromatic pixels get H = S = 0.
inline void RGB2HLSConvert(const float& src_r,
                           const float& src_g,
                           const float& src_b,
                           float *dst_h,
                           float *dst_l,
                           float *dst_s) {
  const float r = src_r / 255.f;
  const float g = src_g / 255.f;
  const float b = src_b / 255.f;
  const float vmax = std::fmax(std::fmax(r, g), b);
  const float vmin = std::fmin(std::fmin(r, g), b);
  float diff = vmax - vmin;
  const float l = (vmax + vmin) * 0.5f;
  float h = 0.f;
  float s = 0.f;
  if (diff > std::numeric_limits<float>::epsilon()) {
    // Saturation depends on which half of the lightness range we are in.
    if (l < 0.5f)
      s = diff / (vmax + vmin);
    else
      s = diff / (2.0f - vmax - vmin);
    // Hue: 60-degree sector determined by the dominant channel.
    diff = 60.f / diff;
    if (vmax == r)
      h = (g - b) * diff;
    else if (vmax == g)
      h = (b - r) * diff + 120.f;
    else
      h = (r - g) * diff + 240.f;
    if (h < 0.f)
      h += 360.f;
  }
  *dst_h = h;
  *dst_l = l;
  *dst_s = s;
}
// Convert one HLS pixel back to RGB floats in 0..255. H is in degrees,
// L and S in [0, 1]; s == 0 yields the achromatic gray r = g = b = l*255.
inline void HLS2RGBConvert(const float& src_h,
                           const float& src_l,
                           const float& src_s,
                           float *dst_r,
                           float *dst_g,
                           float *dst_b) {
  // Per-sector permutation selecting which of tab[0..3] feeds b, g, r.
  static const int c_HlsSectorData[6][3] = {
    { 1, 3, 0 },
    { 1, 0, 2 },
    { 3, 0, 1 },
    { 0, 2, 1 },
    { 0, 1, 3 },
    { 2, 1, 0 }
  };
  float h = src_h;
  const float l = src_l;
  const float s = src_s;
  float b = l, g = l, r = l;
  if (s != 0) {
    float p2;
    if (l <= 0.5f)
      p2 = l * (1 + s);
    else
      p2 = l + s - l * s;
    const float p1 = 2 * l - p2;
    // Normalize hue into six 60-degree sectors.
    h *= 1.f / 60.f;
    while (h < 0) h += 6;
    while (h >= 6) h -= 6;
    const int sector = static_cast<int>(h);
    h -= sector;
    float tab[4];
    tab[0] = p2;
    tab[1] = p1;
    tab[2] = p1 + (p2 - p1) * (1 - h);
    tab[3] = p1 + (p2 - p1) * h;
    b = tab[c_HlsSectorData[sector][0]];
    g = tab[c_HlsSectorData[sector][1]];
    r = tab[c_HlsSectorData[sector][2]];
  }
  *dst_b = b * 255.f;
  *dst_g = g * 255.f;
  *dst_r = r * 255.f;
}
inline void AdjustHueImpl(float alpha,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
int length = inputs[0].shape_[0] * inputs[0].shape_[1];
if (inputs[0].shape_[2] == 1) return;
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* input = inputs[0].dptr<DType>();
DType* output = outputs[0].dptr<DType>();
for (int i = 0; i < length; ++i) {
float h, l, s;
float r = static_cast<float>(*(input++));
float g = static_cast<float>(*(input++));
float b = static_cast<float>(*(input++));
RGB2HLSConvert(r, g, b, &h, &l, &s);
h += alpha * 360.f;
HLS2RGBConvert(h, l, s, &r, &g, &b);
*(output++) = saturate_cast<DType>(r);
*(output++) = saturate_cast<DType>(g);
*(output++) = saturate_cast<DType>(b);
}
});
}
// Draw a hue rotation uniformly from [min_factor, max_factor] (as a
// fraction of a full 360-degree turn) and apply it via AdjustHueImpl.
inline void RandomHue(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  std::uniform_real_distribution<float> sample(param.min_factor, param.max_factor);
  const float factor = sample(prnd->GetRndEngine());
  AdjustHueImpl(factor, ctx, inputs, req, outputs);
}
// Parameters for the combined color-jitter operator: each field gives the
// maximum jitter amplitude for one adjustment; a value of 0 disables that
// adjustment entirely (see RandomColorJitter).
struct RandomColorJitterParam : public dmlc::Parameter<RandomColorJitterParam> {
  float brightness;   // factor drawn from 1 +/- brightness
  float contrast;     // factor drawn from 1 +/- contrast
  float saturation;   // factor drawn from 1 +/- saturation
  float hue;          // hue rotation drawn from +/- hue (fraction of 360 deg)
  DMLC_DECLARE_PARAMETER(RandomColorJitterParam) {
    DMLC_DECLARE_FIELD(brightness)
    .describe("How much to jitter brightness.");
    DMLC_DECLARE_FIELD(contrast)
    .describe("How much to jitter contrast.");
    DMLC_DECLARE_FIELD(saturation)
    .describe("How much to jitter saturation.");
    DMLC_DECLARE_FIELD(hue)
    .describe("How much to jitter hue.");
  }
};
// Apply brightness/contrast/saturation/hue jitter in a randomly shuffled
// order. `flag` records whether an earlier stage already wrote `outputs`;
// once set, later stages read from `outputs` so the adjustments chain.
// Each enabled stage draws its own factor from the shared RNG, so the
// consumed random stream depends on the shuffled order.
inline void RandomColorJitter(const nnvm::NodeAttrs &attrs,
                              const OpContext &ctx,
                              const std::vector<TBlob> &inputs,
                              const std::vector<OpReqType> &req,
                              const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomColorJitterParam &param = nnvm::get<RandomColorJitterParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  // Randomize the order in which the four adjustments are applied.
  int order[4] = {0, 1, 2, 3};
  std::shuffle(order, order + 4, prnd->GetRndEngine());
  bool flag = false;  // true once `outputs` holds the partially-jittered image
  for (int i = 0; i < 4; ++i) {
    switch (order[i]) {
      case 0:
        // Brightness: factor in [1 - brightness, 1 + brightness].
        if (param.brightness > 0) {
          float alpha_b = 1.0 + std::uniform_real_distribution<float>(
              -param.brightness, param.brightness)(prnd->GetRndEngine());
          AdjustBrightnessImpl(alpha_b, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 1:
        // Contrast: factor in [1 - contrast, 1 + contrast].
        if (param.contrast > 0) {
          float alpha_c = 1.0 + std::uniform_real_distribution<float>(
              -param.contrast, param.contrast)(prnd->GetRndEngine());
          AdjustContrastImpl(alpha_c, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 2:
        // Saturation: factor in [1 - saturation, 1 + saturation].
        if (param.saturation > 0) {
          float alpha_s = 1.f + std::uniform_real_distribution<float>(
              -param.saturation, param.saturation)(prnd->GetRndEngine());
          AdjustSaturationImpl(alpha_s, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 3:
        // Hue: rotation in [-hue, +hue] (fraction of a full turn).
        if (param.hue > 0) {
          float alpha_h = std::uniform_real_distribution<float>(
              -param.hue, param.hue)(prnd->GetRndEngine());
          AdjustHueImpl(alpha_h, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
    }
  }
}
// Parameters for the deterministic PCA-lighting adjustment: one alpha
// coefficient per RGB channel (see AdjustLightingImpl).
struct AdjustLightingParam : public dmlc::Parameter<AdjustLightingParam> {
  nnvm::Tuple<float> alpha;   // three PCA projection coefficients (R, G, B)
  DMLC_DECLARE_PARAMETER(AdjustLightingParam) {
    DMLC_DECLARE_FIELD(alpha)
    .describe("The lighting alphas for the R, G, B channels.");
  }
};
// Parameters for the random PCA-lighting augmentation: the per-channel
// alphas are drawn from N(0, alpha_std) (see RandomLighting).
struct RandomLightingParam : public dmlc::Parameter<RandomLightingParam> {
  float alpha_std;   // standard deviation of the Gaussian noise
  DMLC_DECLARE_PARAMETER(RandomLightingParam) {
    DMLC_DECLARE_FIELD(alpha_std)
    .set_default(0.05)
    .describe("Level of the lighting noise.");
  }
};
// Add AlexNet-style PCA lighting noise: project the alpha coefficients
// through the (eigenvalue-scaled) RGB eigenvector table and add the
// resulting constant per-channel offset to every pixel. Grayscale input
// is returned unchanged.
inline void AdjustLightingImpl(const nnvm::Tuple<float>& alpha,
                               const OpContext &ctx,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
  // Eigenvectors scaled by their eigenvalues, rows = R/G/B output channels.
  static const float eig[3][3] = {
    { 55.46 * -0.5675, 4.794 * 0.7192, 1.148 * 0.4009 },
    { 55.46 * -0.5808, 4.794 * -0.0045, 1.148 * -0.8140 },
    { 55.46 * -0.5836, 4.794 * -0.6948, 1.148 * 0.4203 }
  };
  const int npixels = inputs[0].shape_[0] * inputs[0].shape_[1];
  const int nchannels = inputs[0].shape_[2];
  if (nchannels == 1) return;
  const float pca_r = eig[0][0] * alpha[0] + eig[0][1] * alpha[1] + eig[0][2] * alpha[2];
  const float pca_g = eig[1][0] * alpha[0] + eig[1][1] * alpha[1] + eig[1][2] * alpha[2];
  const float pca_b = eig[2][0] * alpha[0] + eig[2][1] * alpha[1] + eig[2][2] * alpha[2];
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* dst = outputs[0].dptr<DType>();
    DType* src = inputs[0].dptr<DType>();
    for (int p = 0; p < npixels; p++) {
      const int base = 3 * p;
      dst[base] = saturate_cast<DType>(static_cast<float>(src[base]) + pca_r);
      dst[base + 1] = saturate_cast<DType>(static_cast<float>(src[base + 1]) + pca_g);
      dst[base + 2] = saturate_cast<DType>(static_cast<float>(src[base + 2]) + pca_b);
    }
  });
}
// Deterministic lighting adjustment: forwards the user-supplied alphas
// from AdjustLightingParam straight to AdjustLightingImpl.
inline void AdjustLighting(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const AdjustLightingParam &params = nnvm::get<AdjustLightingParam>(attrs.parsed);
  AdjustLightingImpl(params.alpha, ctx, inputs, req, outputs);
}
// Random PCA lighting: draw one alpha per channel from N(0, alpha_std)
// and delegate to AdjustLightingImpl. The three draws are made in
// R, G, B order so the consumed RNG stream stays the same.
inline void RandomLighting(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomLightingParam &param = nnvm::get<RandomLightingParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  std::normal_distribution<float> noise(0, param.alpha_std);
  const float alpha_r = noise(prnd->GetRndEngine());
  const float alpha_g = noise(prnd->GetRndEngine());
  const float alpha_b = noise(prnd->GetRndEngine());
  AdjustLightingImpl({alpha_r, alpha_g, alpha_b}, ctx, inputs, req, outputs);
}
// Registers a one-input/one-output image augmentation operator: it may run
// in place (input 0 aliases output 0), infers an identical output shape via
// ImageShape, preserves the element type, and backpropagates by copying the
// gradient through unchanged ("_copy").
#define MXNET_REGISTER_IMAGE_AUG_OP(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(1) \
.set_num_outputs(1) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}}; \
}) \
.set_attr<nnvm::FInferShape>("FInferShape", ImageShape) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{ "_copy" }) \
.add_argument("data", "NDArray-or-Symbol", "The input.")
// Same as MXNET_REGISTER_IMAGE_AUG_OP, but additionally requests the
// parallel random resource for operators that draw from the RNG.
#define MXNET_REGISTER_IMAGE_RND_AUG_OP(name) \
MXNET_REGISTER_IMAGE_AUG_OP(name) \
.set_attr<FResourceRequest>("FResourceRequest", \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kRandom}; \
})
} // namespace image
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
|
w9_e2_errors.c |
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
/*
 * 1-D stencil sweep repeated for 1000 time steps, parallelized with OpenMP.
 * Each step writes v[i] = u[i-1] - 2*u[i] + u[i+1] for the interior points,
 * then one thread swaps the u and v buffers; the implicit barriers at the
 * end of the `for` and `single` constructs keep successive steps ordered.
 *
 * Fixes: the original never checked the malloc results and leaked both
 * buffers on exit.
 */
int main(int argc, char const *argv[]) {
  int i, N = 100000;
  double *u = malloc(N * sizeof *u);
  double *v = malloc(N * sizeof *v);
  if (u == NULL || v == NULL) {
    fprintf(stderr, "allocation failed\n");
    free(u);
    free(v);
    return EXIT_FAILURE;
  }
  for (i = 0; i < N; i++) {
    u[i] = 0.001*(i - N/2);
  }
  /* Boundary values are fixed; only the interior is updated each step. */
  v[0] = u[0];
  v[N-1] = u[N-1];
  #pragma omp parallel default(shared)
  {
    int time_step;
    double *tmp;
    for (time_step = 0; time_step < 1000; time_step++)
    {
      /* The loop index of a worksharing `for` is privatized automatically,
         even though `i` is declared in the enclosing (shared) scope. */
      #pragma omp for
      for (i = 1; i < N-1; i++)
        v[i] = u[i-1] - 2*u[i] + u[i+1];
      #pragma omp single
      {
        /* One thread swaps the buffer pointers; the implicit barrier after
           `single` prevents the next sweep from starting early. */
        tmp = v;
        v = u;
        u = tmp;
      }
    }
  }
  free(u);
  free(v);
  return 0;
}
|
feature.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF EEEEE AAA TTTTT U U RRRR EEEEE %
% F E A A T U U R R E %
% FFF EEE AAAAA T U U RRRR EEE %
% F E A A T U U R R E %
% F EEEEE A A T UUU R R EEEEE %
% %
% %
% MagickCore Image Feature Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/feature.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a n n y E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of
% edges in images.
%
% The format of the CannyEdgeImage method is:
%
% Image *CannyEdgeImage(const Image *image,const double radius,
% const double sigma,const double lower_percent,
% const double upper_percent,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the gaussian smoothing filter.
%
% o sigma: the sigma of the gaussian smoothing filter.
%
% o lower_percent: percentage of edge pixels in the lower threshold.
%
% o upper_percent: percentage of edge pixels in the upper threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-pixel state for the Canny filter: the raw gradient magnitude, the
  intensity that survives non-maxima suppression, the quantized gradient
  orientation (0 = N/S, 1 = NW/SE, 2 = E/W, 3 = NE/SW), and the pixel
  coordinates (used when entries double as work-queue items in TraceEdges).
*/
typedef struct _CannyInfo
{
  double
    magnitude,
    intensity;

  int
    orientation;

  ssize_t
    x,
    y;
} CannyInfo;
/*
  Return MagickTrue when the coordinate (x, y) lies inside the image bounds.
*/
static inline MagickBooleanType IsAuthenticPixel(const Image *image,
  const ssize_t x,const ssize_t y)
{
  if ((x >= 0) && (x < (ssize_t) image->columns) &&
      (y >= 0) && (y < (ssize_t) image->rows))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  TraceEdges() performs the connectivity part of hysteresis thresholding:
  starting from the strong-edge seed (x, y) it marks the seed in the edge
  image and then follows 8-connected neighbors whose suppressed intensity
  is at least lower_threshold, promoting each to an edge pixel. An explicit
  stack of pending coordinates is kept in row 0 of canny_cache, with `i`
  as the stack depth.

  NOTE(review): the stack shares storage with the per-pixel CannyInfo
  matrix (entries (0..i, 0)), so tracing overwrites gradient data in row 0.
  This appears intentional/tolerated upstream -- confirm before refactoring.
*/
static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view,
  MatrixInfo *canny_cache,const ssize_t x,const ssize_t y,
  const double lower_threshold,ExceptionInfo *exception)
{
  CannyInfo
    edge,
    pixel;

  MagickBooleanType
    status;

  register Quantum
    *q;

  register ssize_t
    i;

  /* Mark the seed pixel as an edge. */
  q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception);
  if (q == (Quantum *) NULL)
    return(MagickFalse);
  *q=QuantumRange;
  status=SyncCacheViewAuthenticPixels(edge_view,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  /* Push the seed coordinate onto the work stack (row 0 of canny_cache). */
  if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  edge.x=x;
  edge.y=y;
  if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  for (i=1; i != 0; )
  {
    ssize_t
      v;

    /* Pop the most recently pushed coordinate. */
    i--;
    status=GetMatrixElement(canny_cache,i,0,&edge);
    if (status == MagickFalse)
      return(MagickFalse);
    for (v=(-1); v <= 1; v++)
    {
      ssize_t
        u;

      for (u=(-1); u <= 1; u++)
      {
        if ((u == 0) && (v == 0))
          continue;
        if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse)
          continue;
        /*
          Not an edge if gradient value is below the lower threshold.
        */
        q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1,
          exception);
        if (q == (Quantum *) NULL)
          return(MagickFalse);
        status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel);
        if (status == MagickFalse)
          return(MagickFalse);
        if ((GetPixelIntensity(edge_image,q) == 0.0) &&
            (pixel.intensity >= lower_threshold))
          {
            /* Unvisited weak edge connected to an edge: promote it and
               push it so tracing continues from there. */
            *q=QuantumRange;
            status=SyncCacheViewAuthenticPixels(edge_view,exception);
            if (status == MagickFalse)
              return(MagickFalse);
            edge.x+=u;
            edge.y+=v;
            status=SetMatrixElement(canny_cache,i,0,&edge);
            if (status == MagickFalse)
              return(MagickFalse);
            i++;
          }
      }
    }
  }
  return(MagickTrue);
}
/*
  CannyEdgeImage() implements the classic multi-stage Canny detector:
  (1) Gaussian blur to suppress noise, (2) per-pixel intensity gradient
  (magnitude plus quantized orientation) from 2x2 difference kernels,
  (3) non-maxima suppression along the gradient direction, and
  (4) hysteresis thresholding that keeps weak edges only when connected
  to strong ones (via TraceEdges). Returns the edge image or NULL.
*/
MagickExport Image *CannyEdgeImage(const Image *image,const double radius,
  const double sigma,const double lower_percent,const double upper_percent,
  ExceptionInfo *exception)
{
#define CannyEdgeImageTag "CannyEdge/Image"

  CacheView
    *edge_view;

  CannyInfo
    element;

  char
    geometry[MagickPathExtent];

  double
    lower_threshold,
    max,
    min,
    upper_threshold;

  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *canny_cache;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Filter out noise.
  */
  /* Two separable directional blurs (0 and 90 degrees) approximate the
     Gaussian smoothing stage. */
  (void) FormatLocaleString(geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  edge_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  if (TransformImageColorspace(edge_image,GRAYColorspace,exception) == MagickFalse)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(edge_image,OffAlphaChannel,exception);
  /*
    Find the intensity gradient of the image.
  */
  canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows,
    sizeof(CannyInfo),exception);
  if (canny_cache == (MatrixInfo *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Fetch one extra column and row so the 2x2 forward differences can
       read the right/bottom neighbors. */
    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2,
      exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      double
        dx,
        dy;

      register const Quantum
        *magick_restrict kernel_pixels;

      ssize_t
        v;

      /* 2x2 horizontal/vertical difference kernels (Roberts-style). */
      static double
        Gx[2][2] =
        {
          { -1.0, +1.0 },
          { -1.0, +1.0 }
        },
        Gy[2][2] =
        {
          { +1.0, +1.0 },
          { -1.0, -1.0 }
        };

      (void) memset(&pixel,0,sizeof(pixel));
      dx=0.0;
      dy=0.0;
      kernel_pixels=p;
      for (v=0; v < 2; v++)
      {
        ssize_t
          u;

        for (u=0; u < 2; u++)
        {
          double
            intensity;

          intensity=GetPixelIntensity(edge_image,kernel_pixels+u);
          dx+=0.5*Gx[v][u]*intensity;
          dy+=0.5*Gy[v][u]*intensity;
        }
        kernel_pixels+=edge_image->columns+1;
      }
      pixel.magnitude=hypot(dx,dy);
      /* Quantize the gradient direction into four sectors at the
         tan(22.5)/tan(67.5) slope boundaries:
         0 = N/S, 1 = NW/SE, 2 = E/W, 3 = NE/SW. */
      pixel.orientation=0;
      if (fabs(dx) > MagickEpsilon)
        {
          double
            slope;

          slope=dy/dx;
          if (slope < 0.0)
            {
              if (slope < -2.41421356237)
                pixel.orientation=0;
              else
                if (slope < -0.414213562373)
                  pixel.orientation=1;
                else
                  pixel.orientation=2;
            }
          else
            {
              if (slope > 2.41421356237)
                pixel.orientation=0;
              else
                if (slope > 0.414213562373)
                  pixel.orientation=3;
                else
                  pixel.orientation=2;
            }
        }
      if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse)
        continue;
      /* NOTE(review): a failed SetMatrixElement skips this pointer
         advance, desynchronizing p from x -- looks like a latent bug;
         confirm upstream before changing. */
      p+=GetPixelChannels(edge_image);
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Non-maxima suppression, remove pixels that are not considered to be part
    of an edge.
  */
  progress=0;
  /* Seed min/max from entry (0,0); intensity is still 0 from the memset in
     the gradient pass. */
  (void) GetMatrixElement(canny_cache,0,0,&element);
  max=element.intensity;
  min=element.intensity;
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        alpha_pixel,
        beta_pixel,
        pixel;

      (void) GetMatrixElement(canny_cache,x,y,&pixel);
      /* Compare against the two neighbors along the gradient direction. */
      switch (pixel.orientation)
      {
        case 0:
        default:
        {
          /*
            0 degrees, north and south.
          */
          (void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel);
          break;
        }
        case 1:
        {
          /*
            45 degrees, northwest and southeast.
          */
          (void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel);
          break;
        }
        case 2:
        {
          /*
            90 degrees, east and west.
          */
          (void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel);
          break;
        }
        case 3:
        {
          /*
            135 degrees, northeast and southwest.
          */
          (void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel);
          (void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel);
          break;
        }
      }
      /* Keep the magnitude only when it is a local maximum along the
         gradient direction. */
      pixel.intensity=pixel.magnitude;
      if ((pixel.magnitude < alpha_pixel.magnitude) ||
          (pixel.magnitude < beta_pixel.magnitude))
        pixel.intensity=0;
      (void) SetMatrixElement(canny_cache,x,y,&pixel);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
      {
        if (pixel.intensity < min)
          min=pixel.intensity;
        if (pixel.intensity > max)
          max=pixel.intensity;
      }
      /* Clear the canvas; hysteresis lights the surviving edges below. */
      *q=0;
      q+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Estimate hysteresis threshold.
  */
  /* Thresholds are the requested percentiles of the suppressed-intensity
     range. */
  lower_threshold=lower_percent*(max-min)+min;
  upper_threshold=upper_percent*(max-min)+min;
  /*
    Hysteresis threshold.
  */
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      register const Quantum
        *magick_restrict p;

      /*
        Edge if pixel gradient higher than upper threshold.
      */
      p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception);
      if (p == (const Quantum *) NULL)
        continue;
      status=GetMatrixElement(canny_cache,x,y,&pixel);
      if (status == MagickFalse)
        continue;
      /* Unvisited strong pixel: seed an edge trace that also absorbs
         connected weak pixels above lower_threshold. */
      if ((GetPixelIntensity(edge_image,p) == 0.0) &&
          (pixel.intensity >= upper_threshold))
        status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold,
          exception);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
        proceed=SetImageProgress(image,CannyEdgeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Free resources.
  */
  canny_cache=DestroyMatrixInfo(canny_cache);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e F e a t u r e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageFeatures() returns features for each channel in the image in
% each of four directions (horizontal, vertical, left and right diagonals)
% for the specified distance. The features include the angular second
% moment, contrast, correlation, sum of squares: variance, inverse difference
% moment, sum average, sum variance, sum entropy, entropy, difference
% variance, difference entropy, information measures of correlation 1, information
% measures of correlation 2, and maximum correlation coefficient. You can
% access the red channel contrast, for example, like this:
%
% channel_features=GetImageFeatures(image,1,exception);
% contrast=channel_features[RedPixelChannel].contrast[0];
%
% Use MagickRelinquishMemory() to free the features buffer.
%
% The format of the GetImageFeatures method is:
%
% ChannelFeatures *GetImageFeatures(const Image *image,
% const size_t distance,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o distance: the distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelFeatures *GetImageFeatures(const Image *image,
const size_t distance,ExceptionInfo *exception)
{
typedef struct _ChannelStatistics
{
PixelInfo
direction[4]; /* horizontal, vertical, left and right diagonals */
} ChannelStatistics;
CacheView
*image_view;
ChannelFeatures
*channel_features;
ChannelStatistics
**cooccurrence,
correlation,
*density_x,
*density_xy,
*density_y,
entropy_x,
entropy_xy,
entropy_xy1,
entropy_xy2,
entropy_y,
mean,
**Q,
*sum,
sum_squares,
variance;
PixelPacket
gray,
*grays;
MagickBooleanType
status;
register ssize_t
i,
r;
size_t
length;
unsigned int
number_grays;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->columns < (distance+1)) || (image->rows < (distance+1)))
return((ChannelFeatures *) NULL);
length=MaxPixelChannels+1UL;
channel_features=(ChannelFeatures *) AcquireQuantumMemory(length,
sizeof(*channel_features));
if (channel_features == (ChannelFeatures *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(channel_features,0,length*
sizeof(*channel_features));
/*
Form grays.
*/
grays=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays));
if (grays == (PixelPacket *) NULL)
{
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
for (i=0; i <= (ssize_t) MaxMap; i++)
{
grays[i].red=(~0U);
grays[i].green=(~0U);
grays[i].blue=(~0U);
grays[i].alpha=(~0U);
grays[i].black=(~0U);
}
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (r=0; r < (ssize_t) image->rows; r++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,r,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
grays[ScaleQuantumToMap(GetPixelRed(image,p))].red=
ScaleQuantumToMap(GetPixelRed(image,p));
grays[ScaleQuantumToMap(GetPixelGreen(image,p))].green=
ScaleQuantumToMap(GetPixelGreen(image,p));
grays[ScaleQuantumToMap(GetPixelBlue(image,p))].blue=
ScaleQuantumToMap(GetPixelBlue(image,p));
if (image->colorspace == CMYKColorspace)
grays[ScaleQuantumToMap(GetPixelBlack(image,p))].black=
ScaleQuantumToMap(GetPixelBlack(image,p));
if (image->alpha_trait != UndefinedPixelTrait)
grays[ScaleQuantumToMap(GetPixelAlpha(image,p))].alpha=
ScaleQuantumToMap(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
return(channel_features);
}
(void) memset(&gray,0,sizeof(gray));
for (i=0; i <= (ssize_t) MaxMap; i++)
{
if (grays[i].red != ~0U)
grays[gray.red++].red=grays[i].red;
if (grays[i].green != ~0U)
grays[gray.green++].green=grays[i].green;
if (grays[i].blue != ~0U)
grays[gray.blue++].blue=grays[i].blue;
if (image->colorspace == CMYKColorspace)
if (grays[i].black != ~0U)
grays[gray.black++].black=grays[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
if (grays[i].alpha != ~0U)
grays[gray.alpha++].alpha=grays[i].alpha;
}
/*
Allocate spatial dependence matrix.
*/
number_grays=gray.red;
if (gray.green > number_grays)
number_grays=gray.green;
if (gray.blue > number_grays)
number_grays=gray.blue;
if (image->colorspace == CMYKColorspace)
if (gray.black > number_grays)
number_grays=gray.black;
if (image->alpha_trait != UndefinedPixelTrait)
if (gray.alpha > number_grays)
number_grays=gray.alpha;
cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays,
sizeof(*cooccurrence));
density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_x));
density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_xy));
density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1),
sizeof(*density_y));
Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q));
sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum));
if ((cooccurrence == (ChannelStatistics **) NULL) ||
(density_x == (ChannelStatistics *) NULL) ||
(density_xy == (ChannelStatistics *) NULL) ||
(density_y == (ChannelStatistics *) NULL) ||
(Q == (ChannelStatistics **) NULL) ||
(sum == (ChannelStatistics *) NULL))
{
if (Q != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
}
if (sum != (ChannelStatistics *) NULL)
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
if (density_y != (ChannelStatistics *) NULL)
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
if (density_xy != (ChannelStatistics *) NULL)
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
if (density_x != (ChannelStatistics *) NULL)
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
if (cooccurrence != (ChannelStatistics **) NULL)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(
cooccurrence);
}
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
(void) memset(&correlation,0,sizeof(correlation));
(void) memset(density_x,0,2*(number_grays+1)*sizeof(*density_x));
(void) memset(density_xy,0,2*(number_grays+1)*sizeof(*density_xy));
(void) memset(density_y,0,2*(number_grays+1)*sizeof(*density_y));
(void) memset(&mean,0,sizeof(mean));
(void) memset(sum,0,number_grays*sizeof(*sum));
(void) memset(&sum_squares,0,sizeof(sum_squares));
(void) memset(density_xy,0,2*number_grays*sizeof(*density_xy));
(void) memset(&entropy_x,0,sizeof(entropy_x));
(void) memset(&entropy_xy,0,sizeof(entropy_xy));
(void) memset(&entropy_xy1,0,sizeof(entropy_xy1));
(void) memset(&entropy_xy2,0,sizeof(entropy_xy2));
(void) memset(&entropy_y,0,sizeof(entropy_y));
(void) memset(&variance,0,sizeof(variance));
for (i=0; i < (ssize_t) number_grays; i++)
{
cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,
sizeof(**cooccurrence));
Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q));
if ((cooccurrence[i] == (ChannelStatistics *) NULL) ||
(Q[i] == (ChannelStatistics *) NULL))
break;
(void) memset(cooccurrence[i],0,number_grays*
sizeof(**cooccurrence));
(void) memset(Q[i],0,number_grays*sizeof(**Q));
}
if (i < (ssize_t) number_grays)
{
for (i--; i >= 0; i--)
{
if (Q[i] != (ChannelStatistics *) NULL)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
if (cooccurrence[i] != (ChannelStatistics *) NULL)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
}
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
grays=(PixelPacket *) RelinquishMagickMemory(grays);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Initialize spatial dependence matrix.
*/
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
for (r=0; r < (ssize_t) image->rows; r++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
ssize_t
offset,
u,
v;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,r,image->columns+
2*distance,distance+2,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
p+=distance*GetPixelChannels(image);;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < 4; i++)
{
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
offset=(ssize_t) distance;
break;
}
case 1:
{
/*
Vertical adjacency.
*/
offset=(ssize_t) (image->columns+2*distance);
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
offset=(ssize_t) ((image->columns+2*distance)+distance);
break;
}
}
u=0;
v=0;
while (grays[u].red != ScaleQuantumToMap(GetPixelRed(image,p)))
u++;
while (grays[v].red != ScaleQuantumToMap(GetPixelRed(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].red++;
cooccurrence[v][u].direction[i].red++;
u=0;
v=0;
while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(image,p)))
u++;
while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].green++;
cooccurrence[v][u].direction[i].green++;
u=0;
v=0;
while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(image,p)))
u++;
while (grays[v].blue != ScaleQuantumToMap(GetPixelBlue(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].blue++;
cooccurrence[v][u].direction[i].blue++;
if (image->colorspace == CMYKColorspace)
{
u=0;
v=0;
while (grays[u].black != ScaleQuantumToMap(GetPixelBlack(image,p)))
u++;
while (grays[v].black != ScaleQuantumToMap(GetPixelBlack(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].black++;
cooccurrence[v][u].direction[i].black++;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
u=0;
v=0;
while (grays[u].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p)))
u++;
while (grays[v].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p+offset*GetPixelChannels(image))))
v++;
cooccurrence[u][v].direction[i].alpha++;
cooccurrence[v][u].direction[i].alpha++;
}
}
p+=GetPixelChannels(image);
}
}
grays=(PixelPacket *) RelinquishMagickMemory(grays);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
channel_features=(ChannelFeatures *) RelinquishMagickMemory(
channel_features);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(channel_features);
}
/*
Normalize spatial dependence matrix.
*/
for (i=0; i < 4; i++)
{
double
normalize;
register ssize_t
y;
switch (i)
{
case 0:
default:
{
/*
Horizontal adjacency.
*/
normalize=2.0*image->rows*(image->columns-distance);
break;
}
case 1:
{
/*
Vertical adjacency.
*/
normalize=2.0*(image->rows-distance)*image->columns;
break;
}
case 2:
{
/*
Right diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
case 3:
{
/*
Left diagonal adjacency.
*/
normalize=2.0*(image->rows-distance)*(image->columns-distance);
break;
}
}
normalize=PerceptibleReciprocal(normalize);
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
cooccurrence[x][y].direction[i].red*=normalize;
cooccurrence[x][y].direction[i].green*=normalize;
cooccurrence[x][y].direction[i].blue*=normalize;
if (image->colorspace == CMYKColorspace)
cooccurrence[x][y].direction[i].black*=normalize;
if (image->alpha_trait != UndefinedPixelTrait)
cooccurrence[x][y].direction[i].alpha*=normalize;
}
}
}
/*
Compute texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Angular second moment: measure of homogeneity of the image.
*/
channel_features[RedPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].red*
cooccurrence[x][y].direction[i].red;
channel_features[GreenPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].green*
cooccurrence[x][y].direction[i].green;
channel_features[BluePixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].blue*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].black*
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].angular_second_moment[i]+=
cooccurrence[x][y].direction[i].alpha*
cooccurrence[x][y].direction[i].alpha;
/*
Correlation: measure of linear-dependencies in the image.
*/
sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum[y].direction[i].black+=cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
sum[y].direction[i].alpha+=cooccurrence[x][y].direction[i].alpha;
correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red;
correlation.direction[i].green+=x*y*
cooccurrence[x][y].direction[i].green;
correlation.direction[i].blue+=x*y*
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
correlation.direction[i].black+=x*y*
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
correlation.direction[i].alpha+=x*y*
cooccurrence[x][y].direction[i].alpha;
/*
Inverse Difference Moment.
*/
channel_features[RedPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1);
channel_features[GreenPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1);
channel_features[BluePixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].black/((y-x)*(y-x)+1);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].inverse_difference_moment[i]+=
cooccurrence[x][y].direction[i].alpha/((y-x)*(y-x)+1);
/*
Sum average.
*/
density_xy[y+x+2].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[y+x+2].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[y+x+2].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[y+x+2].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_xy[y+x+2].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
/*
Entropy.
*/
channel_features[RedPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
channel_features[GreenPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
channel_features[BluePixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].black*
MagickLog10(cooccurrence[x][y].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].entropy[i]-=
cooccurrence[x][y].direction[i].alpha*
MagickLog10(cooccurrence[x][y].direction[i].alpha);
/*
Information Measures of Correlation.
*/
density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->alpha_trait != UndefinedPixelTrait)
density_x[x].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
if (image->colorspace == CMYKColorspace)
density_x[x].direction[i].black+=
cooccurrence[x][y].direction[i].black;
density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red;
density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green;
density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_y[y].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_y[y].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
}
mean.direction[i].red+=y*sum[y].direction[i].red;
sum_squares.direction[i].red+=y*y*sum[y].direction[i].red;
mean.direction[i].green+=y*sum[y].direction[i].green;
sum_squares.direction[i].green+=y*y*sum[y].direction[i].green;
mean.direction[i].blue+=y*sum[y].direction[i].blue;
sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
{
mean.direction[i].black+=y*sum[y].direction[i].black;
sum_squares.direction[i].black+=y*y*sum[y].direction[i].black;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
mean.direction[i].alpha+=y*sum[y].direction[i].alpha;
sum_squares.direction[i].alpha+=y*y*sum[y].direction[i].alpha;
}
}
/*
Correlation: measure of linear-dependencies in the image.
*/
channel_features[RedPixelChannel].correlation[i]=
(correlation.direction[i].red-mean.direction[i].red*
mean.direction[i].red)/(sqrt(sum_squares.direction[i].red-
(mean.direction[i].red*mean.direction[i].red))*sqrt(
sum_squares.direction[i].red-(mean.direction[i].red*
mean.direction[i].red)));
channel_features[GreenPixelChannel].correlation[i]=
(correlation.direction[i].green-mean.direction[i].green*
mean.direction[i].green)/(sqrt(sum_squares.direction[i].green-
(mean.direction[i].green*mean.direction[i].green))*sqrt(
sum_squares.direction[i].green-(mean.direction[i].green*
mean.direction[i].green)));
channel_features[BluePixelChannel].correlation[i]=
(correlation.direction[i].blue-mean.direction[i].blue*
mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue-
(mean.direction[i].blue*mean.direction[i].blue))*sqrt(
sum_squares.direction[i].blue-(mean.direction[i].blue*
mean.direction[i].blue)));
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].correlation[i]=
(correlation.direction[i].black-mean.direction[i].black*
mean.direction[i].black)/(sqrt(sum_squares.direction[i].black-
(mean.direction[i].black*mean.direction[i].black))*sqrt(
sum_squares.direction[i].black-(mean.direction[i].black*
mean.direction[i].black)));
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].correlation[i]=
(correlation.direction[i].alpha-mean.direction[i].alpha*
mean.direction[i].alpha)/(sqrt(sum_squares.direction[i].alpha-
(mean.direction[i].alpha*mean.direction[i].alpha))*sqrt(
sum_squares.direction[i].alpha-(mean.direction[i].alpha*
mean.direction[i].alpha)));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=2; x < (ssize_t) (2*number_grays); x++)
{
/*
Sum average.
*/
channel_features[RedPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].red;
channel_features[GreenPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].green;
channel_features[BluePixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_average[i]+=
x*density_xy[x].direction[i].alpha;
/*
Sum entropy.
*/
channel_features[RedPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BluePixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].black*
MagickLog10(density_xy[x].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_entropy[i]-=
density_xy[x].direction[i].alpha*
MagickLog10(density_xy[x].direction[i].alpha);
/*
Sum variance.
*/
channel_features[RedPixelChannel].sum_variance[i]+=
(x-channel_features[RedPixelChannel].sum_entropy[i])*
(x-channel_features[RedPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].red;
channel_features[GreenPixelChannel].sum_variance[i]+=
(x-channel_features[GreenPixelChannel].sum_entropy[i])*
(x-channel_features[GreenPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].green;
channel_features[BluePixelChannel].sum_variance[i]+=
(x-channel_features[BluePixelChannel].sum_entropy[i])*
(x-channel_features[BluePixelChannel].sum_entropy[i])*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].sum_variance[i]+=
(x-channel_features[BlackPixelChannel].sum_entropy[i])*
(x-channel_features[BlackPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].sum_variance[i]+=
(x-channel_features[AlphaPixelChannel].sum_entropy[i])*
(x-channel_features[AlphaPixelChannel].sum_entropy[i])*
density_xy[x].direction[i].alpha;
}
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
y;
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Sum of Squares: Variance
*/
variance.direction[i].red+=(y-mean.direction[i].red+1)*
(y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red;
variance.direction[i].green+=(y-mean.direction[i].green+1)*
(y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green;
variance.direction[i].blue+=(y-mean.direction[i].blue+1)*
(y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].black+=(y-mean.direction[i].black+1)*
(y-mean.direction[i].black+1)*cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
variance.direction[i].alpha+=(y-mean.direction[i].alpha+1)*
(y-mean.direction[i].alpha+1)*
cooccurrence[x][y].direction[i].alpha;
/*
Sum average / Difference Variance.
*/
density_xy[MagickAbsoluteValue(y-x)].direction[i].red+=
cooccurrence[x][y].direction[i].red;
density_xy[MagickAbsoluteValue(y-x)].direction[i].green+=
cooccurrence[x][y].direction[i].green;
density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+=
cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
density_xy[MagickAbsoluteValue(y-x)].direction[i].black+=
cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
density_xy[MagickAbsoluteValue(y-x)].direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
/*
Information Measures of Correlation.
*/
entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red*
MagickLog10(cooccurrence[x][y].direction[i].red);
entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green*
MagickLog10(cooccurrence[x][y].direction[i].green);
entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue*
MagickLog10(cooccurrence[x][y].direction[i].blue);
if (image->colorspace == CMYKColorspace)
entropy_xy.direction[i].black-=cooccurrence[x][y].direction[i].black*
MagickLog10(cooccurrence[x][y].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy.direction[i].alpha-=
cooccurrence[x][y].direction[i].alpha*MagickLog10(
cooccurrence[x][y].direction[i].alpha);
entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red*
MagickLog10(density_x[x].direction[i].red*density_y[y].direction[i].red));
entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green*
MagickLog10(density_x[x].direction[i].green*
density_y[y].direction[i].green));
entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue*density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy1.direction[i].black-=(
cooccurrence[x][y].direction[i].black*MagickLog10(
density_x[x].direction[i].black*density_y[y].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy1.direction[i].alpha-=(
cooccurrence[x][y].direction[i].alpha*MagickLog10(
density_x[x].direction[i].alpha*density_y[y].direction[i].alpha));
entropy_xy2.direction[i].red-=(density_x[x].direction[i].red*
density_y[y].direction[i].red*MagickLog10(density_x[x].direction[i].red*
density_y[y].direction[i].red));
entropy_xy2.direction[i].green-=(density_x[x].direction[i].green*
density_y[y].direction[i].green*MagickLog10(density_x[x].direction[i].green*
density_y[y].direction[i].green));
entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue*
density_y[y].direction[i].blue*MagickLog10(density_x[x].direction[i].blue*
density_y[y].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_xy2.direction[i].black-=(density_x[x].direction[i].black*
density_y[y].direction[i].black*MagickLog10(
density_x[x].direction[i].black*density_y[y].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_xy2.direction[i].alpha-=(density_x[x].direction[i].alpha*
density_y[y].direction[i].alpha*MagickLog10(
density_x[x].direction[i].alpha*density_y[y].direction[i].alpha));
}
}
channel_features[RedPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].red;
channel_features[GreenPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].green;
channel_features[BluePixelChannel].variance_sum_of_squares[i]=
variance.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].variance_sum_of_squares[i]=
variance.direction[i].alpha;
}
/*
Compute more texture features.
*/
(void) memset(&variance,0,sizeof(variance));
(void) memset(&sum_squares,0,sizeof(sum_squares));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Difference variance.
*/
variance.direction[i].red+=density_xy[x].direction[i].red;
variance.direction[i].green+=density_xy[x].direction[i].green;
variance.direction[i].blue+=density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
variance.direction[i].black+=density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
variance.direction[i].alpha+=density_xy[x].direction[i].alpha;
sum_squares.direction[i].red+=density_xy[x].direction[i].red*
density_xy[x].direction[i].red;
sum_squares.direction[i].green+=density_xy[x].direction[i].green*
density_xy[x].direction[i].green;
sum_squares.direction[i].blue+=density_xy[x].direction[i].blue*
density_xy[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
sum_squares.direction[i].black+=density_xy[x].direction[i].black*
density_xy[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
sum_squares.direction[i].alpha+=density_xy[x].direction[i].alpha*
density_xy[x].direction[i].alpha;
/*
Difference entropy.
*/
channel_features[RedPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].red*
MagickLog10(density_xy[x].direction[i].red);
channel_features[GreenPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].green*
MagickLog10(density_xy[x].direction[i].green);
channel_features[BluePixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].blue*
MagickLog10(density_xy[x].direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].black*
MagickLog10(density_xy[x].direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].difference_entropy[i]-=
density_xy[x].direction[i].alpha*
MagickLog10(density_xy[x].direction[i].alpha);
/*
Information Measures of Correlation.
*/
entropy_x.direction[i].red-=(density_x[x].direction[i].red*
MagickLog10(density_x[x].direction[i].red));
entropy_x.direction[i].green-=(density_x[x].direction[i].green*
MagickLog10(density_x[x].direction[i].green));
entropy_x.direction[i].blue-=(density_x[x].direction[i].blue*
MagickLog10(density_x[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_x.direction[i].black-=(density_x[x].direction[i].black*
MagickLog10(density_x[x].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_x.direction[i].alpha-=(density_x[x].direction[i].alpha*
MagickLog10(density_x[x].direction[i].alpha));
entropy_y.direction[i].red-=(density_y[x].direction[i].red*
MagickLog10(density_y[x].direction[i].red));
entropy_y.direction[i].green-=(density_y[x].direction[i].green*
MagickLog10(density_y[x].direction[i].green));
entropy_y.direction[i].blue-=(density_y[x].direction[i].blue*
MagickLog10(density_y[x].direction[i].blue));
if (image->colorspace == CMYKColorspace)
entropy_y.direction[i].black-=(density_y[x].direction[i].black*
MagickLog10(density_y[x].direction[i].black));
if (image->alpha_trait != UndefinedPixelTrait)
entropy_y.direction[i].alpha-=(density_y[x].direction[i].alpha*
MagickLog10(density_y[x].direction[i].alpha));
}
/*
Difference variance.
*/
channel_features[RedPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].red)-
(variance.direction[i].red*variance.direction[i].red))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[GreenPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].green)-
(variance.direction[i].green*variance.direction[i].green))/
((double) number_grays*number_grays*number_grays*number_grays);
channel_features[BluePixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].blue)-
(variance.direction[i].blue*variance.direction[i].blue))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].black)-
(variance.direction[i].black*variance.direction[i].black))/
((double) number_grays*number_grays*number_grays*number_grays);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].difference_variance[i]=
(((double) number_grays*number_grays*sum_squares.direction[i].alpha)-
(variance.direction[i].alpha*variance.direction[i].alpha))/
((double) number_grays*number_grays*number_grays*number_grays);
/*
Information Measures of Correlation.
*/
channel_features[RedPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/
(entropy_x.direction[i].red > entropy_y.direction[i].red ?
entropy_x.direction[i].red : entropy_y.direction[i].red);
channel_features[GreenPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/
(entropy_x.direction[i].green > entropy_y.direction[i].green ?
entropy_x.direction[i].green : entropy_y.direction[i].green);
channel_features[BluePixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/
(entropy_x.direction[i].blue > entropy_y.direction[i].blue ?
entropy_x.direction[i].blue : entropy_y.direction[i].blue);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].black-entropy_xy1.direction[i].black)/
(entropy_x.direction[i].black > entropy_y.direction[i].black ?
entropy_x.direction[i].black : entropy_y.direction[i].black);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].measure_of_correlation_1[i]=
(entropy_xy.direction[i].alpha-entropy_xy1.direction[i].alpha)/
(entropy_x.direction[i].alpha > entropy_y.direction[i].alpha ?
entropy_x.direction[i].alpha : entropy_y.direction[i].alpha);
channel_features[RedPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].red-
entropy_xy.direction[i].red)))));
channel_features[GreenPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].green-
entropy_xy.direction[i].green)))));
channel_features[BluePixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].blue-
entropy_xy.direction[i].blue)))));
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].black-
entropy_xy.direction[i].black)))));
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].measure_of_correlation_2[i]=
(sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].alpha-
entropy_xy.direction[i].alpha)))));
}
/*
Compute more texture features.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,number_grays,1)
#endif
for (i=0; i < 4; i++)
{
ssize_t
z;
for (z=0; z < (ssize_t) number_grays; z++)
{
register ssize_t
y;
ChannelStatistics
pixel;
(void) memset(&pixel,0,sizeof(pixel));
for (y=0; y < (ssize_t) number_grays; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) number_grays; x++)
{
/*
Contrast: amount of local variations present in an image.
*/
if (((y-x) == z) || ((x-y) == z))
{
pixel.direction[i].red+=cooccurrence[x][y].direction[i].red;
pixel.direction[i].green+=cooccurrence[x][y].direction[i].green;
pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue;
if (image->colorspace == CMYKColorspace)
pixel.direction[i].black+=cooccurrence[x][y].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
pixel.direction[i].alpha+=
cooccurrence[x][y].direction[i].alpha;
}
/*
Maximum Correlation Coefficient.
*/
Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red*
cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/
density_y[x].direction[i].red;
Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green*
cooccurrence[y][x].direction[i].green/
density_x[z].direction[i].green/density_y[x].direction[i].red;
Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue*
cooccurrence[y][x].direction[i].blue/density_x[z].direction[i].blue/
density_y[x].direction[i].blue;
if (image->colorspace == CMYKColorspace)
Q[z][y].direction[i].black+=cooccurrence[z][x].direction[i].black*
cooccurrence[y][x].direction[i].black/
density_x[z].direction[i].black/density_y[x].direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
Q[z][y].direction[i].alpha+=
cooccurrence[z][x].direction[i].alpha*
cooccurrence[y][x].direction[i].alpha/
density_x[z].direction[i].alpha/
density_y[x].direction[i].alpha;
}
}
channel_features[RedPixelChannel].contrast[i]+=z*z*
pixel.direction[i].red;
channel_features[GreenPixelChannel].contrast[i]+=z*z*
pixel.direction[i].green;
channel_features[BluePixelChannel].contrast[i]+=z*z*
pixel.direction[i].blue;
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].contrast[i]+=z*z*
pixel.direction[i].black;
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].contrast[i]+=z*z*
pixel.direction[i].alpha;
}
/*
Maximum Correlation Coefficient.
Future: return second largest eigenvalue of Q.
*/
channel_features[RedPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[GreenPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
channel_features[BluePixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->colorspace == CMYKColorspace)
channel_features[BlackPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
if (image->alpha_trait != UndefinedPixelTrait)
channel_features[AlphaPixelChannel].maximum_correlation_coefficient[i]=
sqrt((double) -1.0);
}
/*
Relinquish resources.
*/
sum=(ChannelStatistics *) RelinquishMagickMemory(sum);
for (i=0; i < (ssize_t) number_grays; i++)
Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]);
Q=(ChannelStatistics **) RelinquishMagickMemory(Q);
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y);
density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy);
density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x);
for (i=0; i < (ssize_t) number_grays; i++)
cooccurrence[i]=(ChannelStatistics *)
RelinquishMagickMemory(cooccurrence[i]);
cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence);
return(channel_features);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H o u g h L i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Use HoughLineImage() in conjunction with any binary edge extracted image (we
% recommand Canny) to identify lines in the image. The algorithm accumulates
% counts for every white pixel for every possible orientation (for angles from
% 0 to 179 in 1 degree increments) and distance from the center of the image to
% the corner (in 1 px increments) and stores the counts in an accumulator matrix
% of angle vs distance. The size of the accumulator is 180x(diagonal/2). Next
% it searches this space for peaks in counts and converts the locations of the
% peaks to slope and intercept in the normal x,y input image space. Use the
% slope/intercepts to find the endpoints clipped to the bounds of the image. The
% lines are then drawn. The counts are a measure of the length of the lines
%
% The format of the HoughLineImage method is:
%
% Image *HoughLineImage(const Image *image,const size_t width,
% const size_t height,const size_t threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find line pairs as local maxima in this neighborhood.
%
% o threshold: the line count threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
static Image *RenderHoughLines(const ImageInfo *image_info,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define BoundingBox  "viewbox"

  /*
    Render the MVG line primitives referenced by image_info->filename onto a
    fresh columns x rows canvas and return the resulting image (NULL on
    failure).  The primitive text is read either from the open blob stream or,
    when no stream data is available, from the file itself.
  */
  DrawInfo
    *draw_info;

  Image
    *image;

  MagickBooleanType
    status;

  /*
    Open image.
  */
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  image->columns=columns;
  image->rows=rows;
  draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
  /*
    Scale the canvas by the image resolution relative to the default
    resolution (e.g. for -density); a zero resolution means no scaling.
  */
  draw_info->affine.sx=image->resolution.x == 0.0 ? 1.0 : image->resolution.x/
    DefaultResolution;
  draw_info->affine.sy=image->resolution.y == 0.0 ? 1.0 : image->resolution.y/
    DefaultResolution;
  image->columns=(size_t) (draw_info->affine.sx*image->columns);
  image->rows=(size_t) (draw_info->affine.sy*image->rows);
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  if (SetImageBackgroundColor(image,exception) == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Render drawing.
  */
  if (GetBlobStreamData(image) == (unsigned char *) NULL)
    draw_info->primitive=FileToString(image->filename,~0UL,exception);
  else
    {
      /*
        Copy the in-memory blob into a NUL-terminated primitive string.
      */
      draw_info->primitive=(char *) AcquireMagickMemory((size_t)
        GetBlobSize(image)+1);
      if (draw_info->primitive != (char *) NULL)
        {
          (void) memcpy(draw_info->primitive,GetBlobStreamData(image),
            (size_t) GetBlobSize(image));
          draw_info->primitive[GetBlobSize(image)]='\0';
        }
    }
  (void) DrawImage(image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
MagickExport Image *HoughLineImage(const Image *image,const size_t width,
  const size_t height,const size_t threshold,ExceptionInfo *exception)
{
#define HoughLineImageTag  "HoughLine/Image"

  /*
    Detect lines in a binary edge image with a Hough transform: accumulate
    votes for every bright pixel over 180 one-degree orientations, find local
    maxima in the (angle x distance) accumulator, convert each peak to a line
    segment clipped to the image bounds, and render the segments.
  */
  CacheView
    *image_view;

  char
    message[MagickPathExtent],
    path[MagickPathExtent];

  const char
    *artifact;

  double
    hough_height;

  Image
    *lines_image = NULL;

  ImageInfo
    *image_info;

  int
    file;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *accumulator;

  PointInfo
    center;

  register ssize_t
    y;

  size_t
    accumulator_height,
    accumulator_width,
    line_count;

  /*
    Create the accumulator: 180 columns (angle in degrees) by the full
    diagonal extent (distance from image center, offset by hough_height).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  accumulator_width=180;
  hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ?
    image->rows : image->columns))/2.0);
  accumulator_height=(size_t) (2.0*hough_height);
  accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height,
    sizeof(double),exception);
  if (accumulator == (MatrixInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (NullMatrix(accumulator) == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Populate the accumulator: each bright pixel votes once per orientation.
  */
  status=MagickTrue;
  progress=0;
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelIntensity(image,p) > (QuantumRange/2.0))
        {
          register ssize_t
            i;

          for (i=0; i < 180; i++)
          {
            double
              count,
              radius;

            /*
              Normal form: radius = x*cos(theta)+y*sin(theta), relative to
              the image center.
            */
            radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+
              (((double) y-center.y)*sin(DegreesToRadians((double) i)));
            (void) GetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
            count++;
            (void) SetMatrixElement(accumulator,i,(ssize_t)
              MagickRound(radius+hough_height),&count);
          }
        }
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HoughLineImage)
#endif
        /*
          Fix: report progress with HoughLineImageTag (this method's tag)
          rather than CannyEdgeImageTag, a copy-paste from CannyEdgeImage.
        */
        proceed=SetImageProgress(image,HoughLineImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  /*
    Generate line segments from accumulator: write MVG primitives to a
    temporary file which RenderHoughLines() later rasterizes.
  */
  file=AcquireUniqueFileResource(path);
  if (file == -1)
    {
      accumulator=DestroyMatrixInfo(accumulator);
      return((Image *) NULL);
    }
  (void) FormatLocaleString(message,MagickPathExtent,
    "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width,
    (double) height,(double) threshold);
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  (void) FormatLocaleString(message,MagickPathExtent,
    "viewbox 0 0 %.20g %.20g\n",(double) image->columns,(double) image->rows);
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  (void) FormatLocaleString(message,MagickPathExtent,
    "# x1,y1 x2,y2 # count angle distance\n");
  if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
    status=MagickFalse;
  line_count=image->columns > image->rows ? image->columns/4 : image->rows/4;
  if (threshold != 0)
    line_count=threshold;
  for (y=0; y < (ssize_t) accumulator_height; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) accumulator_width; x++)
    {
      double
        count;

      (void) GetMatrixElement(accumulator,x,y,&count);
      if (count >= (double) line_count)
        {
          double
            maxima;

          SegmentInfo
            line;

          ssize_t
            v;

          /*
            Is point a local maxima within the width x height neighborhood?
          */
          maxima=count;
          for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
          {
            ssize_t
              u;

            for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
            {
              if ((u != 0) || (v !=0))
                {
                  (void) GetMatrixElement(accumulator,x+u,y+v,&count);
                  if (count > maxima)
                    {
                      maxima=count;
                      break;
                    }
                }
            }
            if (u < (ssize_t) (width/2))
              break;
          }
          (void) GetMatrixElement(accumulator,x,y,&count);
          if (maxima > count)
            continue;
          if ((x >= 45) && (x <= 135))
            {
              /*
                y = (r-x cos(t))/sin(t)
              */
              line.x1=0.0;
              line.y1=((double) (y-(accumulator_height/2.0))-((line.x1-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
              line.x2=(double) image->columns;
              line.y2=((double) (y-(accumulator_height/2.0))-((line.x2-
                (image->columns/2.0))*cos(DegreesToRadians((double) x))))/
                sin(DegreesToRadians((double) x))+(image->rows/2.0);
            }
          else
            {
              /*
                x = (r-y cos(t))/sin(t)
              */
              line.y1=0.0;
              line.x1=((double) (y-(accumulator_height/2.0))-((line.y1-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
              line.y2=(double) image->rows;
              line.x2=((double) (y-(accumulator_height/2.0))-((line.y2-
                (image->rows/2.0))*sin(DegreesToRadians((double) x))))/
                cos(DegreesToRadians((double) x))+(image->columns/2.0);
            }
          (void) FormatLocaleString(message,MagickPathExtent,
            "line %g,%g %g,%g  # %g %g %g\n",line.x1,line.y1,line.x2,line.y2,
            maxima,(double) x,(double) y);
          if (write(file,message,strlen(message)) != (ssize_t) strlen(message))
            status=MagickFalse;
        }
    }
  }
  (void) close(file);
  /*
    Render lines to image canvas, forwarding draw-related artifacts.
  */
  image_info=AcquireImageInfo();
  image_info->background_color=image->background_color;
  (void) FormatLocaleString(image_info->filename,MagickPathExtent,"%s",path);
  artifact=GetImageArtifact(image,"background");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"background",artifact);
  artifact=GetImageArtifact(image,"fill");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"fill",artifact);
  artifact=GetImageArtifact(image,"stroke");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"stroke",artifact);
  artifact=GetImageArtifact(image,"strokewidth");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"strokewidth",artifact);
  lines_image=RenderHoughLines(image_info,image->columns,image->rows,exception);
  artifact=GetImageArtifact(image,"hough-lines:accumulator");
  if ((lines_image != (Image *) NULL) &&
      (IsStringTrue(artifact) != MagickFalse))
    {
      Image
        *accumulator_image;

      accumulator_image=MatrixToImage(accumulator,exception);
      if (accumulator_image != (Image *) NULL)
        AppendImageToList(&lines_image,accumulator_image);
    }
  /*
    Free resources.
  */
  accumulator=DestroyMatrixInfo(accumulator);
  image_info=DestroyImageInfo(image_info);
  (void) RelinquishUniqueFileResource(path);
  return(GetFirstImageInList(lines_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M e a n S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MeanShiftImage() delineate arbitrarily shaped clusters in the image. For
% each pixel, it visits all the pixels in the neighborhood specified by
% the window centered at the pixel and excludes those that are outside the
% radius=(window-1)/2 surrounding the pixel. From those pixels, it finds those
% that are within the specified color distance from the current mean, and
% computes a new x,y centroid from those coordinates and a new mean. This new
% x,y centroid is used as the center for a new window. This process iterates
% until it converges and the final mean is replaces the (original window
% center) pixel value. It repeats this process for the next pixel, etc.,
% until it processes all pixels in the image. Results are typically better with
% colorspaces other than sRGB. We recommend YIQ, YUV or YCbCr.
%
% The format of the MeanShiftImage method is:
%
% Image *MeanShiftImage(const Image *image,const size_t width,
% const size_t height,const double color_distance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width, height: find pixels in this neighborhood.
%
% o color_distance: the color distance.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MeanShiftImage(const Image *image,const size_t width,
  const size_t height,const double color_distance,ExceptionInfo *exception)
{
#define MaxMeanShiftIterations  100
#define MeanShiftImageTag  "MeanShift/Image"

  /*
    For each pixel, iterate a mean-shift window: average the positions and
    colors of neighborhood pixels within color_distance of the current mean,
    re-center the window on the new mean, and repeat until the shift is small
    or MaxMeanShiftIterations is reached.  The converged color replaces the
    pixel in the cloned result image.
  */
  CacheView
    *image_view,
    *mean_view,
    *pixel_view;

  Image
    *mean_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  mean_image=CloneImage(image,0,0,MagickTrue,exception);
  if (mean_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(mean_image,DirectClass,exception) == MagickFalse)
    {
      mean_image=DestroyImage(mean_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  pixel_view=AcquireVirtualCacheView(image,exception);
  mean_view=AcquireAuthenticCacheView(mean_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,progress) \
    magick_number_threads(mean_image,mean_image,mean_image->rows,1)
#endif
  for (y=0; y < (ssize_t) mean_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) mean_image->columns; x++)
    {
      PixelInfo
        mean_pixel,
        previous_pixel;

      PointInfo
        mean_location,
        previous_location;

      register ssize_t
        i;

      /*
        Start the window centered on the current pixel with its own color
        as the initial mean.
      */
      GetPixelInfo(image,&mean_pixel);
      GetPixelInfoPixel(image,p,&mean_pixel);
      mean_location.x=(double) x;
      mean_location.y=(double) y;
      for (i=0; i < MaxMeanShiftIterations; i++)
      {
        double
          distance,
          gamma;

        PixelInfo
          sum_pixel;

        PointInfo
          sum_location;

        ssize_t
          count,
          v;

        sum_location.x=0.0;
        sum_location.y=0.0;
        GetPixelInfo(image,&sum_pixel);
        previous_location=mean_location;
        previous_pixel=mean_pixel;
        count=0;
        /*
          Scan the elliptical neighborhood (radius width/2 x height/2) and
          accumulate pixels whose color is within color_distance of the mean.
        */
        for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
        {
          ssize_t
            u;

          for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
          {
            if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2)))
              {
                PixelInfo
                  pixel;

                status=GetOneCacheViewVirtualPixelInfo(pixel_view,(ssize_t)
                  MagickRound(mean_location.x+u),(ssize_t) MagickRound(
                  mean_location.y+v),&pixel,exception);
                /*
                  Squared Euclidean distance in RGB between the current mean
                  and the candidate pixel.
                */
                distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+
                  (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+
                  (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue);
                if (distance <= (color_distance*color_distance))
                  {
                    sum_location.x+=mean_location.x+u;
                    sum_location.y+=mean_location.y+v;
                    sum_pixel.red+=pixel.red;
                    sum_pixel.green+=pixel.green;
                    sum_pixel.blue+=pixel.blue;
                    sum_pixel.alpha+=pixel.alpha;
                    count++;
                  }
              }
          }
        }
        /*
          NOTE(review): if no neighborhood pixel matched, count is 0 and this
          divides by zero (NaN mean) -- confirm whether a guard such as
          1.0/MagickMax(count,1) is intended here.
        */
        gamma=1.0/count;
        mean_location.x=gamma*sum_location.x;
        mean_location.y=gamma*sum_location.y;
        mean_pixel.red=gamma*sum_pixel.red;
        mean_pixel.green=gamma*sum_pixel.green;
        mean_pixel.blue=gamma*sum_pixel.blue;
        mean_pixel.alpha=gamma*sum_pixel.alpha;
        /*
          Converged when the squared shift in position plus the squared color
          change (scaled to a 0-255 range) drops to 3.0 or less.
        */
        distance=(mean_location.x-previous_location.x)*
          (mean_location.x-previous_location.x)+
          (mean_location.y-previous_location.y)*
          (mean_location.y-previous_location.y)+
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)*
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)*
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)*
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue);
        if (distance <= 3.0)
          break;
      }
      SetPixelRed(mean_image,ClampToQuantum(mean_pixel.red),q);
      SetPixelGreen(mean_image,ClampToQuantum(mean_pixel.green),q);
      SetPixelBlue(mean_image,ClampToQuantum(mean_pixel.blue),q);
      SetPixelAlpha(mean_image,ClampToQuantum(mean_pixel.alpha),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mean_image);
    }
    if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MeanShiftImage)
#endif
        proceed=SetImageProgress(image,MeanShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  mean_view=DestroyCacheView(mean_view);
  pixel_view=DestroyCacheView(pixel_view);
  image_view=DestroyCacheView(image_view);
  return(mean_image);
}
|
StomOmpc_02.c | /*
poe -rmpool 1 -procs 1
mpcc_r -qsmp=noauto:omp:explicit -O3 -qarch=pwr3 -qtune=pwr3 ompc_02.c bind.o -lm -o ompc_02
*/
#define FLT double
#define INT int
#include "mpi.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#if macintosh
#include <console.h>
#endif
FLT **matrix(INT nrl,INT nrh,INT ncl,INT nch);
FLT *vector(INT nl, INT nh);
INT *ivector(INT nl, INT nh);
INT mint(FLT x);
FLT walltime();
void bc(FLT ** psi,INT i1,INT i2,INT j1,INT j2);
void do_jacobi(FLT ** psi,FLT ** new_psi,FLT *diff,INT i1,INT i2,INT j1,INT j2);
void write_grid(FLT ** psi,INT i1,INT i2,INT j1,INT j2);
void do_transfer(FLT ** psi,INT i1,INT i2,INT j1,INT j2);
void do_force (INT i1,INT i2,INT j1,INT j2);
void do_transfer(FLT ** psi,INT i1,INT i2,INT j1,INT j2);
char* unique(char *name);
FLT force(FLT y);
#define pi 3.141592653589793239
FLT **the_for;
FLT dx,dy,a1,a2,a3,a4,a5,a6;
INT nx,ny;
FLT alpha;
FLT *svec1,*svec2,*rvec1,*rvec2;
INT numnodes,myid,mpi_err;
#define mpi_master 0
INT myrow,mycol;
INT nrow,ncol;
INT myrow,mycol;
INT myid_col,myid_row,nodes_row,nodes_col;
MPI_Status status;
MPI_Comm ROW_COMM,COL_COMM;
INT mytop,mybot,myleft,myright;
int main(int argc, char **argv)
{
  /*
    2-D Jacobi iteration for a Poisson-like equation, decomposed over an
    nrow x ncol MPI process grid.  Rank 0 reads the problem size (nx,ny),
    domain size (lx,ly) and coefficients from stdin, broadcasts them, and
    each rank iterates on its block with halo exchanges via do_transfer().
  */
  FLT lx,ly,beta,gamma;
  INT steps;
  FLT t1,t2;
  /*FLT t3,t4,dt; */
  /* FLT diff */
  FLT mydiff,diff;
  FLT dx2,dy2,bottom;
  FLT di,dj;
  FLT **psi;       /* our calculation grid */
  FLT **new_psi;   /* temp storage for the grid */
  INT i,j,i1,i2,j1,j2;
  INT iout;
#if macintosh
  argc=ccommand(&argv);
#endif
  mpi_err=MPI_Init(&argc,&argv);
  mpi_err=MPI_Comm_size(MPI_COMM_WORLD,&numnodes);
  mpi_err=MPI_Comm_rank(MPI_COMM_WORLD,&myid);
#pragma omp parallel
#pragma omp critical
  {
    /* optional per-thread CPU binding hook (disabled) */
    // thread_bind();
  }
  /*
    ! find a reasonable grid topology based on the number
    ! of processors
  */
  nrow=mint(sqrt((FLT)(numnodes)));
  ncol=numnodes/nrow;
  while (nrow*ncol != numnodes) {
    nrow=nrow+1;
    ncol=numnodes/nrow;
  }
  /* prefer more columns than rows */
  if(nrow > ncol){
    i=ncol;
    ncol=nrow;
    nrow=i;
  }
  myrow=myid/ncol+1;
  mycol=myid - (myrow-1)*ncol + 1;
  if(myid == mpi_master) printf(" nrow= %d ncol= %d\n",nrow ,ncol);
  /*
    ! make the row and col communicators
    ! all processors with the same row will be in the same ROW_COMM
  */
  mpi_err=MPI_Comm_split(MPI_COMM_WORLD,myrow,mycol,&ROW_COMM);
  mpi_err=MPI_Comm_rank( ROW_COMM, &myid_row);
  mpi_err=MPI_Comm_size( ROW_COMM, &nodes_row);
  /* ! all processors with the same col will be in the same COL_COMM */
  mpi_err=MPI_Comm_split(MPI_COMM_WORLD,mycol,myrow,&COL_COMM);
  mpi_err=MPI_Comm_rank( COL_COMM, &myid_col);
  mpi_err=MPI_Comm_size( COL_COMM,& nodes_col);
  /* ! find id of neighbors using the communicators created above */
  /* edges of the process grid get MPI_PROC_NULL so sends/recvs are no-ops */
  mytop   = myid_col-1;  if( mytop < 0 )mytop = MPI_PROC_NULL;
  mybot   = myid_col+1;  if( mybot == nodes_col)mybot = MPI_PROC_NULL;
  myleft  = myid_row-1;  if( myleft < 0 )myleft = MPI_PROC_NULL;
  myright = myid_row+1;  if( myright == nodes_row)myright = MPI_PROC_NULL;
  /* rank 0 reads the run parameters and echoes them */
  if(myid == mpi_master) {
    scanf("%d %d",&nx,&ny);
    scanf("%lg %lg",&lx,&ly);
    scanf("%lg %lg %lg",&alpha,&beta,&gamma);
    scanf("%d",&steps);
    printf("%d %d\n",nx,ny);
    printf("%g %g\n",lx,ly);
    printf("%g %g %g\n",alpha,beta,gamma);
    printf("%d\n",steps);
  }
  mpi_err=MPI_Bcast(&nx,   1,MPI_INT,   mpi_master,MPI_COMM_WORLD);
  mpi_err=MPI_Bcast(&ny,   1,MPI_INT,   mpi_master,MPI_COMM_WORLD);
  mpi_err=MPI_Bcast(&steps,1,MPI_INT,   mpi_master,MPI_COMM_WORLD);
  mpi_err=MPI_Bcast(&lx,   1,MPI_DOUBLE,mpi_master,MPI_COMM_WORLD);
  mpi_err=MPI_Bcast(&ly,   1,MPI_DOUBLE,mpi_master,MPI_COMM_WORLD);
  mpi_err=MPI_Bcast(&alpha,1,MPI_DOUBLE,mpi_master,MPI_COMM_WORLD);
  mpi_err=MPI_Bcast(&beta, 1,MPI_DOUBLE,mpi_master,MPI_COMM_WORLD);
  mpi_err=MPI_Bcast(&gamma,1,MPI_DOUBLE,mpi_master,MPI_COMM_WORLD);
  /* calculate the constants for the calculations */
  dx=lx/(nx+1);
  dy=ly/(ny+1);
  dx2=dx*dx;
  dy2=dy*dy;
  bottom=2.0*(dx2+dy2);
  a1=(dy2/bottom)+(beta*dx2*dy2)/(2.0*gamma*dx*bottom);
  a2=(dy2/bottom)-(beta*dx2*dy2)/(2.0*gamma*dx*bottom);
  a3=dx2/bottom;
  a4=dx2/bottom;
  a5=dx2*dy2/(gamma*bottom);
  a6=pi/(ly);
  /* set the indices for the interior of the grid */
  /* i runs over 1..nx (split across columns), j over 1..ny (split across rows) */
  dj=(FLT)ny/(FLT)nodes_row;
  j1=mint(1.0+myid_row*dj);
  j2=mint(1.0+(myid_row+1)*dj)-1;
  di=(FLT)nx/(FLT)nodes_col;
  i1=mint(1.0+myid_col*di);
  i2=mint(1.0+(myid_col+1)*di)-1;
  if(myid == mpi_master)printf("nodes_row= %d nodes_col= %d\n",nodes_row,nodes_col);
  printf("myid= %d myrow= %d mycol= %d\n",myid,myrow,mycol);
  printf("myid= %d myid_row= %d myid_col= %d\n",myid,myid_row,myid_col);
  printf("myid= %d holds [%d:%d][%d:%d]\n",myid,i1,i2,j1,j2);
  /* allocate the grid to (i1-1:i2+1,j1-1:j2+1) this includes boundary cells */
  psi=    matrix((INT)(i1-1),(INT)(i2+1),(INT)(j1-1),(INT)(j2+1));
  new_psi=matrix((INT)(i1-1),(INT)(i2+1),(INT)(j1-1),(INT)(j2+1));
  the_for=matrix((INT)(i1-1),(INT)(i2+1),(INT)(j1-1),(INT)(j2+1));
  /* scratch vectors for the strided (column) halo exchanges */
  svec1=vector((INT)(i1-1),(INT)(i2+1));
  svec2=vector((INT)(i1-1),(INT)(i2+1));
  rvec1=vector((INT)(i1-1),(INT)(i2+1));
  rvec2=vector((INT)(i1-1),(INT)(i2+1));
  /* set initial guess for the value of the grid */
  for(i=i1-1;i<=i2+1;i++)
    for(j=j1-1;j<=j2+1;j++)
      psi[i][j]=1.0;
  /* set boundary conditions */
  bc(psi,i1,i2,j1,j2);
  do_force(i1,i2,j1,j2);
  /* do the jacobian iterations */
  t1=MPI_Wtime();
  iout=steps/100;
  if(iout == 0)iout=1;
  if(steps > 0){
    for( i=1; i<=steps;i++) {
      do_jacobi(psi,new_psi,&mydiff,i1,i2,j1,j2);
      do_transfer(psi,i1,i2,j1,j2);
      /* global residual = sum of per-rank update differences */
      mpi_err= MPI_Reduce(&mydiff,&diff,1,MPI_DOUBLE,MPI_SUM,mpi_master,MPI_COMM_WORLD);
      if(myid == mpi_master && i % iout == 0){
        printf("%8d %15.5f\n",i,diff);
      }
    }
  }
  t2=MPI_Wtime();
  if(myid == mpi_master)printf("run time = %10.3g\n",t2-t1);
  /* write_grid(psi,i1,i2,j1,j2); */
  mpi_err = MPI_Finalize();
  return 0;
}
void bc(FLT ** psi,INT i1,INT i2,INT j1,INT j2){
  /*
    Apply zero Dirichlet boundary conditions on the edges of the global grid.
    Input is the grid and this rank's interior index range [i1:i2][j1:j2];
    only ranks that own a physical edge write to it.

    Fix: the first index i runs over 1..nx and the second index j over 1..ny
    (see the di=nx/nodes_col and dj=ny/nodes_row decomposition in main, and
    the matching comparisons in write_grid), so the bottom-edge test must
    compare i2 with nx and the right-edge test must compare j2 with ny.
    The previous code compared i2 with ny and j2 with nx, which applied the
    wrong boundaries whenever nx != ny.
  */
  INT i,j;
  /* do the top edges */
  if(i1 == 1) {
    for(j=j1-1;j<=j2+1;j++)
      psi[i1-1][j]=0.0;
  }
  /* do the bottom edges */
  if(i2 == nx) {
    for(j=j1-1;j<=j2+1;j++)
      psi[i2+1][j]=0.0;
  }
  /* do left edges */
  if(j1 == 1) {
    for(i=i1-1;i<=i2+1;i++)
      psi[i][j1-1]=0.0;
  }
  /* do right edges */
  if(j2 == ny) {
    for(i=i1-1;i<=i2+1;i++)
      psi[i][j2+1]=0.0;
  }
}
void do_jacobi(FLT ** psi,FLT ** new_psi,FLT *diff_in,INT i1,INT i2,INT j1,INT j2){
  /*
    ! does a single Jacobi iteration step
    ! input is the grid and the indices for the interior cells
    ! new_psi is temp storage for the the updated grid
    ! output is the updated grid in psi and diff which is
    ! the sum of the differences between the old and new grids
  */
  INT i,j;
  FLT diff;
  diff=0.0;
  /*
    Five-point stencil update using the precomputed global coefficients
    a1..a5 and the forcing grid the_for; diff accumulates |new-old| via
    an OpenMP reduction so the loop parallelizes over rows.
  */
#pragma omp parallel for schedule(static) reduction(+: diff) private(j) firstprivate (a1,a2,a3,a4,a5)
  for( i=i1;i<=i2;i++) {
    for(j=j1;j<=j2;j++){
      new_psi[i][j]=a1*psi[i+1][j] + a2*psi[i-1][j] +
                    a3*psi[i][j+1] + a4*psi[i][j-1] -
                    a5*the_for[i][j];
      diff=diff+fabs(new_psi[i][j]-psi[i][j]);
    }
  }
  *diff_in=diff;
  /* copy the updated interior back into psi (halos are refreshed later) */
#pragma omp parallel for schedule(static) private(j)
  for( i=i1;i<=i2;i++)
    for(j=j1;j<=j2;j++)
      psi[i][j]=new_psi[i][j];
}
void do_force (INT i1,INT i2,INT j1,INT j2) {
  /*
    Fill the global forcing grid the_for for this rank's interior cells
    [i1:i2][j1:j2]; the forcing depends only on the y coordinate j*dy.
  */
  INT row,col;
  for(row=i1;row<=i2;row++)
    for(col=j1;col<=j2;col++)
      the_for[row][col]=force(col*dy);
}
FLT force(FLT y) {
  /* Sinusoidal forcing term: -alpha * sin(a6*y), with a6 = pi/ly. */
  FLT s;
  s=sin(y*a6);
  return -(alpha*s);
}
/*
The routines matrix, ivector and vector were adapted from
Numerical Recipes in C The Art of Scientific Computing
Press, Flannery, Teukolsky, Vetting
Cambridge University Press, 1988.
*/
FLT **matrix(INT nrl,INT nrh,INT ncl,INT nch)
{
  /*
    Allocate a 2-D array indexed as m[nrl..nrh][ncl..nch] (Numerical Recipes
    style offset pointers).  The data is one contiguous block; row pointers
    are spaced nch-ncl+1 elements apart.  Exits on allocation failure.
  */
  INT row;
  INT nrows,ncols;
  FLT **m;
  FLT *data;

  nrows=nrh-nrl+1;
  ncols=nch-ncl+1;
  /* row-pointer array, shifted so m[nrl] is the first valid row */
  m=(FLT **) malloc((unsigned) nrows*sizeof(FLT*));
  if (!m){
    printf("allocation failure 1 in matrix()\n");
    exit(1);
  }
  m -= nrl;
  /* one contiguous block for all rows, shifted so column ncl is valid */
  data=(FLT *) malloc((unsigned) nrows*ncols*sizeof(FLT));
  if (!data){
    printf("allocation failure 2 in matrix()\n");
    exit(1);
  }
  m[nrl]=data-ncl;
  for(row=nrl+1;row<=nrh;row++)
    m[row]=m[row-1]+ncols;
  return m;
}
INT *ivector(INT nl, INT nh)
{
  /*
    Allocate an INT vector indexed as v[nl..nh] (offset-pointer style).
    Exits on allocation failure.
  */
  INT *base;
  base=(INT *)malloc((unsigned) (nh-nl+1)*sizeof(INT));
  if (base == NULL) {
    printf("allocation failure in ivector()\n");
    exit(1);
  }
  return base-nl;
}
FLT *vector(INT nl, INT nh)
{
  /*
    Allocate an FLT vector indexed as v[nl..nh] (offset-pointer style).
    Exits on allocation failure.
  */
  FLT *base;
  base=(FLT *)malloc((unsigned) (nh-nl+1)*sizeof(FLT));
  if (base == NULL) {
    printf("allocation failure in vector()\n");
    exit(1);
  }
  return base-nl;
}
void do_transfer(FLT ** psi,INT i1,INT i2,INT j1,INT j2) {
  /*
    Exchange halo (ghost) cells with the four neighboring ranks after a
    Jacobi sweep.  Columns (fixed j) are strided in memory, so they are
    packed into the global scratch vectors svec1/svec2 before sending and
    unpacked from rvec1/rvec2 after receiving; rows (fixed i) are contiguous
    and sent in place.  Blocking sends/receives are ordered by even/odd rank
    parity to avoid deadlock; edge ranks have MPI_PROC_NULL neighbors so
    their transfers are no-ops.
  */
  INT num_x,num_y;
  INT i,j;
  /* counts include the two ghost cells at each end of the range */
  num_x=i2-i1+3;
  num_y=j2-j1+3;
  /* pack the first and last owned columns for the left/right exchange */
  for(i=i1-1;i<=i2+1;i++){
    svec1[i]=psi[i][j1];
    svec2[i]=psi[i][j2];
  }
  if((myid_col % 2) == 0){
    /* send to left */
    mpi_err=MPI_Send(&svec1[i1-1],num_x,MPI_DOUBLE,myleft,100,ROW_COMM);
    /* rec from left */
    mpi_err=MPI_Recv(&rvec1[i1-1],num_x,MPI_DOUBLE,myleft,100,ROW_COMM,&status);
    /* rec from right */
    mpi_err=MPI_Recv(&rvec2[i1-1],num_x,MPI_DOUBLE,myright,100,ROW_COMM,&status);
    /* send to right */
    mpi_err=MPI_Send(&svec2[i1-1],num_x,MPI_DOUBLE,myright,100,ROW_COMM);
  }
  else {
    /* we are on an odd col processor */
    /* rec from right */
    mpi_err=MPI_Recv(&rvec2[i1-1],num_x,MPI_DOUBLE,myright,100,ROW_COMM,&status);
    /* send to right */
    mpi_err=MPI_Send(&svec2[i1-1],num_x,MPI_DOUBLE,myright,100,ROW_COMM);
    /* send to left */
    mpi_err=MPI_Send(&svec1[i1-1],num_x,MPI_DOUBLE,myleft,100,ROW_COMM);
    /* rec from left */
    mpi_err=MPI_Recv(&rvec1[i1-1],num_x,MPI_DOUBLE,myleft,100,ROW_COMM,&status);
  }
  /* unpack received columns into the ghost columns */
  if(myleft != MPI_PROC_NULL){
    for(i=i1-1;i<=i2+1;i++){
      psi[i][j1-1]=rvec1[i];
    }
  }
  if(myright != MPI_PROC_NULL){
    for(i=i1-1;i<=i2+1;i++){
      psi[i][j2+1]=rvec2[i];
    }
  }
  /* rows are contiguous: exchange top/bottom ghost rows directly */
  if((myid_row % 2) == 0){
    /* send to top */
    mpi_err=MPI_Send(&psi[i1][j1-1],  num_y,MPI_DOUBLE,mytop,10, COL_COMM);
    /* rec from top */
    mpi_err=MPI_Recv(&psi[i1-1][j1-1],num_y,MPI_DOUBLE,mytop,10,COL_COMM,&status);
    /* rec from bot */
    mpi_err=MPI_Recv(&psi[i2+1][j1-1],num_y,MPI_DOUBLE,mybot,10,COL_COMM,&status);
    /* send to bot */
    mpi_err=MPI_Send(&psi[i2][j1-1],  num_y,MPI_DOUBLE,mybot,10, COL_COMM);
  }
  else{
    /* rec from bot */
    mpi_err=MPI_Recv(&psi[i2+1][j1-1],num_y,MPI_DOUBLE,mybot,10,COL_COMM,&status);
    /* send to bot */
    mpi_err=MPI_Send(&psi[i2][j1-1],  num_y,MPI_DOUBLE,mybot,10,COL_COMM);
    /* send to top */
    mpi_err=MPI_Send(&psi[i1][j1-1],  num_y,MPI_DOUBLE,mytop,10,COL_COMM);
    /* rec from top */
    mpi_err=MPI_Recv(&psi[i1-1][j1-1],num_y,MPI_DOUBLE,mytop,10,COL_COMM,&status);
  }
}
char* unique(char *name) {
  /*
    Return a per-rank name: the given prefix followed by the MPI rank padded
    with zeros to at least three digits (e.g. "out2c_007").  The result lives
    in a static buffer, so it is overwritten by the next call and is not
    thread safe.  NOTE(review): the 40-byte buffer is not length-checked;
    callers must pass short prefixes.
  */
  static char unique_str[40];
  int i;
  for(i=0;i<40;i++) unique_str[i]=(char)0;
  /* %03d replaces the old three-branch manual padding with one format. */
  sprintf(unique_str,"%s%03d",name,myid);
  return unique_str;
}
void write_grid(FLT ** psi,INT i1,INT i2,INT j1,INT j2) {
  /*
    Write this rank's portion of the grid to a per-rank file named by
    unique("out2c_").  The output range is extended to include the physical
    boundary rows/columns on ranks that own a global edge.  The first line
    holds the row and column counts, followed by the grid values.

    Fix: the result of fopen() was previously used without a NULL check,
    which dereferences NULL if the file cannot be created.
  */
  INT i,j,i0,j0,i3,j3;
  FILE *f18;
  /* extend the i range to the physical boundary when this rank owns it */
  if(i1==1) {
    i0=0;
  }
  else {
    i0=i1;
  }
  if(i2==nx) {
    i3=nx+1;
  }
  else {
    i3=i2;
  }
  /* extend the j range likewise */
  if(j1==1) {
    j0=0;
  }
  else {
    j0=j1;
  }
  if(j2==ny) {
    j3=ny+1;
  }
  else {
    j3=j2;
  }
  f18=fopen(unique("out2c_"),"w");
  if (f18 == NULL) {
    printf("write_grid: could not open output file\n");
    return;
  }
  fprintf(f18,"%6d %6d\n",i3-i0+1,j3-j0+1);
  for( i=i0;i<=i3;i++){
    for( j=j0;j<=j3;j++){
      fprintf(f18,"%14.7g",psi[i][j]);
      if(j != j3)fprintf(f18," ");
    }
    fprintf(f18,"\n");
  }
  fclose(f18);
}
INT mint(FLT x) {
  /*
    Round x to an integer: truncate toward zero, then bump up by one when
    the truncated fraction is at least 0.5 (matches the original behavior,
    including for negative inputs).
  */
  INT truncated;
  truncated=(INT)x;
  if((x-(FLT)truncated) >= 0.5)
    truncated++;
  return truncated;
}
FLT walltime()
{
return((FLT)clock()/((FLT)CLOCKS_PER_SEC));
}
|
SpatialConvolutionLocal.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialConvolutionLocal.c"
#else
static inline void THNN_(SpatialConvolutionLocal_shapeCheck)(
	THTensor *input, THTensor *gradOutput,
	THTensor *weight, THTensor *bias,
	int kH, int kW, int dH,
	int dW, int padH, int padW,
	int64_t inputHeight, int64_t inputWidth,
	int64_t outputHeight, int64_t outputWidth) {

  /*
    Validate the tensor shapes for the locally-connected spatial convolution.
    Checks kernel and stride positivity, input dimensionality (3D unbatched
    or 4D batched), the input plane count implied by the 3D view of weight
    (see view_weight_local: size(2) == nInputPlane*kH*kW, size(1) ==
    nOutputPlane), the per-location bias shape, and - when supplied - the
    gradOutput shape.  Raises via THArgCheck/THNN_ARGCHECK on mismatch.
  */
  THArgCheck(kW > 0 && kH > 0, 9,
	     "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
  THArgCheck(dW > 0 && dH > 0, 11,
	     "stride should be greater than zero, but got dH: %d dW: %d", dH, dW);

  int ndim = input->dim();
  int dimf = 0;
  int dimh = 1;
  int dimw = 2;

  /* batched input: shift the feature/height/width dims past the batch dim */
  if (ndim == 4) {
    dimf++;
    dimh++;
    dimw++;
  }

  THNN_ARGCHECK(!input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
		"non-empty 3D or 4D input tensor expected but got: %s");

  int64_t nInputPlane = weight->size(2) / (kH * kW);
  int64_t nOutputPlane = weight->size(1);

  if (bias != NULL) {
    /* bias is per output location: (nOutputPlane, outputHeight, outputWidth) */
    THNN_CHECK_DIM_SIZE(bias, 3, 0, nOutputPlane);
    THNN_CHECK_DIM_SIZE(bias, 3, 1, outputHeight);
    THNN_CHECK_DIM_SIZE(bias, 3, 2, outputWidth);
  }

  THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);

  if (gradOutput != NULL) {
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
  }
}
/* Return a contiguous 3D view of the weight tensor.
 * A 6D weight is collapsed to 3D by merging dims (0,1) and (3,4,5) —
 * presumably (oH x oW) x nOutputPlane x (nInputPlane*kH*kW); the -1
 * stride arguments let newWithStorage3d infer contiguous strides.
 * The caller owns the returned tensor and must THTensor_(free) it. */
static THTensor* THNN_(view_weight_local)(THTensor *_weight)
{
THTensor *weight = THTensor_(newContiguous)(_weight);
AT_CHECK(!weight->is_empty() && (weight->dim() == 3 || weight->dim() == 6),
"weight tensor should be (non-empty) 3D or 6D - got size: ", weight->sizes());
if (weight->dim() == 6) {
int64_t s1 = weight->size(0) * weight->size(1);
int64_t s2 = weight->size(2);
int64_t s3 = weight->size(3) * weight->size(4) * weight->size(5);
/* Keep the old handle so the contiguous copy can be released after
 * the storage-sharing 3D view is created. */
THTensor *old_weight = weight;
weight = THTensor_(newWithStorage3d)(THTensor_getStoragePtr(weight),
weight->storage_offset(),
s1, -1, s2, -1, s3, -1);
THTensor_(free)(old_weight);
}
return weight;
}
/* Forward pass for a single sample: unfold the input into finput
 * (im2col), seed output with the per-position bias, then one batched
 * matmul — a separate GEMM per output position, which is what makes the
 * convolution "local" (unshared weights). */
static void THNN_(SpatialConvolutionLocal_updateOutput_frame)
(
THTensor *input, THTensor *output,
THTensor *weight, THTensor *bias, THTensor *finput,
int kW, int kH, int dW, int dH, int padW, int padH,
int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight)
{
THTensor *output3d, *finput3d;
THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
outputWidth, outputHeight);
/* Bias is copied in first; baddbmm below accumulates on top of it. */
THTensor_(copy)(output, bias);
/* View output as oH*oW x nOutputPlane x 1 (batch dim = output position). */
output3d = THTensor_(newWithStorage3d)
(THTensor_getStoragePtr(output), output->storage_offset(),
outputHeight * outputWidth, 1,
nOutputPlane, outputHeight * outputWidth,
1, nOutputPlane * outputHeight * outputWidth);
/* View finput as oH*oW x nInputPlane*kH*kW x 1. */
finput3d = THTensor_(newWithStorage3d)
(THTensor_getStoragePtr(finput), finput->storage_offset(),
outputHeight * outputWidth, 1,
kW * kH * nInputPlane, outputHeight * outputWidth,
1, kW * kH * nInputPlane * outputHeight * outputWidth);
// weight: oH*oW x nOutputPlane x nInputPlane*kH*kW
// finput3d: oH*oW x nInputPlane*kH*kW x 1
THTensor_(baddbmm)(output3d, 1.0, output3d, 1.0, weight, finput3d);
// output3d: oH*oW x nOutputPlane x 1
/* Views share storage with output/finput; freeing them drops only the view. */
THTensor_(free)(output3d);
THTensor_(free)(finput3d);
}
/* Forward entry point. Re-views the weight to 3D, shape-checks, then runs
 * the per-frame forward either once (3D input) or per batch element
 * (4D input, OpenMP-parallel over the batch). finput is resized here and
 * used as im2col scratch space. */
void THNN_(SpatialConvolutionLocal_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
THTensor *weight,
THTensor *bias,
THTensor *finput,
THTensor *fgradInput,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int64_t inputWidth, int64_t inputHeight,
int64_t outputWidth, int64_t outputHeight)
{
/* view_weight_local returns an owned tensor; freed at the end. */
weight = THNN_(view_weight_local)(weight);
THNN_(SpatialConvolutionLocal_shapeCheck)
(input, NULL, weight, bias, kH, kW, dH, dW, padH, padW,
inputHeight, inputWidth, outputHeight, outputWidth);
input = THTensor_(newContiguous)(input);
int64_t nInputPlane = THTensor_(size)(weight, 2)/ (kW * kH);
int64_t nOutputPlane = THTensor_(size)(weight, 1);
if(input->dim() == 3)
{
/* Single sample. */
THTensor_(resize2d)(finput, kW*kH*nInputPlane, outputHeight*outputWidth);
THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth);
THNN_(SpatialConvolutionLocal_updateOutput_frame)
(input, output, weight, bias, finput,
kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
}
else
{
/* Batched: T = batch size; each iteration works on disjoint slices,
 * so the loop is safe to parallelize. */
int64_t T = input->size(0);
int64_t t;
THTensor_(resize3d)(finput, T, kW*kH*nInputPlane, outputHeight*outputWidth);
THTensor_(resize4d)(output, T, nOutputPlane, outputHeight, outputWidth);
#pragma omp parallel for private(t)
for(t = 0; t < T; t++)
{
THTensor *input_t = THTensor_(newSelect)(input, 0, t);
THTensor *output_t = THTensor_(newSelect)(output, 0, t);
THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
THNN_(SpatialConvolutionLocal_updateOutput_frame)
(input_t, output_t, weight, bias, finput_t,
kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
THTensor_(free)(input_t);
THTensor_(free)(output_t);
THTensor_(free)(finput_t);
}
}
/* Release the contiguous input copy and the local weight view. */
THTensor_(free)(input);
THTensor_(free)(weight);
}
/* Backward-to-input for a single sample: batched matmul of the transposed
 * weight with gradOutput produces the unfolded input gradient, which
 * unfolded_acc then folds back (col2im) into gradInput.
 * weight here is the caller's transposed view (dims 1 and 2 swapped). */
static void THNN_(SpatialConvolutionLocal_updateGradInput_frame)
(THTensor *gradInput, THTensor *gradOutput,
THTensor *weight, THTensor *fgradInput,
int kW, int kH, int dW, int dH, int padW, int padH,
int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight)
{
THTensor *gradOutput3d, *fgradInput3d;
gradOutput3d = THTensor_(newWithStorage3d)(THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(),
outputHeight*outputWidth, 1,
nOutputPlane, outputHeight*outputWidth,
1, nOutputPlane*outputHeight*outputWidth);
fgradInput3d = THTensor_(newWithStorage3d)(THTensor_getStoragePtr(fgradInput), fgradInput->storage_offset(),
outputHeight*outputWidth, 1,
kW*kH*nInputPlane, outputHeight*outputWidth,
1, kW*kH*nInputPlane*outputHeight*outputWidth);
// weight: oH*oW x nInputPlane*kH*kW x nOutputPlane
// gradOutput3d: oH*oW x nOutputPlane x 1
/* beta = 0.0: fgradInput is overwritten, not accumulated. */
THTensor_(baddbmm)(fgradInput3d, 0.0, fgradInput3d, 1.0, weight, gradOutput3d);
// fgradInput3d: oH*oW x nInputPlane*kH*kW x 1
THTensor_(free)(gradOutput3d);
THTensor_(free)(fgradInput3d);
/* gradInput is zeroed first because unfolded_acc accumulates into it. */
THTensor_(zero)(gradInput);
THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
outputWidth, outputHeight);
}
/* Backward-to-input entry point. Builds a transposed view of the local
 * weight (tweight) once, then applies the per-frame backward either on the
 * whole tensor (3D) or per batch element (4D, OpenMP-parallel — each
 * iteration touches disjoint slices of gradInput/gradOutput/fgradInput). */
void THNN_(SpatialConvolutionLocal_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THTensor *weight,
THTensor *finput,
THTensor *fgradInput,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int64_t inputWidth, int64_t inputHeight,
int64_t outputWidth, int64_t outputHeight)
{
weight = THNN_(view_weight_local)(weight);
THNN_(SpatialConvolutionLocal_shapeCheck)
(input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW,
inputHeight, inputWidth, outputHeight, outputWidth);
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
int64_t nInputPlane = THTensor_(size)(weight,2)/(kW*kH);
int64_t nOutputPlane = THTensor_(size)(weight,1);
THTensor_(resizeAs)(gradInput, input);
THTensor_(resizeAs)(fgradInput, finput);
/* tweight: weight with dims 1 and 2 swapped —
 * oH*oW x nInputPlane*kH*kW x nOutputPlane. */
THTensor *tweight = THTensor_(new)();
THTensor_(transpose)(tweight, weight, 1, 2);
if(input->dim() == 3)
{
THNN_(SpatialConvolutionLocal_updateGradInput_frame)
(gradInput, gradOutput, tweight,
fgradInput, kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
}
else
{
int64_t T = input->size(0);
int64_t t;
#pragma omp parallel for private(t)
for(t = 0; t < T; t++)
{
THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);
THNN_(SpatialConvolutionLocal_updateGradInput_frame)
(gradInput_t, gradOutput_t, tweight, fgradInput_t,
kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
THTensor_(free)(gradInput_t);
THTensor_(free)(gradOutput_t);
THTensor_(free)(fgradInput_t);
}
}
/* Release owned temporaries (contiguous copies and views). */
THTensor_(free)(tweight);
THTensor_(free)(input);
THTensor_(free)(gradOutput);
THTensor_(free)(weight);
}
/* Gradient accumulation for a single sample:
 * gradWeight += scale * gradOutput3d @ finput3d (per output position) and
 * gradBias += scale * gradOutput.
 * Note finput3d is viewed as oH*oW x 1 x kW*kH*nInputPlane (row vector),
 * the transpose of the layout used in the forward pass. */
static void THNN_(SpatialConvolutionLocal_accGradParameters_frame)
(THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
THTensor *finput, real scale,
int kW, int kH, int dW, int dH, int padW, int padH,
int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight)
{
THTensor *gradOutput3d, *finput3d;
gradOutput3d = THTensor_(newWithStorage3d)(THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(),
outputHeight*outputWidth, 1,
nOutputPlane, outputHeight*outputWidth,
1, nOutputPlane*outputHeight*outputWidth);
finput3d = THTensor_(newWithStorage3d)(THTensor_getStoragePtr(finput), finput->storage_offset(),
outputHeight*outputWidth, 1,
1, kW*kH*nInputPlane*outputHeight*outputWidth,
kW*kH*nInputPlane, outputHeight*outputWidth);
// gradOutput3d: oH*oW x nOutputPlane x 1
// finput3d: oH*oW x 1 x kW*kH*nInputPlane
/* beta = 1.0: accumulate on top of the existing gradWeight. */
THTensor_(baddbmm)(gradWeight, 1.0, gradWeight, scale, gradOutput3d, finput3d);
// gradWeight: oH*oW x nOutputPlane x kW*kH*nInputPlane
THTensor_(cadd)(gradBias, gradBias, scale, gradOutput);
THTensor_(free)(gradOutput3d);
THTensor_(free)(finput3d);
}
/* Parameter-gradient entry point. Requires contiguous gradWeight/gradBias
 * so the 3D views alias their storage directly. The batch loop is serial —
 * every iteration accumulates into the same shared gradWeight/gradBias,
 * so it must not be parallelized like the other passes. */
void THNN_(SpatialConvolutionLocal_accGradParameters)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradWeight,
THTensor *gradBias,
THTensor *finput,
THTensor *fgradInput,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int64_t inputWidth, int64_t inputHeight,
int64_t outputWidth, int64_t outputHeight,
accreal scale_)
{
THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
gradWeight = THNN_(view_weight_local)(gradWeight);
THNN_(SpatialConvolutionLocal_shapeCheck)
(input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW,
inputHeight, inputWidth, outputHeight, outputWidth);
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
int64_t nInputPlane = THTensor_(size)(gradWeight,2)/(kW*kH);
int64_t nOutputPlane = THTensor_(size)(gradWeight,1);
if(input->dim() == 3)
{
/* Single sample: finput already holds the unfolded input from forward. */
THNN_(SpatialConvolutionLocal_accGradParameters_frame)
(gradOutput, gradWeight, gradBias, finput, scale,
kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
}
else
{
int64_t T = input->size(0);
int64_t t;
for(t = 0; t < T; t++)
{
THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
THNN_(SpatialConvolutionLocal_accGradParameters_frame)
(gradOutput_t, gradWeight, gradBias, finput_t, scale,
kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
THTensor_(free)(gradOutput_t);
THTensor_(free)(finput_t);
}
}
THTensor_(free)(input);
THTensor_(free)(gradOutput);
THTensor_(free)(gradWeight);
}
#endif
|
mandel_par.c | /*
** PROGRAM: Mandelbrot area (solution)
**
** PURPOSE: Program to compute the area of a Mandelbrot set.
** The correct answer should be around 1.510659.
**
** USAGE: Program runs without input ... just run the executable
**
** ADDITIONAL EXERCISES: Experiment with the schedule clause to fix
** the load imbalance. Experiment with atomic vs. critical vs.
** reduction for numoutside.
**
** HISTORY: Written: (Mark Bull, August 2011).
**
** Changed "complex" to "d_complex" to avoid collsion with
** math.h complex type. Fixed data environment errors
** (Tim Mattson, September 2011)
**
** Changed "atomic" to "critical" to match Common Core
** (Helen He, November 2020)
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
# define NPOINTS 1000
# define MXITR 1000
/* Complex number stored as two doubles; named d_complex to avoid the
 * collision with the C99 "complex" type (see file history note above). */
struct d_complex {
double r;
double i;
};
void testpoint(struct d_complex);
struct d_complex c;   /* current test point; private(c) in the parallel loop */
int numoutside = 0;   /* escaped-point count; updated inside an omp critical */
/* Sample a NPOINTS x NPOINTS grid over the rectangle [-2,0.5] x [0,1.125]
 * covering (half of) the Mandelbrot set; testpoint() bumps the global
 * numoutside for every point that escapes. The area estimate scales the
 * rectangle (doubled for symmetry) by the fraction of inside points. */
int main ()
{
   int i, j;
   double area, error, eps  = 1.0e-5;

   double initTime = omp_get_wtime();
#pragma omp parallel for private(c) firstprivate(eps) collapse(2) schedule(dynamic,100)
   for (i = 0; i < NPOINTS; i++) {
     for (j = 0; j < NPOINTS; j++) {
       c.r = -2.0 + 2.5 * (double)(i)/(double)(NPOINTS) + eps;
       c.i = 1.125 * (double)(j)/(double)(NPOINTS) + eps;
       testpoint(c);
     }
   }

   /* Area of set = (area of sampled rectangle) * 2 * inside fraction. */
   area = 2.0 * 2.5 * 1.125 * (double)(NPOINTS * NPOINTS
                 - numoutside) / (double)(NPOINTS * NPOINTS);
   error = area / (double)NPOINTS;

   double runtime = omp_get_wtime() - initTime;
   /* BUG FIX: omp_get_num_threads() always returns 1 outside a parallel
    * region, so the original always printed "1 threads".
    * omp_get_max_threads() reports the team size the loop actually used
    * (absent explicit num_threads clauses). */
   printf("runtime = %lf seconds with %d threads\n", runtime, omp_get_max_threads());
   printf("Area of Mandlebrot set = %12.8f +/- %12.8f\n", area, error);
   printf("Correct answer should be around 1.510659\n");
   return 0;
}
void testpoint(struct d_complex c)
{
// Does the iteration z=z*z+c, until |z| > 2 when point is known to be outside set
// If loop count reaches MAXITER, point is considered to be inside the set
struct d_complex z;
int iter;
double temp;
z = c;
for (iter = 0; iter < MXITR; iter++) {
temp = (z.r * z.r) - (z.i * z.i) + c.r;
z.i = z.r * z.i * 2 + c.i;
z.r = temp;
if ((z.r * z.r + z.i * z.i) > 4.0) {
#pragma omp critical
numoutside++;
break;
}
}
}
|
vecops.c | /* Copyright 2013-2015. The Regents of the University of California.
* Copyright 2016. Martin Uecker.
* Copyright 2017. University of Oxford.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2011-2016 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* 2014 Frank Ong <frankong@berkeley.edu>
* 2014-2015 Jonathan Tamir <jtamir@eecs.berkeley.edu>
* 2017 Sofia Dimoudi <sofia.dimoudi@cardiov.ox.ac.uk>
*
*
* This file defines basic operations on vectors of floats/complex floats
* for operations on the CPU which are are used by higher level code
* (mainly num/flpmath.c and num/italgos.c) to implement more complex
* operations. The functions are exported by pointers stored in the
* global variable cpu_ops of type struct vec_ops. Identical functions
* are implemented for the GPU in gpukrnls.c.
*
*/
#include <assert.h>
#include <math.h>
#include <complex.h>
#include <stdbool.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "misc/misc.h"
#include "misc/debug.h"
#include "vecops.h"
/**
* Allocate memory for array of floats.
* Note: be sure to pass 2*N if allocating for complex float
*
* @param N number of elements
*/
static float* allocate(long N)
{
assert(N >= 0);
return xmalloc((size_t)N * sizeof(float));
}
/* Release a vector obtained from allocate(); free(NULL) is a safe no-op. */
static void del(float* vec)
{
	free(vec);
}
/* Copy N floats from src into dst (non-overlapping regions). */
static void copy(long N, float* dst, const float* src)
{
	long k = 0;
	while (k < N) {
		dst[k] = src[k];
		k++;
	}
}
/* Widen N floats into the double array dst. */
static void float2double(long N, double* dst, const float* src)
{
	for (long k = N; k-- > 0; )
		dst[k] = src[k];
}
/* Narrow N doubles into the float array dst (rounds to float precision). */
static void double2float(long N, float* dst, const double* src)
{
	long k = 0;
	while (k < N) {
		dst[k] = src[k];
		++k;
	}
}
/*
* Set vector to all-zeros
*
* @param N vector length
* @param vec vector
*/
/* Zero out the first N entries of vec. */
static void clear(long N, float* vec)
{
	for (long k = 0; k < N; ++k)
		vec[k] = 0.f;
}
/* Inner product of two float vectors, accumulated in double.
 * Each product is computed in float (as before), then widened. */
static double dot(long N, const float* vec1, const float* vec2)
{
	const float* p = vec1;
	const float* q = vec2;
	double acc = 0.;

	while (N-- > 0)
		acc += (*p++) * (*q++);

	return acc;
}
/**
* Compute l2 norm of vec
*
* @param N vector length
* @param vec vector
*/
static double norm(long N, const float* vec)
{
double res = 0.;
for (long i = 0; i < N; i++)
res += vec[i] * vec[i];
//res = fma((double)vec[i], (double)vec[i], res);
return sqrt(res);
}
/**
* Compute l1 norm of vec
*
* @param N vector length
* @param vec vector
*/
/* l1 norm: sum of absolute values, accumulated in double. */
static double asum(long N, const float* vec)
{
	double total = 0.;

	for (long k = 0; k < N; ++k)
		total += fabsf(vec[k]);

	return total;
}
/**
* Compute l1 norm of complex vec
*
* @param N vector length
* @param vec vector
*/
static double zl1norm(long N, const complex float* vec)
{
double res = 0.;
for (long i = 0; i < N; i++)
res += cabsf(vec[i]);
return res;
}
/* dst += alpha * src; skipped entirely when alpha is zero. */
static void axpy(long N, float* dst, float alpha, const float* src)
{
	if (alpha == 0.f)
		return;

	for (long k = 0; k < N; ++k)
		dst[k] += alpha * src[k];
}
/* dst = beta * dst + src (scale-then-add, in place). */
static void xpay(long N, float beta, float* dst, const float* src)
{
	for (long k = 0; k < N; ++k) {
		float scaled = dst[k] * beta;
		dst[k] = scaled + src[k];
	}
}
/* dst = alpha * src (scalar multiply). */
static void smul(long N, float alpha, float* dst, const float* src)
{
	for (long k = N; k-- > 0; )
		dst[k] = src[k] * alpha;
}
/* dst = src1 + src2; the dst == src1 case is handled as an in-place
 * accumulate (same fast path as the original). */
static void add(long N, float* dst, const float* src1, const float* src2)
{
	if (dst == src1) {
		for (long k = 0; k < N; ++k)
			dst[k] += src2[k];
		return;
	}

	for (long k = 0; k < N; ++k)
		dst[k] = src1[k] + src2[k];
}
/* dst = src1 - src2, element-wise. */
static void sub(long N, float* dst, const float* src1, const float* src2)
{
	long k = 0;
	while (k < N) {
		dst[k] = src1[k] - src2[k];
		++k;
	}
}
/* dst = src1 * src2, element-wise (Hadamard product). */
static void mul(long N, float* dst, const float* src1, const float* src2)
{
	for (long k = N; k-- > 0; )
		dst[k] = src1[k] * src2[k];
}
/* Element-wise division with the convention 0 where the divisor is 0. */
static void vec_div(long N, float* dst, const float* src1, const float* src2)
{
	for (long k = 0; k < N; ++k) {
		float denom = src2[k];
		dst[k] = (denom == 0.f) ? 0.f : src1[k] / denom;
	}
}
/* Fused multiply-accumulate: dst += src1 * src2, element-wise. */
static void fmac(long N, float* dst, const float* src1, const float* src2)
{
	long k = 0;
	while (k < N) {
		dst[k] += src1[k] * src2[k];
		++k;
	}
}
/* Multiply-accumulate into a double destination: dst += src1 * src2. */
static void fmac2(long N, double* dst, const float* src1, const float* src2)
{
	for (long k = N; k-- > 0; )
		dst[k] += src1[k] * src2[k];
}
/* Element-wise complex multiplication: dst = src1 * src2. */
static void zmul(long N, complex float* dst, const complex float* src1, const complex float* src2)
{
	for (long k = 0; k < N; ++k)
		dst[k] = src1[k] * src2[k];
}
/* Element-wise complex division with 0 where the divisor is 0. */
static void zdiv(long N, complex float* dst, const complex float* src1, const complex float* src2)
{
	for (long k = 0; k < N; ++k) {
		complex float den = src2[k];
		dst[k] = (den == 0.f) ? 0.f : src1[k] / den;
	}
}
static void zpow(long N, complex float* dst, const complex float* src1, const complex float* src2)
{
for (long i = 0; i < N; i++)
dst[i] = cpowf(src1[i], src2[i]);
}
/* Complex multiply-accumulate: dst += src1 * src2, element-wise. */
static void zfmac(long N, complex float* dst, const complex float* src1, const complex float* src2)
{
	long k = 0;
	while (k < N) {
		dst[k] += src1[k] * src2[k];
		++k;
	}
}
/* Complex multiply-accumulate into a complex double destination. */
static void zfmac2(long N, complex double* dst, const complex float* src1, const complex float* src2)
{
	for (long k = N; k-- > 0; )
		dst[k] += src1[k] * src2[k];
}
/* dst = src1 * conj(src2), element-wise. */
static void zmulc(long N, complex float* dst, const complex float* src1, const complex float* src2)
{
	for (long k = 0; k < N; ++k) {
		complex float conj2 = conjf(src2[k]);
		dst[k] = src1[k] * conj2;
	}
}
/* Conjugate multiply-accumulate: dst += src1 * conj(src2). */
static void zfmacc(long N, complex float* dst, const complex float* src1, const complex float* src2)
{
	long k = 0;
	while (k < N) {
		dst[k] += src1[k] * conjf(src2[k]);
		++k;
	}
}
/* Conjugate multiply-accumulate into a complex double destination. */
static void zfmacc2(long N, complex double* dst, const complex float* src1, const complex float* src2)
{
	for (long k = N; k-- > 0; )
		dst[k] += src1[k] * conjf(src2[k]);
}
/* Element-wise complex conjugate: dst = conj(src). */
static void zconj(long N, complex float* dst, const complex float* src)
{
	for (long k = 0; k < N; ++k)
		dst[k] = conjf(src[k]);
}
/* Element-wise equality mask: dst[i] = 1 if src1[i] == src2[i], else 0. */
static void zcmp(long N, complex float* dst, const complex float* src1, const complex float* src2)
{
	for (long k = 0; k < N; ++k) {
		int equal = (src1[k] == src2[k]);
		dst[k] = equal ? 1.f : 0.f;
	}
}
/* Regularized division: dst = src1 / (lambda + src2), but 0 where src2 is 0. */
static void zdiv_reg(long N, complex float* dst, const complex float* src1, const complex float* src2, complex float lambda)
{
	for (long k = 0; k < N; ++k) {
		complex float den = src2[k];
		dst[k] = (den == 0.f) ? 0.f : src1[k] / (lambda + den);
	}
}
static void zphsr(long N, complex float* dst, const complex float* src)
{
for (long i = 0; i < N; i++) {
float s = cabsf(src[i]);
/* Note: the comparison (0 == src[i]) is not enough with `--fast-math`
* with gcc 4.4.3 (but seems to work for 4.7.3, different computer)
* Test:
* complex float a = FLT_MIN;
* complex float c = a / cabsf(a);
* assert(!(isnan(creal(c)) || isnan(cimag(c))));
*/
dst[i] = (0. == s) ? 1. : (src[i] / s);
}
}
static void zexpj(long N, complex float* dst, const complex float* src)
{
for (long i = 0; i < N; i++)
dst[i] = cexpf(1.I * src[i]);
}
static void zarg(long N, complex float* dst, const complex float* src)
{
for (long i = 0; i < N; i++)
dst[i] = cargf(src[i]);
}
/* Element-wise maximum: dst[i] = max(src1[i], src2[i]).
 * MAX comes from misc/misc.h. */
static void max(long N, float* dst, const float* src1, const float* src2)
{
for (long i = 0; i < N; i++)
dst[i] = MAX(src1[i], src2[i]);
}
/* Element-wise minimum: dst[i] = min(src1[i], src2[i]).
 * MIN comes from misc/misc.h. */
static void min(long N, float* dst, const float* src1, const float* src2)
{
for (long i = 0; i < N; i++)
dst[i] = MIN(src1[i], src2[i]);
}
static void vec_pow(long N, float* dst, const float* src1, const float* src2)
{
for (long i = 0; i < N; i++)
dst[i] = powf(src1[i], src2[i]);
}
static void vec_sqrt(long N, float* dst, const float* src)
{
for (long i = 0; i < N; i++)
dst[i] = sqrtf(src[i]);
}
/* Comparison mask: dst[i] = 1 if src1[i] <= src2[i], else 0. */
static void vec_le(long N, float* dst, const float* src1, const float* src2)
{
	for (long k = 0; k < N; ++k) {
		int le = (src1[k] <= src2[k]);
		dst[k] = (float)le;
	}
}
/* Comparison mask: dst[i] = 1 if src1[i] >= src2[i], else 0. */
static void vec_ge(long N, float* dst, const float* src1, const float* src2)
{
	for (long k = 0; k < N; ++k) {
		int ge = (src1[k] >= src2[k]);
		dst[k] = (float)ge;
	}
}
/**
* Step (1) of soft thesholding, y = ST(x, lambda).
* Only computes the residual, resid = MAX( (abs(x) - lambda)/abs(x)), 0 )
*
* @param N number of elements
* @param lambda threshold parameter
* @param d pointer to destination, resid
* @param x pointer to input
*/
static void zsoftthresh_half(long N, float lambda, complex float* d, const complex float* x)
{
for (long i = 0; i < N; i++) {
float norm = cabsf(x[i]);
float red = norm - lambda;
d[i] = (red > 0.) ? (red / norm) : 0.;
}
}
static void zsoftthresh(long N, float lambda, complex float* d, const complex float* x)
{
for (long i = 0; i < N; i++) {
float norm = cabsf(x[i]);
float red = norm - lambda;
d[i] = (red > 0.) ? (red / norm) * x[i]: 0.;
}
}
/* Real soft-thresholding residual: d[i] = max((|x[i]| - lambda)/|x[i]|, 0). */
static void softthresh_half(long N, float lambda, float* d, const float* x)
{
	for (long k = 0; k < N; ++k) {
		float mag = fabsf(x[k]);
		float shrunk = mag - lambda;
		d[k] = (shrunk > 0.f) ? (shrunk / mag) : 0.f;
	}
}
/* Real soft thresholding: shrink magnitude by lambda, keep the sign;
 * elements with |x| <= lambda become 0. */
static void softthresh(long N, float lambda, float* d, const float* x)
{
	for (long k = 0; k < N; ++k) {
		float mag = fabsf(x[k]);
		float shrunk = mag - lambda;
		d[k] = (shrunk > 0.f) ? (shrunk / mag) * x[k] : 0.f;
	}
}
/**
* Return the absolute value of the kth largest array element
* To be used for hard thresholding
*
* @param N number of elements
* @param k the sorted element index to pick
* @param ar the input complex array
*
* @returns the absolute value of the kth largest array element.
*
*/
/* Absolute value of the k-th largest element of ar (by magnitude,
 * presumably — quickselect_complex is defined in misc).
 * Works on a scratch copy so the caller's array is untouched; the
 * 2*N float copy reinterprets the complex buffer as interleaved floats. */
static float klargest_complex_partsort( unsigned int N, unsigned int k, const complex float* ar)
{
assert(k <= N);
complex float* tmp = (complex float*)xmalloc(N * sizeof(complex float));
copy(2 * N, (float*)tmp, (float*)ar);
float thr = quickselect_complex(tmp, N, k);
xfree(tmp);
return thr;
}
/**
* Hard thesholding, y = HT(x, thr).
* computes the thresholded vector, y = x * (abs(x) >= t(kmax))
*
* @param N number of elements
* @param k threshold parameter, index of kth largest element of sorted x
* @param d pointer to destination, y
* @param x pointer to input
*/
/* Hard thresholding: keep only elements whose magnitude strictly exceeds
 * that of the k-th largest element; everything else becomes 0.
 * NOTE(review): the strict '>' means elements tied with the threshold are
 * zeroed, so fewer than k elements may survive when there are ties. */
static void zhardthresh(long N, unsigned int k, complex float* d, const complex float* x)
{
float thr = klargest_complex_partsort(N, k, x);
for (long i = 0; i < N; i++) {
float norm = cabsf(x[i]);
d[i] = (norm > thr) ? x[i] : 0.;
}
}
/* Apply the non-zero support of one vector to another, complex numbers */
static void nzsupport(long N, float* out, const float* in)
{
#ifdef _OPENMP
int par = 0;
par = omp_in_parallel();
#endif
#pragma omp parallel for if (par == 0) // if not already in a parallel region
for (long i = 0; i < N; ++i){
if (in[i] == 0.)
out[i] = 0.;
}
}
/* Exchange the contents of two float vectors, element by element. */
static void swap(long N, float* a, float* b)
{
	for (long k = 0; k < N; ++k) {
		float held = a[k];
		a[k] = b[k];
		b[k] = held;
	}
}
// identical copy in num/fft.c
static double fftmod_phase(long length, int j)
{
long center1 = length / 2;
double shift = (double)center1 / (double)length;
return ((double)j - (double)center1 / 2.) * shift;
}
/* Phase factor exp(+/- 2*pi*i * frac) for index j, folding in the caller's
 * base phase; inv flips the sign. The four quarter-turn fractions are
 * special-cased to exact values to avoid rounding error in cexp. */
static complex double fftmod_phase2(long n, int j, bool inv, double phase)
{
	phase += fftmod_phase(n, j);
	double frac = phase - floor(phase);
	double sign = inv ? -1. : 1.;

	if (frac == 0.)
		return 1.;
	if (frac == 0.5)
		return -1.;
	if (frac == 0.25)
		return 1.i * sign;
	if (frac == 0.75)
		return -1.i * sign;

	return cexp(M_PI * 2.i * sign * frac);
}
/* Apply the fftmod phase ramp along the innermost dimension of length n
 * to each of the N rows: dst[i][j] = src[i][j] * phase_factor(j). */
static void zfftmod(long N, complex float* dst, const complex float* src, unsigned int n, bool inv, double phase)
{
	for (long row = 0; row < N; row++) {
		complex float* drow = dst + row * n;
		const complex float* srow = src + row * n;

		for (unsigned int j = 0; j < n; j++)
			drow[j] = srow[j] * fftmod_phase2(n, j, inv, phase);
	}
}
/*
* If you add functions here, please also add to gpuops.c/gpukrnls.cu
*/
/* CPU implementation table for the vec_ops interface; the GPU mirror of
 * this table lives in gpuops.c/gpukrnls.cu (keep the two in sync). */
const struct vec_ops cpu_ops = {
.float2double = float2double,
.double2float = double2float,
.dot = dot,
.asum = asum,
.zl1norm = zl1norm,
.add = add,
.sub = sub,
.mul = mul,
.div = vec_div,
.fmac = fmac,
.fmac2 = fmac2,
.axpy = axpy,
.pow = vec_pow,
.sqrt = vec_sqrt,
.le = vec_le,
.ge = vec_ge,
.zmul = zmul,
.zdiv = zdiv,
.zfmac = zfmac,
.zfmac2 = zfmac2,
.zmulc = zmulc,
.zfmacc = zfmacc,
.zfmacc2 = zfmacc2,
.zpow = zpow,
.zphsr = zphsr,
.zconj = zconj,
.zexpj = zexpj,
.zarg = zarg,
.zcmp = zcmp,
.zdiv_reg = zdiv_reg,
.zfftmod = zfftmod,
.max = max,
.min = min,
.zsoftthresh = zsoftthresh,
.zsoftthresh_half = zsoftthresh_half,
.softthresh = softthresh,
.softthresh_half = softthresh_half,
.zhardthresh = zhardthresh,
.nzsupport = nzsupport,
};
// defined in iter/vec.h
/* Function-pointer interface used by the iterative algorithms in
 * iter/italgos.c; this duplicates the declaration from iter/vec.h. */
struct vec_iter_s {
float* (*allocate)(long N);
void (*del)(float* x);
void (*clear)(long N, float* x);
void (*copy)(long N, float* a, const float* x);
void (*swap)(long N, float* a, float* x);
double (*norm)(long N, const float* x);
double (*dot)(long N, const float* x, const float* y);
void (*sub)(long N, float* a, const float* x, const float* y);
void (*add)(long N, float* a, const float* x, const float* y);
void (*smul)(long N, float alpha, float* a, const float* x);
void (*xpay)(long N, float alpha, float* a, const float* x);
void (*axpy)(long N, float* a, float alpha, const float* x);
void (*nzsupport)(long N, float* out, const float* in);
};
extern const struct vec_iter_s cpu_iter_ops;
/* CPU implementation table for the iterative-solver vector interface.
 * NOTE(review): .swap is not listed in declaration order and the struct's
 * swap/clear members are wired to the local helpers — designated
 * initializers make the ordering irrelevant. */
const struct vec_iter_s cpu_iter_ops = {
.allocate = allocate,
.del = del,
.clear = clear,
.copy = copy,
.dot = dot,
.norm = norm,
.axpy = axpy,
.xpay = xpay,
.smul = smul,
.add = add,
.sub = sub,
.swap = swap,
.nzsupport = nzsupport,
};
|
DRB011-minusminus-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The -- operation on numNodes2 is not protected, causing data race.
Data race pair: numNodes2@74:7 vs. numNodes2@74:7
*/
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
/* DataRaceBench kernel: fill x[] with alternating +5/-5, then count
 * non-positive entries by decrementing numNodes2 in a parallel loop. */
int main(int argc,char *argv[])
{
int i;
int len = 100;
int numNodes = len;
int numNodes2 = 10;
int x[100];
// initialize x[]: even indices get +5, odd indices get -5
#pragma omp parallel for private (i) firstprivate (len)
for (i = 0; i <= len - 1; i += 1) {
if (i % 2 == 0)
x[i] = 5;
else
x[i] = - 5;
}
// NOTE(review): the file header documents an unprotected "--" data race on
// numNodes2, but this pragma carries reduction(-:numNodes2), which makes
// the decrements race-free — confirm which benchmark variant is intended.
#pragma omp parallel for private (i) reduction (-:numNodes2)
for (i = numNodes - 1; i >= 0; i += -1) {
if (x[i] <= 0) {
numNodes2--;
}
}
printf("numNodes2 = %d\n",numNodes2);
return 0;
}
|
irbuilder_nested_openmp_parallel_empty.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-enable-irbuilder -x c++ -emit-llvm %s -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefixes=ALL,IRBUILDER
// %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o /tmp/t1 %s
// %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -include-pch /tmp/t1 -verify %s -emit-llvm -o - | FileCheck --check-prefixes=ALL-DEBUG,IRBUILDER-DEBUG %s
// expected-no-diagnostics
// TODO: Teach the update script to check new functions too.
#ifndef HEADER
#define HEADER
// ALL-LABEL: @_Z17nested_parallel_0v(
// ALL-NEXT: entry:
// ALL-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// ALL-NEXT: br label [[OMP_PARALLEL:%.*]]
// ALL: omp_parallel:
// ALL-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @_Z17nested_parallel_0v..omp_par.1 to void (i32*, i32*, ...)*))
// ALL-NEXT: br label [[OMP_PAR_OUTLINED_EXIT12:%.*]]
// ALL: omp.par.outlined.exit12:
// ALL-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// ALL: omp.par.exit.split:
// ALL-NEXT: ret void
//
/* Empty nested parallel regions — exercises OpenMPIRBuilder outlining
 * when the inner region has no body (see CHECK lines above). */
void nested_parallel_0(void) {
#pragma omp parallel
{
#pragma omp parallel
{
}
}
}
// ALL-LABEL: @_Z17nested_parallel_1Pfid(
// ALL-NEXT: entry:
// ALL-NEXT: [[STRUCTARG14:%.*]] = alloca { { i32*, double*, float** }*, i32*, double*, float** }, align 8
// ALL-NEXT: [[STRUCTARG:%.*]] = alloca { i32*, double*, float** }, align 8
// ALL-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// ALL-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// ALL-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// ALL-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// ALL-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// ALL-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// ALL-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// ALL-NEXT: br label [[OMP_PARALLEL:%.*]]
// ALL: omp_parallel:
// ALL-NEXT: [[GEP_STRUCTARG:%.*]] = getelementptr { { i32*, double*, float** }*, i32*, double*, float** }, { { i32*, double*, float** }*, i32*, double*, float** }* [[STRUCTARG14]], i32 0, i32 0
// ALL-NEXT: store { i32*, double*, float** }* [[STRUCTARG]], { i32*, double*, float** }** [[GEP_STRUCTARG]], align 8
// ALL-NEXT: [[GEP_A_ADDR15:%.*]] = getelementptr { { i32*, double*, float** }*, i32*, double*, float** }, { { i32*, double*, float** }*, i32*, double*, float** }* [[STRUCTARG14]], i32 0, i32 1
// ALL-NEXT: store i32* [[A_ADDR]], i32** [[GEP_A_ADDR15]], align 8
// ALL-NEXT: [[GEP_B_ADDR16:%.*]] = getelementptr { { i32*, double*, float** }*, i32*, double*, float** }, { { i32*, double*, float** }*, i32*, double*, float** }* [[STRUCTARG14]], i32 0, i32 2
// ALL-NEXT: store double* [[B_ADDR]], double** [[GEP_B_ADDR16]], align 8
// ALL-NEXT: [[GEP_R_ADDR17:%.*]] = getelementptr { { i32*, double*, float** }*, i32*, double*, float** }, { { i32*, double*, float** }*, i32*, double*, float** }* [[STRUCTARG14]], i32 0, i32 3
// ALL-NEXT: store float** [[R_ADDR]], float*** [[GEP_R_ADDR17]], align 8
// ALL-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, { { i32*, double*, float** }*, i32*, double*, float** }*)* @_Z17nested_parallel_1Pfid..omp_par.2 to void (i32*, i32*, ...)*), { { i32*, double*, float** }*, i32*, double*, float** }* [[STRUCTARG14]])
// ALL-NEXT: br label [[OMP_PAR_OUTLINED_EXIT13:%.*]]
// ALL: omp.par.outlined.exit13:
// ALL-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// ALL: omp.par.exit.split:
// ALL-NEXT: ret void
//
// Codegen fixture: two nested parallel regions; the inner one reads a/b and
// writes through r. The expected LLVM IR is pinned by the auto-generated
// "ALL" FileCheck lines above (presumably produced by update_cc_test_checks.py
// — regenerate them if this body is ever changed).
void nested_parallel_1(float *r, int a, double b) {
#pragma omp parallel
{
#pragma omp parallel
{
*r = a + b;
}
}
}
// ALL-LABEL: @_Z17nested_parallel_2Pfid(
// ALL-NEXT: entry:
// ALL-NEXT: [[STRUCTARG68:%.*]] = alloca { i32*, double*, float**, { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }*, { i32*, double*, float** }*, { i32*, double*, float** }* }, align 8
// ALL-NEXT: [[STRUCTARG64:%.*]] = alloca { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }, align 8
// ALL-NEXT: [[STRUCTARG59:%.*]] = alloca { i32*, double*, float** }, align 8
// ALL-NEXT: [[STRUCTARG:%.*]] = alloca { i32*, double*, float** }, align 8
// ALL-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// ALL-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// ALL-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// ALL-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// ALL-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// ALL-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// ALL-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// ALL-NEXT: br label [[OMP_PARALLEL:%.*]]
// ALL: omp_parallel:
// ALL-NEXT: [[GEP_A_ADDR:%.*]] = getelementptr { i32*, double*, float**, { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }*, { i32*, double*, float** }*, { i32*, double*, float** }* }, { i32*, double*, float**, { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }*, { i32*, double*, float** }*, { i32*, double*, float** }* }* [[STRUCTARG68]], i32 0, i32 0
// ALL-NEXT: store i32* [[A_ADDR]], i32** [[GEP_A_ADDR]], align 8
// ALL-NEXT: [[GEP_B_ADDR:%.*]] = getelementptr { i32*, double*, float**, { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }*, { i32*, double*, float** }*, { i32*, double*, float** }* }, { i32*, double*, float**, { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }*, { i32*, double*, float** }*, { i32*, double*, float** }* }* [[STRUCTARG68]], i32 0, i32 1
// ALL-NEXT: store double* [[B_ADDR]], double** [[GEP_B_ADDR]], align 8
// ALL-NEXT: [[GEP_R_ADDR:%.*]] = getelementptr { i32*, double*, float**, { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }*, { i32*, double*, float** }*, { i32*, double*, float** }* }, { i32*, double*, float**, { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }*, { i32*, double*, float** }*, { i32*, double*, float** }* }* [[STRUCTARG68]], i32 0, i32 2
// ALL-NEXT: store float** [[R_ADDR]], float*** [[GEP_R_ADDR]], align 8
// ALL-NEXT: [[GEP_STRUCTARG64:%.*]] = getelementptr { i32*, double*, float**, { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }*, { i32*, double*, float** }*, { i32*, double*, float** }* }, { i32*, double*, float**, { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }*, { i32*, double*, float** }*, { i32*, double*, float** }* }* [[STRUCTARG68]], i32 0, i32 3
// ALL-NEXT: store { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }* [[STRUCTARG64]], { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }** [[GEP_STRUCTARG64]], align 8
// ALL-NEXT: [[GEP_STRUCTARG69:%.*]] = getelementptr { i32*, double*, float**, { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }*, { i32*, double*, float** }*, { i32*, double*, float** }* }, { i32*, double*, float**, { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }*, { i32*, double*, float** }*, { i32*, double*, float** }* }* [[STRUCTARG68]], i32 0, i32 4
// ALL-NEXT: store { i32*, double*, float** }* [[STRUCTARG]], { i32*, double*, float** }** [[GEP_STRUCTARG69]], align 8
// ALL-NEXT: [[GEP_STRUCTARG5970:%.*]] = getelementptr { i32*, double*, float**, { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }*, { i32*, double*, float** }*, { i32*, double*, float** }* }, { i32*, double*, float**, { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }*, { i32*, double*, float** }*, { i32*, double*, float** }* }* [[STRUCTARG68]], i32 0, i32 5
// ALL-NEXT: store { i32*, double*, float** }* [[STRUCTARG59]], { i32*, double*, float** }** [[GEP_STRUCTARG5970]], align 8
// ALL-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, { i32*, double*, float**, { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }*, { i32*, double*, float** }*, { i32*, double*, float** }* }*)* @_Z17nested_parallel_2Pfid..omp_par.5 to void (i32*, i32*, ...)*), { i32*, double*, float**, { i32*, double*, float**, { i32*, double*, float** }*, { i32*, double*, float** }* }*, { i32*, double*, float** }*, { i32*, double*, float** }* }* [[STRUCTARG68]])
// ALL-NEXT: br label [[OMP_PAR_OUTLINED_EXIT55:%.*]]
// ALL: omp.par.outlined.exit55:
// ALL-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// ALL: omp.par.exit.split:
// ALL-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// ALL-NEXT: [[CONV56:%.*]] = sitofp i32 [[TMP0]] to double
// ALL-NEXT: [[TMP1:%.*]] = load double, double* [[B_ADDR]], align 8
// ALL-NEXT: [[ADD57:%.*]] = fadd double [[CONV56]], [[TMP1]]
// ALL-NEXT: [[CONV58:%.*]] = fptrunc double [[ADD57]] to float
// ALL-NEXT: [[TMP2:%.*]] = load float*, float** [[R_ADDR]], align 8
// ALL-NEXT: store float [[CONV58]], float* [[TMP2]], align 4
// ALL-NEXT: ret void
//
// Codegen fixture: three levels of parallel nesting with sibling inner
// regions and stores interleaved at every level, exercising the nested
// capture structs checked above. Keep in sync with the auto-generated
// "ALL" FileCheck lines (regenerate them if this body changes).
void nested_parallel_2(float *r, int a, double b) {
#pragma omp parallel
{
*r = a + b;
#pragma omp parallel
{
*r = a + b;
#pragma omp parallel
{
*r = a + b;
}
*r = a + b;
#pragma omp parallel
{
*r = a + b;
}
*r = a + b;
}
*r = a + b;
}
*r = a + b;
}
#endif
|
variable_utils.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
// Ruben Zorrilla
// Vicente Mataix Ferrandiz
//
//
#if !defined(KRATOS_VARIABLE_UTILS )
#define KRATOS_VARIABLE_UTILS
/* System includes */
/* External includes */
/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/checks.h"
#include "utilities/parallel_utilities.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class VariableUtils
* @ingroup KratosCore
* @brief This class implements a set of auxiliar, already parallelized, methods to
* perform some common tasks related with the variable values and fixity.
* @details The methods are exported to python in order to add this improvements to the python interface
* @author Riccardo Rossi
* @author Ruben Zorrilla
* @author Vicente Mataix Ferrandiz
*/
class KRATOS_API(KRATOS_CORE) VariableUtils
{
public:
///@name Type Definitions
///@{
/// The node type
typedef ModelPart::NodeType NodeType;
/// The condition type
typedef ModelPart::ConditionType ConditionType;
/// The element type
typedef ModelPart::ElementType ElementType;
/// We create the Pointer related to VariableUtils
KRATOS_CLASS_POINTER_DEFINITION(VariableUtils);
/// The nodes container
typedef ModelPart::NodesContainerType NodesContainerType;
/// The conditions container
typedef ModelPart::ConditionsContainerType ConditionsContainerType;
/// The elements container
typedef ModelPart::ElementsContainerType ElementsContainerType;
/// A definition of the double variable
typedef Variable< double > DoubleVarType;
/// A definition of the array variable
typedef Variable< array_1d<double, 3 > > ArrayVarType;
///@}
///@name Life Cycle
///@{
/** Constructor.
*/
/** Destructor.
*/
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Copies the nodal value of a variable from an origin model
* part nodes to the nodes in a destination model part. It is assumed that
* both origin and destination model parts have the same number of nodes.
* @param rVariable reference to the variable to get the value from
* @param rDestinationVariable reference to the variable to be set
* @param rOriginModelPart origin model part from where the values are retrieved
* @param rDestinationModelPart destination model part to where the values are copied to
* @param BuffStep buffer step
*/
template< class TVarType >
void CopyModelPartNodalVar(
    const TVarType& rVariable,
    const TVarType& rDestinationVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const unsigned int BuffStep = 0)
{
    const int number_of_origin_nodes = rOriginModelPart.NumberOfNodes();
    const int number_of_destination_nodes = rDestinationModelPart.NumberOfNodes();

    // The copy is positional (node i -> node i), so both containers must match in size
    KRATOS_ERROR_IF_NOT(number_of_origin_nodes == number_of_destination_nodes) << "Origin and destination model parts have different number of nodes."
        << "\n\t- Number of origin nodes: " << number_of_origin_nodes
        << "\n\t- Number of destination nodes: " << number_of_destination_nodes << std::endl;

    #pragma omp parallel for
    for (int i = 0; i < number_of_origin_nodes; ++i) {
        const auto it_origin_node = rOriginModelPart.NodesBegin() + i;
        auto it_destination_node = rDestinationModelPart.NodesBegin() + i;
        it_destination_node->GetSolutionStepValue(rDestinationVariable, BuffStep) =
            it_origin_node->GetSolutionStepValue(rVariable, BuffStep);
    }
}
/**
* @brief Copies the nodal value of a variable from an origin model
* part nodes to the nodes in a destination model part. It is assumed that
* both origin and destination model parts have the same number of nodes.
* @param rVariable reference to the variable to get the value from and to save in
* @param rOriginModelPart origin model part from where the values are retrieved
* @param rDestinationModelPart destination model part to where the values are copied to
* @param BuffStep buffer step
*/
template< class TVarType >
void CopyModelPartNodalVar(
const TVarType& rVariable,
const ModelPart& rOriginModelPart,
ModelPart& rDestinationModelPart,
const unsigned int BuffStep = 0)
{
// Delegates to the two-variable version, using rVariable as both source and target
this->CopyModelPartNodalVar(rVariable, rVariable, rOriginModelPart, rDestinationModelPart, BuffStep);
}
/**
 * @brief Copies the historical nodal value of a variable from the origin model
 * part nodes to the non-historical database of the destination model part nodes.
 * The copy is positional (node i of the origin onto node i of the destination),
 * so both model parts must have the same number of nodes.
 * @param rVariable reference to the variable to read (historical database)
 * @param rDestinationVariable reference to the variable to write (non-historical database)
 * @param rOriginModelPart origin model part from where the values are retrieved
 * @param rDestinationModelPart destination model part to where the values are copied to
 * @param BuffStep buffer step used when reading the origin values
 */
template< class TVarType >
void CopyModelPartNodalVarToNonHistoricalVar(
const TVarType &rVariable,
const TVarType &rDestinationVariable,
const ModelPart &rOriginModelPart,
ModelPart &rDestinationModelPart,
const unsigned int BuffStep = 0)
{
const int n_orig_nodes = rOriginModelPart.NumberOfNodes();
const int n_dest_nodes = rDestinationModelPart.NumberOfNodes();
KRATOS_ERROR_IF_NOT(n_orig_nodes == n_dest_nodes) <<
"Origin and destination model parts have different number of nodes." <<
"\n\t- Number of origin nodes: " << n_orig_nodes <<
"\n\t- Number of destination nodes: " << n_dest_nodes << std::endl;
#pragma omp parallel for
for(int i_node = 0; i_node < n_orig_nodes; ++i_node){
auto it_dest_node = rDestinationModelPart.NodesBegin() + i_node;
const auto &it_orig_node = rOriginModelPart.NodesBegin() + i_node;
// Read from the historical buffer, write into the node's data value container
const auto &r_value = it_orig_node->GetSolutionStepValue(rVariable, BuffStep);
it_dest_node->GetValue(rDestinationVariable) = r_value;
}
}
/**
 * @brief Same-variable overload: copies the historical value of @p rVariable on the
 * origin nodes into the non-historical value of the same variable on the destination nodes.
 */
template< class TVarType >
void CopyModelPartNodalVarToNonHistoricalVar(
const TVarType &rVariable,
const ModelPart &rOriginModelPart,
ModelPart &rDestinationModelPart,
const unsigned int BuffStep = 0)
{
this->CopyModelPartNodalVarToNonHistoricalVar(rVariable, rVariable, rOriginModelPart, rDestinationModelPart, BuffStep);
}
/**
 * @brief Copies historical nodal values of @p rOriginVariable from the origin model part
 * to the historical database of @p rDestinationVariable in the destination model part.
 * The entity selection and iteration are delegated to CopyModelPartFlaggedVariable;
 * afterwards the destination variable is synchronized through the communicator.
 * @param rOriginVariable variable read from the origin nodes (buffer step @p ReadBufferStep)
 * @param rDestinationVariable variable written on the destination nodes (buffer step @p WriteBufferStep)
 * @param rOriginModelPart origin model part
 * @param rDestinationModelPart destination model part
 * @param rFlag flag forwarded to CopyModelPartFlaggedVariable to select the nodes
 * @param CheckValue flag state forwarded to CopyModelPartFlaggedVariable
 * @param ReadBufferStep buffer step read from the origin nodes
 * @param WriteBufferStep buffer step written on the destination nodes
 */
template <class TDataType>
void CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true,
    const unsigned int ReadBufferStep = 0,
    const unsigned int WriteBufferStep = 0)
{
    KRATOS_TRY

    // Copying a variable onto itself at the same buffer step would be a no-op; forbid it
    KRATOS_ERROR_IF(
        rOriginModelPart.FullName() == rDestinationModelPart.FullName() &&
        rOriginVariable == rDestinationVariable &&
        ReadBufferStep == WriteBufferStep)
        << "Trying to copy flagged nodal solution step values with the same origin and destination model parts/variables/buffer steps. This is not permitted ( Origin model part: "
        << rOriginModelPart.Name() << ", destination model part: " << rDestinationModelPart.Name()
        << ", variable: " << rOriginVariable.Name() << ", buffer step: " << ReadBufferStep << " ) !";

    KRATOS_ERROR_IF_NOT(rOriginModelPart.HasNodalSolutionStepVariable(rOriginVariable))
        << rOriginVariable.Name() << " is not found in nodal solution step variables list in origin model part ( "
        << rOriginModelPart.Name() << " ).";

    KRATOS_ERROR_IF_NOT(rDestinationModelPart.HasNodalSolutionStepVariable(rDestinationVariable))
        << rDestinationVariable.Name() << " is not found in nodal solution step variables list in destination model part ( "
        << rDestinationModelPart.Name() << " ).";

    KRATOS_ERROR_IF(ReadBufferStep >= rOriginModelPart.GetBufferSize())
        << "Origin model part ( " << rOriginModelPart.Name()
        << " ) buffer size is smaller or equal than read buffer size [ "
        << rOriginModelPart.GetBufferSize() << " <= " << ReadBufferStep << " ].";

    // FIX: this check guards WriteBufferStep, but the message used to say "read buffer size"
    KRATOS_ERROR_IF(WriteBufferStep >= rDestinationModelPart.GetBufferSize())
        << "Destination model part ( " << rDestinationModelPart.Name()
        << " ) buffer size is smaller or equal than write buffer size [ "
        << rDestinationModelPart.GetBufferSize() << " <= " << WriteBufferStep << " ].";

    CopyModelPartFlaggedVariable<NodesContainerType>(
        rOriginModelPart, rDestinationModelPart, rFlag, CheckValue,
        // Writer: store into the destination node's historical buffer
        [&](NodeType& rDestNode, const TDataType& rValue) {
            rDestNode.FastGetSolutionStepValue(
                rDestinationVariable, WriteBufferStep) = rValue;
        },
        // Reader: fetch from the origin node's historical buffer
        [&](const NodeType& rOriginNode) -> const TDataType& {
            return rOriginNode.FastGetSolutionStepValue(rOriginVariable, ReadBufferStep);
        });

    rDestinationModelPart.GetCommunicator().SynchronizeVariable(rDestinationVariable);

    KRATOS_CATCH("");
}
/// @brief Single-model-part overload: origin and destination are both @p rModelPart.
template <class TDataType>
void CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar(
const Variable<TDataType>& rOriginVariable,
const Variable<TDataType>& rDestinationVariable,
ModelPart& rModelPart,
const Flags& rFlag,
const bool CheckValue = true,
const unsigned int ReadBufferStep = 0,
const unsigned int WriteBufferStep = 0)
{
KRATOS_TRY
CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar(
rOriginVariable, rDestinationVariable, rModelPart, rModelPart,
rFlag, CheckValue, ReadBufferStep, WriteBufferStep);
KRATOS_CATCH("");
}
/// @brief Single-variable overload: @p rVariable is used as both origin and destination.
template <class TDataType>
void CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar(
const Variable<TDataType>& rVariable,
const ModelPart& rOriginModelPart,
ModelPart& rDestinationModelPart,
const Flags& rFlag,
const bool CheckValue = true,
const unsigned int ReadBufferStep = 0,
const unsigned int WriteBufferStep = 0)
{
KRATOS_TRY
CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar(
rVariable, rVariable, rOriginModelPart, rDestinationModelPart,
rFlag, CheckValue, ReadBufferStep, WriteBufferStep);
KRATOS_CATCH("");
}
/**
 * @brief Copies historical nodal values of @p rOriginVariable from the origin model part
 * into the non-historical database of @p rDestinationVariable in the destination model
 * part. The entity selection and iteration are delegated to CopyModelPartFlaggedVariable;
 * afterwards the destination variable is synchronized through the communicator.
 * @param rOriginVariable variable read from the origin nodes (buffer step @p ReadBufferStep)
 * @param rDestinationVariable variable written to the destination nodes' data value container
 * @param rOriginModelPart origin model part
 * @param rDestinationModelPart destination model part
 * @param rFlag flag forwarded to CopyModelPartFlaggedVariable to select the nodes
 * @param CheckValue flag state forwarded to CopyModelPartFlaggedVariable
 * @param ReadBufferStep buffer step read from the origin nodes
 */
template <class TDataType>
void CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar(
const Variable<TDataType>& rOriginVariable,
const Variable<TDataType>& rDestinationVariable,
const ModelPart& rOriginModelPart,
ModelPart& rDestinationModelPart,
const Flags& rFlag,
const bool CheckValue = true,
const unsigned int ReadBufferStep = 0)
{
KRATOS_TRY
KRATOS_ERROR_IF_NOT(rOriginModelPart.HasNodalSolutionStepVariable(rOriginVariable))
<< rOriginVariable.Name() << " is not found in nodal solution step variables list in origin model part ( "
<< rOriginModelPart.Name() << " ).";
KRATOS_ERROR_IF(ReadBufferStep >= rOriginModelPart.GetBufferSize())
<< "Origin model part ( " << rOriginModelPart.Name()
<< " ) buffer size is smaller or equal than read buffer size [ "
<< rOriginModelPart.GetBufferSize() << " <= " << ReadBufferStep << " ].";
CopyModelPartFlaggedVariable<NodesContainerType>(
rOriginModelPart, rDestinationModelPart, rFlag, CheckValue,
// Writer: store into the destination node's data value container
[&](NodeType& rDestNode, const TDataType& rValue) {
rDestNode.SetValue(rDestinationVariable, rValue);
},
// Reader: fetch from the origin node's historical buffer
[&](const NodeType& rOriginNode) -> const TDataType& {
return rOriginNode.FastGetSolutionStepValue(rOriginVariable, ReadBufferStep);
});
rDestinationModelPart.GetCommunicator().SynchronizeNonHistoricalVariable(rDestinationVariable);
KRATOS_CATCH("");
}
/// @brief Single-model-part overload: origin and destination are both @p rModelPart.
template <class TDataType>
void CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar(
const Variable<TDataType>& rOriginVariable,
const Variable<TDataType>& rDestinationVariable,
ModelPart& rModelPart,
const Flags& rFlag,
const bool CheckValue = true,
const unsigned int ReadBufferStep = 0)
{
CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar(
rOriginVariable, rDestinationVariable, rModelPart, rModelPart,
rFlag, CheckValue, ReadBufferStep);
}
/// @brief Single-variable overload: @p rVariable is used as both origin and destination.
template <class TDataType>
void CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar(
const Variable<TDataType>& rVariable,
const ModelPart& rOriginModelPart,
ModelPart& rDestinationModelPart,
const Flags& rFlag,
const bool CheckValue = true,
const unsigned int ReadBufferStep = 0)
{
CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar(
rVariable, rVariable, rOriginModelPart, rDestinationModelPart,
rFlag, CheckValue, ReadBufferStep);
}
/// @brief Single-variable, single-model-part overload.
template <class TDataType>
void CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar(
const Variable<TDataType>& rVariable,
ModelPart& rModelPart,
const Flags& rFlag,
const bool CheckValue = true,
const unsigned int ReadBufferStep = 0)
{
CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar(
rVariable, rVariable, rModelPart, rModelPart,
rFlag, CheckValue, ReadBufferStep);
}
/**
 * @brief Copies non-historical nodal values of @p rOriginVariable from the origin model
 * part into the historical database of @p rDestinationVariable in the destination model
 * part. The entity selection and iteration are delegated to CopyModelPartFlaggedVariable;
 * afterwards the destination variable is synchronized through the communicator.
 * @param rOriginVariable variable read from the origin nodes' data value container
 * @param rDestinationVariable variable written on the destination nodes (buffer step @p WriteBufferStep)
 * @param rOriginModelPart origin model part
 * @param rDestinationModelPart destination model part
 * @param rFlag flag forwarded to CopyModelPartFlaggedVariable to select the nodes
 * @param CheckValue flag state forwarded to CopyModelPartFlaggedVariable
 * @param WriteBufferStep buffer step written on the destination nodes
 */
template <class TDataType>
void CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true,
    const unsigned int WriteBufferStep = 0)
{
    KRATOS_TRY

    KRATOS_ERROR_IF_NOT(rDestinationModelPart.HasNodalSolutionStepVariable(rDestinationVariable))
        << rDestinationVariable.Name() << " is not found in nodal solution step variables list in destination model part ( "
        << rDestinationModelPart.Name() << " ).";

    // FIX: this check guards WriteBufferStep, but the message used to say "read buffer size"
    KRATOS_ERROR_IF(WriteBufferStep >= rDestinationModelPart.GetBufferSize())
        << "Destination model part ( " << rDestinationModelPart.Name()
        << " ) buffer size is smaller or equal than write buffer size [ "
        << rDestinationModelPart.GetBufferSize() << " <= " << WriteBufferStep << " ].";

    CopyModelPartFlaggedVariable<NodesContainerType>(
        rOriginModelPart, rDestinationModelPart, rFlag, CheckValue,
        // Writer: store into the destination node's historical buffer
        [&](NodeType& rDestNode, const TDataType& rValue) {
            rDestNode.FastGetSolutionStepValue(
                rDestinationVariable, WriteBufferStep) = rValue;
        },
        // Reader: fetch from the origin node's data value container
        [&](const NodeType& rOriginNode) -> const TDataType& {
            return rOriginNode.GetValue(rOriginVariable);
        });

    rDestinationModelPart.GetCommunicator().SynchronizeVariable(rDestinationVariable);

    KRATOS_CATCH("");
}
/// @brief Single-model-part overload: origin and destination are both @p rModelPart.
template <class TDataType>
void CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar(
const Variable<TDataType>& rOriginVariable,
const Variable<TDataType>& rDestinationVariable,
ModelPart& rModelPart,
const Flags& rFlag,
const bool CheckValue = true,
const unsigned int WriteBufferStep = 0)
{
CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar(
rOriginVariable, rDestinationVariable, rModelPart, rModelPart,
rFlag, CheckValue, WriteBufferStep);
}
/// @brief Single-variable overload: @p rVariable is used as both origin and destination.
template <class TDataType>
void CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar(
const Variable<TDataType>& rVariable,
const ModelPart& rOriginModelPart,
ModelPart& rDestinationModelPart,
const Flags& rFlag,
const bool CheckValue = true,
const unsigned int WriteBufferStep = 0)
{
CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar(
rVariable, rVariable, rOriginModelPart, rDestinationModelPart,
rFlag, CheckValue, WriteBufferStep);
}
/// @brief Single-variable, single-model-part overload.
template <class TDataType>
void CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar(
const Variable<TDataType>& rVariable,
ModelPart& rModelPart,
const Flags& rFlag,
const bool CheckValue = true,
const unsigned int WriteBufferStep = 0)
{
CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar(
rVariable, rVariable, rModelPart, rModelPart,
rFlag, CheckValue, WriteBufferStep);
}
/**
 * @brief Copies non-historical nodal values of @p rOriginVariable from the origin model
 * part into the non-historical database of @p rDestinationVariable in the destination
 * model part. The entity selection and iteration are delegated to
 * CopyModelPartFlaggedVariable; afterwards the destination variable is synchronized
 * through the communicator.
 * @param rOriginVariable variable read from the origin nodes' data value container
 * @param rDestinationVariable variable written to the destination nodes' data value container
 * @param rOriginModelPart origin model part
 * @param rDestinationModelPart destination model part
 * @param rFlag flag forwarded to CopyModelPartFlaggedVariable to select the nodes
 * @param CheckValue flag state forwarded to CopyModelPartFlaggedVariable
 */
template <class TDataType>
void CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar(
const Variable<TDataType>& rOriginVariable,
const Variable<TDataType>& rDestinationVariable,
const ModelPart& rOriginModelPart,
ModelPart& rDestinationModelPart,
const Flags& rFlag,
const bool CheckValue = true)
{
KRATOS_TRY
// Same model part and variable would copy a value onto itself; forbid it
KRATOS_ERROR_IF(
rOriginModelPart.FullName() == rDestinationModelPart.FullName() &&
rOriginVariable == rDestinationVariable
) << "Trying to copy flagged nodal non-historical values with the same model parts/variables. This is not permitted ( Origin model part: "
<< rOriginModelPart.Name() << ", destination model part: " << rDestinationModelPart.Name()
<< ", variable: " << rOriginVariable.Name() << " ) !";
CopyModelPartFlaggedVariable<NodesContainerType>(
rOriginModelPart, rDestinationModelPart, rFlag, CheckValue,
// Writer: store into the destination node's data value container
[&](NodeType& rDestNode, const TDataType& rValue) {
rDestNode.SetValue(rDestinationVariable, rValue);
},
// Reader: fetch from the origin node's data value container
[&](const NodeType& rOriginNode) -> const TDataType& {
return rOriginNode.GetValue(rOriginVariable);
});
rDestinationModelPart.GetCommunicator().SynchronizeNonHistoricalVariable(rDestinationVariable);
KRATOS_CATCH("");
}
/// @brief Single-model-part overload: origin and destination are both @p rModelPart.
template <class TDataType>
void CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar(
const Variable<TDataType>& rOriginVariable,
const Variable<TDataType>& rDestinationVariable,
ModelPart& rModelPart,
const Flags& rFlag,
const bool CheckValue = true)
{
CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar(
rOriginVariable, rDestinationVariable, rModelPart, rModelPart, rFlag, CheckValue);
}
/// @brief Single-variable overload: @p rVariable is used as both origin and destination.
template <class TDataType>
void CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar(
const Variable<TDataType>& rVariable,
const ModelPart& rOriginModelPart,
ModelPart& rDestinationModelPart,
const Flags& rFlag,
const bool CheckValue = true)
{
CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar(
rVariable, rVariable, rOriginModelPart, rDestinationModelPart, rFlag, CheckValue);
}
/**
 * @brief Copies elemental (data value container) values of @p rOriginVariable from the
 * origin model part into @p rDestinationVariable of the destination model part elements.
 * The entity selection and iteration are delegated to CopyModelPartFlaggedVariable.
 * @param rOriginVariable variable read from the origin elements
 * @param rDestinationVariable variable written on the destination elements
 * @param rOriginModelPart origin model part
 * @param rDestinationModelPart destination model part
 * @param rFlag flag forwarded to CopyModelPartFlaggedVariable to select the elements
 * @param CheckValue flag state forwarded to CopyModelPartFlaggedVariable
 */
template <class TDataType>
void CopyModelPartFlaggedElementVar(
const Variable<TDataType>& rOriginVariable,
const Variable<TDataType>& rDestinationVariable,
const ModelPart& rOriginModelPart,
ModelPart& rDestinationModelPart,
const Flags& rFlag,
const bool CheckValue = true)
{
KRATOS_TRY
// Same model part and variable would copy a value onto itself; forbid it
KRATOS_ERROR_IF(rOriginModelPart.FullName() == rDestinationModelPart.FullName() && rOriginVariable == rDestinationVariable)
<< "Trying to copy flagged elemental variable data with the same model "
"parts/variables. This is not permitted ( Origin model part: "
<< rOriginModelPart.Name() << ", destination model part: " << rDestinationModelPart.Name()
<< ", variable: " << rOriginVariable.Name() << " ) !";
CopyModelPartFlaggedVariable<ElementsContainerType>(
rOriginModelPart, rDestinationModelPart, rFlag, CheckValue,
// Writer: store into the destination element's data value container
[&](ElementType& rDestElement, const TDataType& rValue) {
rDestElement.SetValue(rDestinationVariable, rValue);
},
// Reader: fetch from the origin element's data value container
[&](const ElementType& rOriginElement) -> const TDataType& {
return rOriginElement.GetValue(rOriginVariable);
});
KRATOS_CATCH("");
}
/// @brief Single-model-part overload: origin and destination are both @p rModelPart.
template <class TDataType>
void CopyModelPartFlaggedElementVar(
const Variable<TDataType>& rOriginVariable,
const Variable<TDataType>& rDestinationVariable,
ModelPart& rModelPart,
const Flags& rFlag,
const bool CheckValue = true)
{
CopyModelPartFlaggedElementVar(
rOriginVariable, rDestinationVariable, rModelPart, rModelPart, rFlag, CheckValue);
}
/// @brief Single-variable overload: @p rVariable is used as both origin and destination.
template <class TDataType>
void CopyModelPartFlaggedElementVar(
const Variable<TDataType>& rVariable,
const ModelPart& rOriginModelPart,
ModelPart& rDestinationModelPart,
const Flags& rFlag,
const bool CheckValue = true)
{
CopyModelPartFlaggedElementVar(
rVariable, rVariable, rOriginModelPart, rDestinationModelPart, rFlag, CheckValue);
}
/**
 * @brief Copies condition (data value container) values of @p rOriginVariable from the
 * origin model part into @p rDestinationVariable of the destination model part conditions.
 * The entity selection and iteration are delegated to CopyModelPartFlaggedVariable.
 * @param rOriginVariable variable read from the origin conditions
 * @param rDestinationVariable variable written on the destination conditions
 * @param rOriginModelPart origin model part
 * @param rDestinationModelPart destination model part
 * @param rFlag flag forwarded to CopyModelPartFlaggedVariable to select the conditions
 * @param CheckValue flag state forwarded to CopyModelPartFlaggedVariable
 */
template <class TDataType>
void CopyModelPartFlaggedConditionVar(
const Variable<TDataType>& rOriginVariable,
const Variable<TDataType>& rDestinationVariable,
const ModelPart& rOriginModelPart,
ModelPart& rDestinationModelPart,
const Flags& rFlag,
const bool CheckValue = true)
{
KRATOS_TRY
// Same model part and variable would copy a value onto itself; forbid it
KRATOS_ERROR_IF(rOriginModelPart.FullName() == rDestinationModelPart.FullName() && rOriginVariable == rDestinationVariable)
<< "Trying to copy flagged condition variable data with the same model "
"parts/variables. This is not permitted ( Origin model part: "
<< rOriginModelPart.Name() << ", destination model part: " << rDestinationModelPart.Name()
<< ", variable: " << rOriginVariable.Name() << " ) !";
CopyModelPartFlaggedVariable<ConditionsContainerType>(
rOriginModelPart, rDestinationModelPart, rFlag, CheckValue,
// Writer: store into the destination condition's data value container
[&](ConditionType& rDestCondition, const TDataType& rValue) {
rDestCondition.SetValue(rDestinationVariable, rValue);
},
// Reader: fetch from the origin condition's data value container
[&](const ConditionType& rOriginCondition) -> const TDataType& {
return rOriginCondition.GetValue(rOriginVariable);
});
KRATOS_CATCH("");
}
/// @brief Single-model-part overload: origin and destination are both @p rModelPart.
template <class TDataType>
void CopyModelPartFlaggedConditionVar(
const Variable<TDataType>& rOriginVariable,
const Variable<TDataType>& rDestinationVariable,
ModelPart& rModelPart,
const Flags& rFlag,
const bool CheckValue = true)
{
CopyModelPartFlaggedConditionVar(
rOriginVariable, rDestinationVariable, rModelPart, rModelPart, rFlag, CheckValue);
}
/// @brief Single-variable overload: @p rVariable is used as both origin and destination.
template <class TDataType>
void CopyModelPartFlaggedConditionVar(
const Variable<TDataType>& rVariable,
const ModelPart& rOriginModelPart,
ModelPart& rDestinationModelPart,
const Flags& rFlag,
const bool CheckValue = true)
{
CopyModelPartFlaggedConditionVar(
rVariable, rVariable, rOriginModelPart, rDestinationModelPart, rFlag, CheckValue);
}
/**
* @brief Copies the elemental value of a variable from an origin model
* part elements to the elements in a destination model part. It is assumed that
* both origin and destination model parts have the same number of elements.
* @param rVariable reference to the variable to be set
* @param rOriginModelPart origin model part from where the values are retrieved
* @param rDestinationModelPart destination model part to where the values are copied to
* @param BuffStep buffer step
*/
template< class TVarType >
void CopyModelPartElementalVar(
    const TVarType& rVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart)
{
    const int number_of_origin_elements = rOriginModelPart.NumberOfElements();
    const int number_of_destination_elements = rDestinationModelPart.NumberOfElements();

    // The copy is positional (element i -> element i), so both containers must match in size
    KRATOS_ERROR_IF_NOT(number_of_origin_elements == number_of_destination_elements) << "Origin and destination model parts have different number of elements."
        << "\n\t- Number of origin elements: " << number_of_origin_elements
        << "\n\t- Number of destination elements: " << number_of_destination_elements << std::endl;

    #pragma omp parallel for
    for (int i = 0; i < number_of_origin_elements; ++i) {
        const auto it_origin_element = rOriginModelPart.ElementsBegin() + i;
        auto it_destination_element = rDestinationModelPart.ElementsBegin() + i;
        it_destination_element->SetValue(rVariable, it_origin_element->GetValue(rVariable));
    }
}
/**
* @brief Sets the nodal value of a scalar variable
* @param rVariable reference to the scalar variable to be set
* @param Value Value to be set
* @param rNodes reference to the objective node set
*/
template <class TVarType>
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable")
void SetScalarVar(
    const TVarType &rVariable,
    const double Value,
    NodesContainerType &rNodes)
{
    KRATOS_TRY

    // Assign Value to the historical database of every node in the container
    const int number_of_nodes = static_cast<int>(rNodes.size());
    #pragma omp parallel for
    for (int i = 0; i < number_of_nodes; ++i) {
        auto it_node = rNodes.begin() + i;
        it_node->FastGetSolutionStepValue(rVariable) = Value;
    }

    KRATOS_CATCH("")
}
/**
* @brief Sets the nodal value of a scalar variable (considering flag)
* @param rVariable reference to the scalar variable to be set
* @param Value Value to be set
* @param rNodes reference to the objective node set
* @param Flag The flag to be considered in the assignation
* @param Check What is checked from the flag
*/
template< class TVarType >
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable")
void SetScalarVarForFlag(
    const TVarType& rVariable,
    const double Value,
    NodesContainerType& rNodes,
    const Flags Flag,
    const bool Check = true
    )
{
    KRATOS_TRY

    // Only the nodes whose flag state matches Check are updated
    const int number_of_nodes = static_cast<int>(rNodes.size());
    #pragma omp parallel for
    for (int i = 0; i < number_of_nodes; ++i) {
        auto it_node = rNodes.begin() + i;
        if (it_node->Is(Flag) == Check) {
            it_node->FastGetSolutionStepValue(rVariable) = Value;
        }
    }

    KRATOS_CATCH("")
}
/**
* @brief Sets the nodal value of a vector variable
* @param rVariable reference to the vector variable to be set
* @param Value array containing the Value to be set
* @param rNodes reference to the objective node set
*/
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable")
void SetVectorVar(
const ArrayVarType& rVariable,
const array_1d<double, 3 >& Value,
NodesContainerType& rNodes
);
/**
* @brief Sets the nodal value of a vector variable (considering flag)
* @param rVariable reference to the vector variable to be set
* @param Value array containing the Value to be set
* @param rNodes reference to the objective node set
* @param Flag The flag to be considered in the assignation
* @param Check What is checked from the flag
*/
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable")
void SetVectorVarForFlag(
const ArrayVarType& rVariable,
const array_1d<double, 3 >& Value,
NodesContainerType& rNodes,
const Flags Flag,
const bool Check = true
);
/**
* @brief Sets the nodal value of a scalar variable
* @tparam TDataType Variable data type
* @tparam Variable<TDataType> Variable type
* @param rVariable reference to the scalar variable to be set
* @param Value Value to be set
* @param rNodes reference to the objective node set
*/
template<class TDataType, class TVarType = Variable<TDataType> >
void SetVariable(
const TVarType& rVariable,
const TDataType& rValue,
NodesContainerType& rNodes
)
{
KRATOS_TRY
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
NodesContainerType::iterator it_node = rNodes.begin() + k;
it_node->FastGetSolutionStepValue(rVariable) = rValue;
}
KRATOS_CATCH("")
}
/**
* @brief Sets the nodal value of a scalar variable (considering flag)
* @tparam TDataType Variable data type
* @tparam Variable<TDataType> Variable type
* @param rVariable reference to the scalar variable to be set
* @param rValue Value to be set
* @param rNodes reference to the objective node set
* @param Flag The flag to be considered in the assignation
* @param Check What is checked from the flag
*/
template <class TDataType, class TVarType = Variable<TDataType>>
void SetVariable(
const TVarType &rVariable,
const TDataType &rValue,
NodesContainerType &rNodes,
const Flags Flag,
const bool CheckValue = true)
{
KRATOS_TRY
#pragma omp parallel for
for (int k = 0; k < static_cast<int>(rNodes.size()); ++k)
{
auto it_node = rNodes.begin() + k;
if (it_node->Is(Flag) == CheckValue)
{
it_node->FastGetSolutionStepValue(rVariable) = rValue;
}
}
KRATOS_CATCH("")
}
/**
* @brief Sets the value of any variable to zero in the given container (non-historical database)
* @param rVariable reference to the variable to be set to zero
* @param rContainer reference to the objective container
*/
template< class TType , class TContainerType>
void SetNonHistoricalVariableToZero(
const Variable< TType >& rVariable,
TContainerType& rContainer)
{
KRATOS_TRY
// rVariable.Zero() yields the variable's zero value; delegate the assignment
this->SetNonHistoricalVariable(rVariable, rVariable.Zero(), rContainer);
KRATOS_CATCH("")
}
/**
 * @brief Sets the nodal historical value of any variable to zero
 * @tparam TType The variable data type
 * @param rVariable reference to the variable to be zeroed
 * @param rNodes reference to the objective node set
 */
template< class TType >
void SetHistoricalVariableToZero(
const Variable< TType >& rVariable,
NodesContainerType& rNodes)
{
KRATOS_TRY
this->SetVariable(rVariable, rVariable.Zero(), rNodes);
KRATOS_CATCH("")
}
/**
 * @brief Sets the nodal value of a scalar non-historical variable
 * @deprecated Use SetNonHistoricalVariable instead
 * @tparam TVarType The variable type
 * @param rVariable reference to the scalar variable to be set
 * @param Value value to be set on every node
 * @param rNodes reference to the objective node set
 */
template< class TVarType >
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetNonHistoricalVariable")
void SetNonHistoricalScalarVar(
const TVarType& rVariable,
const double Value,
NodesContainerType& rNodes
)
{
KRATOS_TRY
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
NodesContainerType::iterator it_node = rNodes.begin() + k;
it_node->SetValue(rVariable, Value);
}
KRATOS_CATCH("")
}
/**
 * @brief Sets the nodal value of a vector non-historical variable
 * @deprecated Use SetNonHistoricalVariable instead
 * @param rVariable reference to the vector variable to be set
 * @param Value array containing the value to be set
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetNonHistoricalVariable")
void SetNonHistoricalVectorVar(
const ArrayVarType& rVariable,
const array_1d<double, 3 >& Value,
NodesContainerType& rNodes
);
/**
* @brief Sets the container value of any type of non historical variable
* @param rVariable reference to the scalar variable to be set
* @param Value Value to be set
* @param rContainer Reference to the objective container
*/
template< class TType, class TContainerType, class TVarType = Variable< TType >>
void SetNonHistoricalVariable(
const TVarType& rVariable,
const TType& Value,
TContainerType& rContainer
)
{
KRATOS_TRY
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = rContainer.begin() + k;
it_cont->SetValue(rVariable, Value);
}
KRATOS_CATCH("")
}
/**
* @brief Sets the container value of any type of non historical variable (considering flag)
* @param rVariable reference to the scalar variable to be set
* @param Value Value to be set
* @param rContainer Reference to the objective container
* @param Flag The flag to be considered in the assignation
* @param Check What is checked from the flag
*/
template< class TType, class TContainerType, class TVarType = Variable< TType >>
void SetNonHistoricalVariable(
const TVarType& rVariable,
const TType& rValue,
TContainerType& rContainer,
const Flags Flag,
const bool Check = true
)
{
KRATOS_TRY
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = rContainer.begin() + k;
if (it_cont->Is(Flag) == Check) {
it_cont->SetValue(rVariable, rValue);
}
}
KRATOS_CATCH("")
}
/**
 * @brief Clears the non-historical data value container of every entity in a container
 * @tparam TContainerType The container type (e.g. nodes, elements or conditions)
 * @param rContainer reference to the objective container
 */
template< class TContainerType>
void ClearNonHistoricalData(TContainerType& rContainer)
{
KRATOS_TRY
// Hoist the begin iterator so each thread only applies an offset
const auto it_cont_begin = rContainer.begin();
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = it_cont_begin + k;
it_cont->Data().Clear();
}
KRATOS_CATCH("")
}
/**
 * @brief Distributes variable values in TContainerType container to nodes
 *
 * This method distributes variable values stored in the TContainerType data value container in rModelPart
 * to nodes. Constant weighting is used for each node based on the rWeightVariable value. The result
 * is stored in the nodal non-historical data value container under the same rVariable. If IsInverseWeightProvided
 * is true, then the weights provided by rWeightVariable are inverted to get the nodal weight. Otherwise, the value
 * given by rWeightVariable is used as the weight.
 *
 * @tparam TDataType Data type
 * @tparam TContainerType ContainerType of model part
 * @tparam TWeightDataType Data type of weight variable (this should be either int or double)
 * @param rModelPart Model part
 * @param rVariable Variable to be distributed
 * @param rWeightVariable Variable which holds the weight to distribute entity values to nodes
 * @param IsInverseWeightProvided Whether the weight is provided as inverse or not
 */
template <class TDataType, class TContainerType, class TWeightDataType>
void WeightedAccumulateVariableOnNodes(
ModelPart& rModelPart,
const Variable<TDataType>& rVariable,
const Variable<TWeightDataType>& rWeightVariable,
const bool IsInverseWeightProvided = false);
/**
* @brief Sets a flag according to a given status over a given container
* @param rFlag flag to be set
* @param rFlagValue flag value to be set
* @param rContainer Reference to the objective container
*/
template< class TContainerType >
void SetFlag(
const Flags& rFlag,
const bool& rFlagValue,
TContainerType& rContainer
)
{
KRATOS_TRY
const auto it_cont_begin = rContainer.begin();
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = it_cont_begin + k;
it_cont->Set(rFlag, rFlagValue);
}
KRATOS_CATCH("")
}
/**
 * @brief Resets a flag (returns it to its undefined state) over a given container
 * @tparam TContainerType The container type (e.g. nodes, elements or conditions)
 * @param rFlag flag to be reset
 * @param rContainer reference to the objective container
 */
template< class TContainerType >
void ResetFlag(
const Flags& rFlag,
TContainerType& rContainer
)
{
KRATOS_TRY
// Hoist the begin iterator so each thread only applies an offset
const auto it_cont_begin = rContainer.begin();
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = it_cont_begin + k;
it_cont->Reset(rFlag);
}
KRATOS_CATCH("")
}
/**
* @brief Flips a flag over a given container
* @param rFlag flag to be set
* @param rContainer Reference to the objective container
*/
template< class TContainerType >
void FlipFlag(
const Flags& rFlag,
TContainerType& rContainer
)
{
KRATOS_TRY
const auto it_cont_begin = rContainer.begin();
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = it_cont_begin + k;
it_cont->Flip(rFlag);
}
KRATOS_CATCH("")
}
/**
 * @brief Takes the value of a vector variable and sets it in another (non-historical) variable
 * @deprecated Use SaveVariable instead
 * @param OriginVariable reference to the origin vector variable
 * @param SavedVariable reference to the destination vector variable
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveVariable")
void SaveVectorVar(
const ArrayVarType& OriginVariable,
const ArrayVarType& SavedVariable,
NodesContainerType& rNodes
);
/**
 * @brief Takes the value of a scalar variable and sets it in another (non-historical) variable
 * @deprecated Use SaveVariable instead
 * @param OriginVariable reference to the origin scalar variable
 * @param SavedVariable reference to the destination scalar variable
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveVariable")
void SaveScalarVar(
const DoubleVarType& OriginVariable,
const DoubleVarType& SavedVariable,
NodesContainerType& rNodes
);
/**
 * @brief Takes the nodal historical value of a variable and saves it in a non-historical variable
 * For a nodal container, this reads the historical (solution step) value of the origin variable
 * and stores it in the non-historical data value container under the saved variable
 * @tparam TDataType The variable data type
 * @tparam TVariableType The variable type
 * @param rOriginVariable Reference to the origin (historical) variable
 * @param rSavedVariable Reference to the destination (non-historical) variable
 * @param rNodesContainer Reference to the nodal container
 */
template< class TDataType, class TVariableType = Variable<TDataType> >
void SaveVariable(
const TVariableType &rOriginVariable,
const TVariableType &rSavedVariable,
NodesContainerType &rNodesContainer)
{
KRATOS_TRY
#pragma omp parallel for
for (int i_node = 0; i_node < static_cast<int>(rNodesContainer.size()); ++i_node) {
auto it_node = rNodesContainer.begin() + i_node;
it_node->SetValue(rSavedVariable, it_node->FastGetSolutionStepValue(rOriginVariable));
}
KRATOS_CATCH("")
}
/**
 * @brief Takes the value of a non-historical vector variable and sets it in another non-historical variable
 * @deprecated Use SaveNonHistoricalVariable instead
 * @param OriginVariable reference to the origin vector variable
 * @param SavedVariable reference to the destination vector variable
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveNonHistoricalVariable")
void SaveVectorNonHistoricalVar(
const ArrayVarType& OriginVariable,
const ArrayVarType& SavedVariable,
NodesContainerType& rNodes
);
/**
 * @brief Takes the value of a non-historical scalar variable and sets it in another non-historical variable
 * @deprecated Use SaveNonHistoricalVariable instead
 * @param OriginVariable reference to the origin scalar variable
 * @param SavedVariable reference to the destination scalar variable
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveNonHistoricalVariable")
void SaveScalarNonHistoricalVar(
const DoubleVarType& OriginVariable,
const DoubleVarType& SavedVariable,
NodesContainerType& rNodes
);
/**
* @brief Takes the value of a non-historical variable and saves it in another historical variable
* For a non-nodal container, this method takes the value of an origin variable and saves it in a destination one
* @tparam TDataType The variable data type
* @tparam TContainerType The container type
* @tparam Variable<TDataType> The variable type
* @param rOriginVariable Reference to the origin variable
* @param rSavedVariable Reference to the destination variable
* @param rContainer Reference to the container of interest
*/
template< class TDataType, class TContainerType, class TVariableType = Variable<TDataType> >
void SaveNonHistoricalVariable(
const TVariableType &rOriginVariable,
const TVariableType &rSavedVariable,
TContainerType &rContainer
)
{
KRATOS_TRY
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(rContainer.size()); ++i) {
auto it_cont = rContainer.begin() + i;
it_cont->SetValue(rSavedVariable, it_cont->GetValue(rOriginVariable));
}
KRATOS_CATCH("")
}
/**
 * @brief Takes the value of an historical vector variable and sets it in another historical variable
 * @deprecated Use CopyVariable instead
 * @param OriginVariable reference to the origin vector variable
 * @param DestinationVariable reference to the destination vector variable
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use CopyVariable")
void CopyVectorVar(
const ArrayVarType& OriginVariable,
const ArrayVarType& DestinationVariable,
NodesContainerType& rNodes
);
/**
 * @brief Takes the value of an historical double variable and sets it in another historical variable
 * @deprecated Use CopyVariable instead
 * @param OriginVariable reference to the origin double variable
 * @param DestinationVariable reference to the destination double variable
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use CopyVariable")
void CopyScalarVar(
const DoubleVarType &OriginVariable,
const DoubleVarType &DestinationVariable,
NodesContainerType &rNodes);
/**
* @brief Takes the value of an historical variable and sets it in another variable
* This function takes the value of an historical variable and sets in another
* variable in all the nodes of the provided container.
* @tparam TDataType The variable data type
* @tparam Variable<TDataType> The variable type
* @param rOriginVariable Reference to the origin variable
* @param rDestinationVariable Reference to the destination variable
* @param rNodesContainer Reference to the nodes container
*/
template< class TDataType, class TVariableType = Variable<TDataType> >
void CopyVariable(
const TVariableType &rOriginVariable,
const TVariableType &rDestinationVariable,
NodesContainerType &rNodesContainer)
{
KRATOS_TRY
#pragma omp parallel for
for (int i_node = 0; i_node < static_cast<int>(rNodesContainer.size()); ++i_node) {
auto it_node = rNodesContainer.begin() + i_node;
it_node->FastGetSolutionStepValue(rDestinationVariable) = it_node->FastGetSolutionStepValue(rOriginVariable);
}
KRATOS_CATCH("")
}
/**
 * @brief Returns a list of nodes filtered using the given double variable and value
 * A node is selected when its historical value of the given variable equals the filtering value
 * @param Variable reference to the double variable to be filtered
 * @param Value filtering value
 * @param rOriginNodes reference to the objective node set
 * @return The list of filtered nodes
 */
NodesContainerType SelectNodeList(
const DoubleVarType& Variable,
const double Value,
const NodesContainerType& rOriginNodes
);
/**
 * @brief Checks if all the nodes of a node set have the specified variable in their nodal (historical) database
 * @tparam TVarType The variable type
 * @param rVariable reference to a variable to be checked
 * @param rNodes reference to the node set to be checked
 * @return 0 if it succeeds (throws otherwise)
 */
template<class TVarType>
int CheckVariableExists(
const TVarType& rVariable,
const NodesContainerType& rNodes
)
{
KRATOS_TRY
for (auto& i_node : rNodes)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(rVariable, i_node);
return 0;
KRATOS_CATCH("");
}
/**
 * @brief Fixes or frees a variable for all of the nodes in the list. The dof has to exist.
 * @param rVar reference to the variable to be fixed or freed
 * @param IsFixed if true fixes, if false frees
 * @param rNodes reference to the nodes set to be fixed or freed
 */
template< class TVarType >
void ApplyFixity(
const TVarType& rVar,
const bool IsFixed,
NodesContainerType& rNodes
)
{
KRATOS_TRY
if (rNodes.size() != 0) {
// checking the first node to avoid error being thrown in parallel region
KRATOS_ERROR_IF_NOT(rNodes.begin()->HasDofFor(rVar)) << "Trying to fix/free dof of variable " << rVar.Name() << " but this dof does not exist in node #" << rNodes.begin()->Id() << "!" << std::endl;
// In debug builds every node is checked, not just the first one
#ifdef KRATOS_DEBUG
for (const auto& r_node : rNodes) {
KRATOS_ERROR_IF_NOT(r_node.HasDofFor(rVar)) << "Trying to fix/free dof of variable " << rVar.Name() << " but this dof does not exist in node #" << r_node.Id() << "!" << std::endl;
}
#endif
CheckVariableExists(rVar, rNodes);
if (IsFixed) {
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
NodesContainerType::iterator it_node = rNodes.begin() + k;
it_node->pGetDof(rVar)->FixDof();
}
} else {
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
NodesContainerType::iterator it_node = rNodes.begin() + k;
it_node->pGetDof(rVar)->FreeDof();
}
}
}
KRATOS_CATCH("")
}
/**
 * @brief Fixes/Frees dofs based on a flag
 *
 * This method fixes/frees the given rVariable, if rFlag matches the CheckValue provided for that
 * specific node.
 *
 * @tparam TVarType Variable type
 * @param rVariable Variable to be fixed or freed
 * @param IsFixed True to fix variable, false to free variable
 * @param rNodes Nodes container
 * @param rFlag Flag to be checked to fix or free
 * @param CheckValue Flag value which is checked against
 */
template< class TVarType >
void ApplyFixity(
const TVarType& rVariable,
const bool IsFixed,
NodesContainerType& rNodes,
const Flags& rFlag,
const bool CheckValue = true)
{
KRATOS_TRY
if (rNodes.size() != 0) {
// checking the first node to avoid error being thrown in parallel region
KRATOS_ERROR_IF_NOT(rNodes.begin()->HasDofFor(rVariable))
<< "Trying to fix/free dof of variable " << rVariable.Name()
<< " but this dof does not exist in node #"
<< rNodes.begin()->Id() << "!" << std::endl;
// In debug builds every node is checked, not just the first one
#ifdef KRATOS_DEBUG
for (const auto& r_node : rNodes) {
KRATOS_ERROR_IF_NOT(r_node.HasDofFor(rVariable))
<< "Trying to fix/free dof of variable " << rVariable.Name()
<< " but this dof does not exist in node #" << r_node.Id()
<< "!" << std::endl;
}
#endif
CheckVariableExists(rVariable, rNodes);
if (IsFixed) {
BlockPartition<NodesContainerType>(rNodes).for_each(
[&rVariable, &rFlag, CheckValue](NodeType& rNode) {
if (rNode.Is(rFlag) == CheckValue) {
rNode.pGetDof(rVariable)->FixDof();
}
});
}
else {
BlockPartition<NodesContainerType>(rNodes).for_each(
[&rVariable, &rFlag, CheckValue](NodeType& rNode) {
if (rNode.Is(rFlag) == CheckValue) {
rNode.pGetDof(rVariable)->FreeDof();
}
});
}
}
KRATOS_CATCH("");
}
/**
* @brief Loops along a vector data to set its values to the nodes contained in a node set.
* @note This function is suitable for scalar historical variables, since each
* one of the values in the data vector is set to its correspondent node. Besides,
* the values must be sorted as the nodes are (value i corresponds to node i).
* @param rVar reference to the variable to be fixed or freed
* @param rData rData vector. Note that its lenght must equal the number of nodes
* @param rNodes reference to the nodes set to be set
*/
template< class TVarType >
void ApplyVector(
const TVarType& rVar,
const Vector& rData,
NodesContainerType& rNodes
)
{
KRATOS_TRY
if(rNodes.size() != 0 && rNodes.size() == rData.size()) {
// First we do a check
CheckVariableExists(rVar, rNodes);
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
NodesContainerType::iterator it_node = rNodes.begin() + k;
it_node->FastGetSolutionStepValue(rVar) = rData[k];
}
} else
KRATOS_ERROR << "There is a mismatch between the size of data array and the number of nodes ";
KRATOS_CATCH("")
}
/**
 * @brief Returns the nodal value summation of a non-historical vector variable
 * @param rVar reference to the vector variable to be summed
 * @param rModelPart reference to the model part that contains the objective node set
 * @return The summation vector result
 */
array_1d<double, 3> SumNonHistoricalNodeVectorVariable(
const ArrayVarType& rVar,
const ModelPart& rModelPart
);
/**
* @brief Returns the nodal value summation of a non-historical scalar variable.
* @param rVar reference to the scalar variable to be summed
* @param rModelPart reference to the model part that contains the objective node set
* @return sum_value: summation result
*/
template< class TVarType >
double SumNonHistoricalNodeScalarVariable(
const TVarType& rVar,
const ModelPart& rModelPart
)
{
KRATOS_TRY
double sum_value = 0.0;
// Getting info
const auto& r_communicator = rModelPart.GetCommunicator();
const auto& r_local_mesh = r_communicator.LocalMesh();
const auto& r_nodes_array = r_local_mesh.Nodes();
const auto it_node_begin = r_nodes_array.begin();
#pragma omp parallel for reduction(+:sum_value)
for (int k = 0; k < static_cast<int>(r_nodes_array.size()); ++k) {
const auto it_node = it_node_begin + k;
sum_value += it_node->GetValue(rVar);
}
return r_communicator.GetDataCommunicator().SumAll(sum_value);
KRATOS_CATCH("")
}
/**
 * @brief This method accumulates and returns a variable value
 * For a nodal historical variable, this method accumulates and
 * returns the summation in a model part (MPI-safe through the data communicator)
 * @tparam TDataType Variable datatype
 * @tparam TVarType Variable type
 * @param rVariable Nodal historical variable to be accumulated
 * @param rModelPart Model part in where the summation is done
 * @param BuffStep Buffer position
 * @return TDataType Value of the summation
 */
template< class TDataType, class TVarType = Variable<TDataType> >
TDataType SumHistoricalVariable(
    const TVarType &rVariable,
    const ModelPart &rModelPart,
    const unsigned int BuffStep = 0
    )
{
    KRATOS_TRY

    TDataType sum_value;
    AuxiliaryInitializeValue(sum_value);

    const auto &r_communicator = rModelPart.GetCommunicator();
    // Hoisted out of the loop: LocalMesh().NodesBegin() is loop-invariant and was
    // previously re-evaluated on every iteration of the hot parallel loop
    const auto it_node_begin = r_communicator.LocalMesh().NodesBegin();
    const int n_nodes = r_communicator.LocalMesh().NumberOfNodes();

    #pragma omp parallel firstprivate(n_nodes)
    {
        // Each thread accumulates privately; partial sums are combined atomically
        TDataType private_sum_value;
        AuxiliaryInitializeValue(private_sum_value);

        #pragma omp for
        for (int i_node = 0; i_node < n_nodes; ++i_node) {
            private_sum_value += (it_node_begin + i_node)->GetSolutionStepValue(rVariable, BuffStep);
        }

        AuxiliaryAtomicAdd(private_sum_value, sum_value);
    }

    return r_communicator.GetDataCommunicator().SumAll(sum_value);

    KRATOS_CATCH("")
}
/**
 * @brief Returns the condition value summation of a historical vector variable
 * @param rVar reference to the vector variable to be summed
 * @param rModelPart reference to the model part that contains the objective condition set
 * @return The summation result
 */
array_1d<double, 3> SumConditionVectorVariable(
const ArrayVarType& rVar,
const ModelPart& rModelPart
);
/**
* @brief Returns the condition value summation of a historical scalar variable
* @param rVar reference to the scalar variable to be summed
* @param rModelPart reference to the model part that contains the objective condition set
* @return sum_value: summation result
*/
template< class TVarType >
double SumConditionScalarVariable(
const TVarType& rVar,
const ModelPart& rModelPart
)
{
KRATOS_TRY
double sum_value = 0.0;
// Getting info
const auto& r_communicator = rModelPart.GetCommunicator();
const auto& r_local_mesh = r_communicator.LocalMesh();
const auto& r_conditions_array = r_local_mesh.Conditions();
const auto it_cond_begin = r_conditions_array.begin();
#pragma omp parallel for reduction(+:sum_value)
for (int k = 0; k < static_cast<int>(r_conditions_array.size()); ++k) {
const auto it_cond = it_cond_begin + k;
sum_value += it_cond->GetValue(rVar);
}
return r_communicator.GetDataCommunicator().SumAll(sum_value);
KRATOS_CATCH("")
}
/**
 * @brief Returns the element value summation of a historical vector variable
 * @param rVar reference to the vector variable to be summed
 * @param rModelPart reference to the model part that contains the objective element set
 * @return The summation result
 */
array_1d<double, 3> SumElementVectorVariable(
const ArrayVarType& rVar,
const ModelPart& rModelPart
);
/**
* @brief Returns the element value summation of a historical scalar variable
* @param rVar reference to the scalar variable to be summed
* @param rModelPart reference to the model part that contains the objective element set
* @return sum_value: summation result
*/
template< class TVarType >
double SumElementScalarVariable(
const TVarType& rVar,
const ModelPart& rModelPart
)
{
KRATOS_TRY
double sum_value = 0.0;
// Getting info
const auto& r_communicator = rModelPart.GetCommunicator();
const auto& r_local_mesh = r_communicator.LocalMesh();
const auto& r_elements_array = r_local_mesh.Elements();
const auto it_elem_begin = r_elements_array.begin();
#pragma omp parallel for reduction(+:sum_value)
for (int k = 0; k < static_cast<int>(r_elements_array.size()); ++k) {
const auto it_elem = it_elem_begin + k;
sum_value += it_elem->GetValue(rVar);
}
return r_communicator.GetDataCommunicator().SumAll(sum_value);
KRATOS_CATCH("")
}
/**
 * @brief This function adds dofs to the nodes in a model part. It is useful since addition is done in parallel
 * @tparam TVarType The variable type
 * @param rVar The variable to be added as DoF
 * @param rModelPart reference to the model part that contains the objective node set
 */
template< class TVarType >
void AddDof(
    const TVarType& rVar,
    ModelPart& rModelPart
    )
{
    KRATOS_TRY

    // First we do a check of the variable key
    KRATOS_CHECK_VARIABLE_KEY(rVar)

    // Checking the first node is enough to avoid throwing inside the parallel region
    if(rModelPart.NumberOfNodes() != 0)
        KRATOS_ERROR_IF_NOT(rModelPart.NodesBegin()->SolutionStepsDataHas(rVar)) << "ERROR:: Variable : " << rVar << " not included in the Solution step data " << std::endl; // message fixed: space was missing after the variable name

    rModelPart.GetNodalSolutionStepVariablesList().AddDof(&rVar);

    #pragma omp parallel for
    for (int k = 0; k < static_cast<int>(rModelPart.NumberOfNodes()); ++k) {
        auto it_node = rModelPart.NodesBegin() + k;
        it_node->AddDof(rVar);
    }

    KRATOS_CATCH("")
}
/**
 * @brief This function adds dofs with their corresponding reaction to the nodes in a model part. It is useful since addition is done in parallel
 * @tparam TVarType The variable type
 * @param rVar The variable to be added as DoF
 * @param rReactionVar The corresponding reaction to the added DoF
 * @param rModelPart reference to the model part that contains the objective node set
 */
template< class TVarType >
void AddDofWithReaction(
    const TVarType& rVar,
    const TVarType& rReactionVar,
    ModelPart& rModelPart
    )
{
    KRATOS_TRY

    KRATOS_CHECK_VARIABLE_KEY(rVar)
    KRATOS_CHECK_VARIABLE_KEY(rReactionVar)

    // Checking the first node is enough to avoid throwing inside the parallel region
    // (messages fixed: "Soluttion" misspelling and missing spaces before "not included")
    if(rModelPart.NumberOfNodes() != 0) {
        KRATOS_ERROR_IF_NOT(rModelPart.NodesBegin()->SolutionStepsDataHas(rVar)) << "ERROR:: DoF Variable : " << rVar << " not included in the Solution step data " << std::endl;
        KRATOS_ERROR_IF_NOT(rModelPart.NodesBegin()->SolutionStepsDataHas(rReactionVar)) << "ERROR:: Reaction Variable : " << rReactionVar << " not included in the Solution step data " << std::endl;
    }

    // If in debug we do a check for all nodes
#ifdef KRATOS_DEBUG
    CheckVariableExists(rVar, rModelPart.Nodes());
    CheckVariableExists(rReactionVar, rModelPart.Nodes());
#endif

    rModelPart.GetNodalSolutionStepVariablesList().AddDof(&rVar, &rReactionVar);

    #pragma omp parallel for
    for (int k = 0; k < static_cast<int>(rModelPart.NumberOfNodes()); ++k) {
        auto it_node = rModelPart.NodesBegin() + k;
        it_node->AddDof(rVar,rReactionVar);
    }

    KRATOS_CATCH("")
}
/**
 * @brief This method checks the variable keys
 * @return True if all the keys are correct
 */
bool CheckVariableKeys();
/**
 * @brief This method checks the dofs
 * @param rModelPart reference to the model part that contains the objective element set
 * @return True if all the DoFs are correct
 */
bool CheckDofs(ModelPart& rModelPart);
/**
 * @brief This method updates the current nodal coordinates back to the initial coordinates
 * @param rNodes the nodes to be updated
 */
void UpdateCurrentToInitialConfiguration(const ModelPart::NodesContainerType& rNodes);
/**
 * @brief This method updates the initial nodal coordinates to the current coordinates
 * @param rNodes the nodes to be updated
 */
void UpdateInitialToCurrentConfiguration(const ModelPart::NodesContainerType& rNodes);
/**
 * @brief This method updates the current coordinates
 * For each node, this method takes the value of the provided variable and updates the
 * current position as the initial position (X0, Y0, Z0) plus such variable value
 * @param rNodes the nodes to be updated
 * @param rUpdateVariable variable to retrieve the updating values from
 * @param BufferPosition buffer position from which the update variable values are read
 */
void UpdateCurrentPosition(
const ModelPart::NodesContainerType& rNodes,
const ArrayVarType& rUpdateVariable = DISPLACEMENT,
const IndexType BufferPosition = 0
);
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
 * @brief Auxiliary double initialize method
 * Auxiliary method to initialize a double value
 * @param rValue Variable to initialize
 */
void AuxiliaryInitializeValue(double &rValue);
/**
 * @brief Auxiliary array initialize method
 * Auxiliary method to initialize an array value
 * @param rValue Variable to initialize
 */
void AuxiliaryInitializeValue(array_1d<double,3> &rValue);
/**
 * @brief Auxiliary scalar reduce method
 * Auxiliary method to perform the atomic reduction of a scalar value
 * @param rPrivateValue Private (per-thread) variable to reduce
 * @param rSumValue Variable in which the reduction is accumulated
 */
void AuxiliaryAtomicAdd(
const double &rPrivateValue,
double &rSumValue
);
/**
 * @brief Auxiliary array reduce method
 * Auxiliary method to perform the atomic reduction of an array value
 * @param rPrivateValue Private (per-thread) variable to reduce
 * @param rSumValue Variable in which the reduction is accumulated
 */
void AuxiliaryAtomicAdd(
const array_1d<double,3> &rPrivateValue,
array_1d<double,3> &rSumValue
);
/**
 * @brief This is an auxiliary method to check the keys of all registered variables of a given type
 * @tparam TVarType The variable type whose registered components are checked
 * @return True if all the keys are OK (throws otherwise)
 */
template< class TVarType >
bool CheckVariableKeysHelper()
{
    KRATOS_TRY

    for (const auto& var : KratosComponents< TVarType >::GetComponents()) {
        if (var.first == "NONE" || var.first == "")
            std::cout << " var first is NONE or empty " << var.first << " " << var.second->Name() << std::endl;
        if (var.second->Name() == "NONE" || var.second->Name() == "")
            std::cout << var.first << " has a NONE or empty variable name" << std::endl;
        if (var.first != var.second->Name()) //name of registration does not correspond to the var name
            std::cout << "Registration Name = " << var.first << " Variable Name = " << var.second->Name() << std::endl; // fixed: the variable name was missing from the message
        KRATOS_ERROR_IF((var.second)->Key() == 0) << (var.second)->Name() << " Key is 0." << std::endl \
            << "Check that Kratos variables have been correctly registered and all required applications have been imported." << std::endl;
    }

    return true;

    KRATOS_CATCH("")
}
// Returns the entity container of the given type (nodes/elements/conditions) from the model part
template <class TContainerType>
TContainerType& GetContainer(ModelPart& rModelPart);
// Const overload of the above
template <class TContainerType>
const TContainerType& GetContainer(const ModelPart& rModelPart);
/**
 * @brief Copies a variable between two model parts for the entities whose flag matches
 * Both model parts must hold the same number of items in the chosen container; values are
 * read from the origin with rGetterFunction and written to the destination with rSetterFunction
 * @tparam TContainerType The container type (nodes, elements or conditions)
 * @tparam TSetterFunction Callable writing a value into a destination entity
 * @tparam TGetterFunction Callable reading a value from an origin entity
 * @param rOriginModelPart Model part the values are read from
 * @param rDestinationModelPart Model part the values are written to
 * @param rFlag The flag to be checked on the origin entities
 * @param CheckValue Flag value which is checked against
 * @param rSetterFunction Setter used on the destination entities
 * @param rGetterFunction Getter used on the origin entities
 */
template <class TContainerType, class TSetterFunction, class TGetterFunction>
void CopyModelPartFlaggedVariable(
const ModelPart& rOriginModelPart,
ModelPart& rDestinationModelPart,
const Flags& rFlag,
const bool CheckValue,
TSetterFunction&& rSetterFunction,
TGetterFunction&& rGetterFunction)
{
KRATOS_TRY
const auto& r_origin_container = GetContainer<TContainerType>(rOriginModelPart);
auto& r_destination_container = GetContainer<TContainerType>(rDestinationModelPart);
const int number_of_origin_items = r_origin_container.size();
const int number_of_destination_items = r_destination_container.size();
KRATOS_ERROR_IF_NOT(number_of_origin_items == number_of_destination_items)
<< "Origin ( " << rOriginModelPart.Name() << " ) and destination ( "
<< rDestinationModelPart.Name() << " ) model parts have different number of items."
<< "\n\t- Number of origin items: " << number_of_origin_items
<< "\n\t- Number of destination items: " << number_of_destination_items
<< std::endl;
IndexPartition<int>(number_of_origin_items).for_each([&](int i_node) {
const auto& r_orig_item = *(r_origin_container.begin() + i_node);
auto& r_dest_item = *(r_destination_container.begin() + i_node);
if (r_orig_item.Is(rFlag) == CheckValue) {
rSetterFunction(r_dest_item, rGetterFunction(r_orig_item));
}
});
KRATOS_CATCH("");
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class VariableUtils */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_VARIABLE_UTILS defined */
|
nn07.c | /*
Description:
This program implements a Neural Network of one hidden and one output layer
Abides by Lab 5 Exercise 4 requirements
Author:
Georgios Evangelou (1046900)
Year: 5
Parallel Programming in Machine Learning Problems
Electrical and Computer Engineering Department, University of Patras
System Specifications:
CPU: AMD Ryzen 2600 (6 cores/12 threads, @3.8 GHz, 6786.23 bogomips)
GPU: Nvidia GTX 1050 (dual-fan, overclocked)
RAM: 8GB (dual-channel, @2666 MHz)
Version Notes:
Compiles/Runs/Debugs with: gcc nn07.c -o nn07 -lm -O3 -fopenmp -fopt-info -pg && time ./nn07 && gprof ./nn07
Major refresh from previous versions
Accuracy achieved with:
unity neuron: disabled, epochs=15, batch size=100, learning rate=0.3, no multithreading:
Train/Test data: 99.00%/49.30% accuracy
Time: 02 minutes 01 seconds
unity neuron: enabled, epochs=15, batch size=100, learning rate=0.2, no multithreading:
Train/Test data: 99.00%/47.35% accuracy
Time: 02 minutes 02 seconds
*/
// ****************************************************************************************************************
#pragma GCC optimize("O3","unroll-loops","omit-frame-pointer","inline") //Apply O3 and extra optimizations
#pragma GCC option("arch=native","tune=native","no-zero-upper") //Adapt to the current system
#pragma GCC target("avx") //Enable AVX
// ****************************************************************************************************************
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "omp.h"
#include "string.h"
// ****************************************************************************************************************
#define LAYER1_NEURONS 100 //Number of 1st layer neurons
#define LAYER2_NEURONS 10 //Number of 2nd layer neurons
#define DEBUG 0 //Debugging options
#define LEARNING_RATE 0.2 //Learning rate (0.0001)
#define UNITY_NEURON 1 //The unity neuron
#define TRAIN_FILE_PATH "fashion-mnist_train.csv"
#define TEST_FILE_PATH "fashion-mnist_test.csv"
#define TRAIN_DATA_NUMBER 60000
#define TEST_DATA_NUMBER 10000
#define PIXELS 784
#define EPOCHS 15
#define BATCH_SIZE 100
double TRAIN_DATA[TRAIN_DATA_NUMBER][PIXELS+1]; //The train images (class and pixels)
double TEST_DATA[TEST_DATA_NUMBER][PIXELS+1]; //The test images (class and pixels)
double TRAIN_GOLDEN_OUTPUTS[TRAIN_DATA_NUMBER][LAYER2_NEURONS];
double TEST_GOLDEN_OUTPUTS[TEST_DATA_NUMBER][LAYER2_NEURONS];
double WL1[LAYER1_NEURONS][PIXELS+UNITY_NEURON]; //The weights of the 1st layer
double WL2[LAYER2_NEURONS][LAYER1_NEURONS+UNITY_NEURON]; //The weights of the 2nd layer
double WL1delta[LAYER1_NEURONS][PIXELS+UNITY_NEURON]; //The new weights of the 1st layer
double WL2delta[LAYER2_NEURONS][LAYER1_NEURONS+UNITY_NEURON]; //The new weights of the 2nd layer
double DL1[LAYER1_NEURONS]; //The inner states of 1st layer
double DL2[LAYER2_NEURONS]; //The inner states of 2nd layer
double OL1[LAYER1_NEURONS]; //The outer states of 1st layer
double OL2[LAYER2_NEURONS]; //The outer states of 2nd layer
double EL1[LAYER1_NEURONS]; //The errors of the 1st layer
double EL2[LAYER2_NEURONS]; //The errors of the 2nd layer
/* Seed both weight matrices with uniform pseudo-random values in [-1, 1],
   including the optional unity (bias) weight at the end of each row. */
void InitializeWeights() {
  for (int n = 0; n < LAYER1_NEURONS; n++)
    for (int w = 0; w < PIXELS + UNITY_NEURON; w++)
      WL1[n][w] = 2 * ((double) rand()) / ((double) RAND_MAX) - 1;
  for (int n = 0; n < LAYER2_NEURONS; n++)
    for (int w = 0; w < LAYER1_NEURONS + UNITY_NEURON; w++)
      WL2[n][w] = 2 * ((double) rand()) / ((double) RAND_MAX) - 1;
}
/* Reset the per-batch gradient accumulators to all-zero before a new batch.
   memset to zero bytes yields 0.0 for IEEE-754 doubles, matching the
   element-by-element zeroing of the original. */
void InitializeDeltas() {
  memset(WL1delta, 0, sizeof WL1delta);
  memset(WL2delta, 0, sizeof WL2delta);
}
void AcquireData() {
FILE *fp1 = fopen(TRAIN_FILE_PATH, "r");
char *token1;
if(fp1 != NULL) {
char line[PIXELS*6];
int picture = 0;
while(fgets(line, sizeof line, fp1) != NULL) {
token1 = strtok(line, ",");
int element = 0;
while(token1 != NULL) {
TRAIN_DATA[picture][element++] = atoi(token1);
token1 = strtok(NULL, ",");
}
picture++;
}
fclose(fp1);
}
FILE *fp2 = fopen(TEST_FILE_PATH, "r");
char *token2;
if(fp2 != NULL) {
char line[PIXELS*6];
int picture = 0;
while(fgets(line, sizeof line, fp2) != NULL) {
token2 = strtok(line, ",");
int element = 0;
while(token2 != NULL) {
TEST_DATA[picture][element++] = atoi(token2);
token2 = strtok(NULL, ",");
}
picture++;
}
fclose(fp2);
}
}
/* Build soft one-hot target vectors: 0.9 for the true class of each picture
   (stored in column 0 of the data row), 0.1 for every other output. */
void AcquireGoldenOutputs() {
  for (int p = 0; p < TRAIN_DATA_NUMBER; p++)
    for (int i = 0; i < LAYER2_NEURONS; i++)
      TRAIN_GOLDEN_OUTPUTS[p][i] = (i == (int) TRAIN_DATA[p][0]) ? 0.9 : 0.1;
  for (int p = 0; p < TEST_DATA_NUMBER; p++)
    for (int i = 0; i < LAYER2_NEURONS; i++)
      TEST_GOLDEN_OUTPUTS[p][i] = (i == (int) TEST_DATA[p][0]) ? 0.9 : 0.1;
}
/* Forward pass: compute the sigmoid activations OL1 (hidden layer) and
   OL2 (output layer) for the given input pixels. When UNITY_NEURON is 1,
   the last weight of each row acts as the bias term. */
void ActivateNetwork(double input[LAYER1_NEURONS]) {
  for (int n = 0; n < LAYER1_NEURONS; n++) {
    double net = (UNITY_NEURON == 1) ? WL1[n][PIXELS] : 0;
    for (int i = 0; i < PIXELS; i++)
      net += input[i] * WL1[n][i];
    OL1[n] = 1 / (1 + exp(-net));
  }
  for (int n = 0; n < LAYER2_NEURONS; n++) {
    double net = (UNITY_NEURON == 1) ? WL2[n][LAYER1_NEURONS] : 0;
    for (int i = 0; i < LAYER1_NEURONS; i++)
      net += OL1[i] * WL2[n][i];
    OL2[n] = 1 / (1 + exp(-net));
  }
}
/* Derivative of the logistic sigmoid expressed via its output value:
   f'(x) = f(x) * (1 - f(x)). */
double NeuronOutputDerivative(double output) {
  return (1.0 - output) * output;
}
void ErrorBackPropagation(double expectedOutput[LAYER2_NEURONS]) {
for (int c=0; c<LAYER2_NEURONS; c++) {
EL2[c] = (expectedOutput[c] - OL2[c]) * NeuronOutputDerivative(OL2[c]);
}
//#pragma omp parallel for schedule(static, 10) //WORSENS PERFORMANCE
for (int c=0; c<LAYER1_NEURONS; c++) {
double error = 0.0;
for (int n=0; n<LAYER2_NEURONS; n++) {
error += WL1[n][c] * EL2[n];
}
EL1[c] = error * NeuronOutputDerivative(OL1[c]);
}
}
/* Accumulate the current sample's gradient contribution, scaled by
   1/BATCH_SIZE, into the delta matrices; the trailing bias weight is
   updated too when the unity neuron is enabled. */
void UpdateWeightsDeltas(double inputs[PIXELS]) {
  for (int neuron = 0; neuron < LAYER1_NEURONS; neuron++) {
    for (int in = 0; in < PIXELS; in++)
      WL1delta[neuron][in] += EL1[neuron] * inputs[in] / ((double) BATCH_SIZE);
    if (UNITY_NEURON == 1)
      WL1delta[neuron][PIXELS] += EL1[neuron] / ((double) BATCH_SIZE);
  }
  for (int neuron = 0; neuron < LAYER2_NEURONS; neuron++) {
    for (int in = 0; in < LAYER1_NEURONS; in++)
      WL2delta[neuron][in] += EL2[neuron] * OL1[in] / ((double) BATCH_SIZE);
    if (UNITY_NEURON == 1)
      WL2delta[neuron][LAYER1_NEURONS] += EL2[neuron] / ((double) BATCH_SIZE);
  }
}
/* Apply the accumulated batch gradients, scaled by the learning rate,
   to both weight matrices (gradient-ascent form: deltas already carry
   the sign of target - output). */
void UpdateWeights() {
  for (int n = 0; n < LAYER1_NEURONS; n++)
    for (int w = 0; w < PIXELS + UNITY_NEURON; w++)
      WL1[n][w] += LEARNING_RATE * WL1delta[n][w];
  for (int n = 0; n < LAYER2_NEURONS; n++)
    for (int w = 0; w < LAYER1_NEURONS + UNITY_NEURON; w++)
      WL2[n][w] += LEARNING_RATE * WL2delta[n][w];
}
/* Index of the largest of the LAYER2_NEURONS entries of array, or -1 when
   no entry is strictly positive (cannot happen for sigmoid outputs). */
int estimatedClass(double *array) {
  int best = -1;
  double bestValue = 0;
  for (int i = 0; i < LAYER2_NEURONS; i++) {
    if (array[i] > bestValue) {
      bestValue = array[i];
      best = i;
    }
  }
  return best;
}
/*
 * Entry point: train the 2-layer MLP on the Fashion-MNIST train split,
 * then evaluate on the test split.
 * Fixes vs. the original:
 *  - srand() was seeded with an uninitialized variable (undefined
 *    behavior); it now uses time(NULL).
 *  - the picture index ignored the batch number, so every batch re-trained
 *    on pictures 0..BATCH_SIZE-1 and most of the train set was never seen;
 *    the index now advances with the batch.
 *  - removed the unused shadowed step/batch/epoch declarations and added
 *    a final newline and return value.
 */
int main() {
  printf("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
  printf("This program implements a Neural Network of %d Layers.\n", 2);
  printf("Inputs: %d, Hidden layer neurons: %d, Output layer neurons: %d\n", PIXELS, LAYER1_NEURONS, LAYER2_NEURONS);
  printf("Epochs: %d, Batches per epoch: %d, Train data: %d\n", EPOCHS, TRAIN_DATA_NUMBER/BATCH_SIZE, TRAIN_DATA_NUMBER);
  printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n");
  srand((unsigned) time(NULL));  /* BUG FIX: was srand(seconds) with seconds uninitialized */
  InitializeWeights();
  AcquireData();
  AcquireGoldenOutputs();
  printf("--- Now training the Neural Network using the train data... ---\n");
  for (int epoch=0; epoch<EPOCHS; epoch++) {
    printf("> Epoch: %2d", epoch+1);
    int hits = 0;
    for (int batch=0; batch<TRAIN_DATA_NUMBER/BATCH_SIZE; batch++) {
      InitializeDeltas();
      for (int step=0; step<BATCH_SIZE; step++) {
        /* BUG FIX: include the batch offset; the original used
           step % TRAIN_DATA_NUMBER, which cycles only over the
           first BATCH_SIZE pictures for every batch. */
        int picture = (batch * BATCH_SIZE + step) % TRAIN_DATA_NUMBER;
        ActivateNetwork(&TRAIN_DATA[picture][1]);
        ErrorBackPropagation(TRAIN_GOLDEN_OUTPUTS[picture]);
        UpdateWeightsDeltas(&TRAIN_DATA[picture][1]);
        if (estimatedClass(OL2) == (int) TRAIN_DATA[picture][0]) hits++;
      }
      UpdateWeights();
    }
    printf(" --> Accuracy: %.2lf%%\n", ((double)100*hits)/((double)TRAIN_DATA_NUMBER));
  }
  int hits = 0;
  printf("\n--- Now evaluating the Neural Network using the test data... --\n");
  for (int step=0; step<TEST_DATA_NUMBER; step++) {
    ActivateNetwork(&TEST_DATA[step][1]);
    if (estimatedClass(OL2) == (int) TEST_DATA[step][0]) hits++;
    printf("\r> Test data: %d Total Accuracy: %.2lf%%", step+1, ((double)100*hits)/((double)TEST_DATA_NUMBER));
  }
  printf("\n");
  return 0;
}
|
sum_int.c | //sum.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N_RUNS 1000
#define N 120000
// read timer in second
// Current wall-clock time in seconds, with millisecond resolution.
double read_timer() {
    struct timeb now;
    ftime(&now);
    return (double) now.time + (double) now.millitm / 1000.0;
}
//Create a matrix and a vector and fill with random numbers
// Fill X with N pseudo-random integers in [0, 10] (integer division of
// rand() by RAND_MAX/10, exactly as the benchmark expects).
void init(int *X) {
    for (int k = 0; k < N; k++)
        X[k] = (int) rand() / (int) (RAND_MAX / 10.0);
}
//Our sum function- what it does is pretty straight-forward.
// Sum the N elements of X; the loop is offered to the vectorizer via
// OpenMP SIMD.
int sum(int *X) {
    int total = 0;
    #pragma omp simd
    for (int k = 0; k < N; k++)
        total += X[k];
    return total;
}
// Debug functions
// Plain scalar reference sum over the N elements of X, used to verify the
// SIMD variant.
int sum_serial(int *X) {
    int total = 0;
    for (int k = 0; k < N; k++)
        total += X[k];
    return total;
}
// Print the first 8 elements of vector as a bracketed row on stdout.
void print_vector(int *vector) {
    printf("[");
    for (int k = 0; k < 8; k++)
        printf("%d ", vector[k]);
    puts("]");
}
/*
 * Benchmark driver: time N_RUNS sums of an N-element int vector with the
 * SIMD and serial kernels, then report runtimes and throughput.
 * Fixes vs. the original:
 *  - the ops count used for GFLOPS was (2*N)*N*N_RUNS, an extra factor of
 *    2*N; a sum performs N additions per run, so the total is N*N_RUNS.
 *  - malloc result is now checked before use.
 */
int main(int argc, char **argv) {
    //Set everything up
    int *X = malloc(sizeof *X * N);
    if (X == NULL) {
        fprintf(stderr, "allocation failed\n");
        return 1;
    }
    int result = 0, result_serial = 0;
    srand(time(NULL));
    init(X);
    double start = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        result = sum(X);
    double t = (read_timer() - start);
    double start_serial = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        result_serial = sum_serial(X);
    double t_serial = (read_timer() - start_serial);
    print_vector(X);
    puts("=\n");
    printf("SIMD: %d\n", result);
    puts("---------------------------------");
    printf("Serial: %d\n", result_serial);
    /* BUG FIX: N additions per run -> N * N_RUNS total operations. */
    double gflops = ((double) N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = ((double) N * N_RUNS) / (1.0e9 * t_serial);
    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("Sum (SIMD):\t\t%4f\t%4f\n", t, gflops);
    printf("Sum (Serial):\t\t%4f\t%4f\n", t_serial, gflops_serial);
    printf("Correctness check: %d\n", result_serial - result);
    free(X);
    return 0;
}
|
GB_unaryop__one_int8_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__one_int8_int8
// op(A') function: GB_tran__one_int8_int8
// C type: int8_t
// A type: int8_t
// cast: ;
// unaryop: cij = 1
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1 ;
// casting
#define GB_CASTING(z, x) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the ONE operator entry-wise: every Cx [p] is set to 1 (GB_OP above).
// Ax is accepted for interface uniformity but never read — GB_GETA expands
// to nothing for this operator.
GrB_Info GB_unop__one_int8_int8
(
    int8_t *restrict Cx,        // output array, anz entries
    const int8_t *restrict Ax,  // input array (unused by the ONE operator)
    int64_t anz,                // number of entries to process
    int nthreads                // OpenMP thread count for the parallel loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;     // kernel compiled out; caller falls back to generic path
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;     // expands (via macros above) to Cx [p] = 1
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transposed variant: C = op (cast (A')). The loop body lives in the
// included template GB_unaryop_transpose.c, specialized by the GB_* macros
// defined earlier in this file.
GrB_Info GB_tran__one_int8_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,    // NOTE(review): presumably per-slice row counts — confirm in template
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice, // slice boundaries for parallel work
    int naslice                      // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;          // kernel compiled out
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
08 - openMP.c | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <omp.h>
#define SIZE_GAU 8
#define SIZE_JVL 15
int JAVA_COUNT = SIZE_JVL;
omp_lock_t table;
omp_lock_t hungry;
/*
 * Refill the shared javali counter.
 * Blocks on the `table` lock until a hungry gaules releases it in
 * takeJavali(), restocks JAVA_COUNT, then releases `hungry` so the waiting
 * eater can proceed.
 * NOTE(review): `table` is acquired here but never released in this
 * function; the matching omp_unset_lock(&table) is in takeJavali — confirm
 * the cooker's repeated omp_set_lock on a lock it may still hold cannot
 * self-deadlock.
 */
void replaceJAVALIS()
{
omp_set_lock(&table);
JAVA_COUNT = SIZE_JVL;
printf(" < The Cooker Made his Magic n Replace All The Java's >\n\n");
omp_unset_lock(&hungry);
}
/*
 * Cooker thread body: refill the table forever.
 * Never returns, so the declared void * result is never produced; the
 * pthread-style signature appears to be a leftover — g is unused.
 */
void *cooker(void *g)
{
while(1)
{
replaceJAVALIS();
}
}
/*
 * Report that gaules `g` eats one javali. `g` carries the thread id
 * smuggled through a void pointer.
 * BUG FIX: the original passed the pointer itself to a %d conversion,
 * which is undefined behavior; it is converted back to an int first.
 */
void eatJAVALI(void *g)
{
    printf(" - G[%d] is eating the lucky javali %d n very happy.\n", (int)(intptr_t)g, JAVA_COUNT+1);
}
/*
 * Take one javali inside a critical section. When the table is empty,
 * wake the cooker by releasing `table`, then block on `hungry` until the
 * supply has been restocked.
 * BUG FIX: printing the void pointer `g` with %d was undefined behavior;
 * it is converted back to the thread id first.
 */
void takeJavali(void *g)
{
    #pragma omp critical(section1)
    {
        if(JAVA_COUNT == 0)
        {
            printf(" - Gaules [%d] get hungry first n wake up the cooker\n", (int)(intptr_t)g);
            omp_unset_lock(&table);
            omp_set_lock(&hungry);
        }
        JAVA_COUNT--;
        eatJAVALI(g);
    }
}
/* Gaules thread body: take and eat one javali, rest one second, repeat
   forever. `g` is the thread id packed into a void pointer. */
void gaules(void *g)
{
    for (;;)
    {
        takeJavali(g);
        sleep(1);
    }
}
/*
 * Spawn SIZE_GAU eater threads plus one cooker (thread 0) with OpenMP.
 * Fixes vs. the original:
 *  - omp_set_lock was called on uninitialized locks (undefined per the
 *    OpenMP spec); both locks are now omp_init_lock'ed first.
 *  - the int thread id was passed where a void * parameter is expected;
 *    it is now converted through intptr_t.
 *  - added the missing return value.
 */
int main()
{
    omp_init_lock(&table);   /* BUG FIX: locks must be initialized before first use */
    omp_init_lock(&hungry);
    omp_set_lock(&table);
    omp_set_lock(&hungry);
    printf("-----------------[Dinner is Served]--------------\n");
    #pragma omp parallel num_threads(SIZE_GAU+1) //create the OMP threads
    {
        int g = omp_get_thread_num();
        if(g == 0)
        {
            cooker((void *)(intptr_t)g);   /* thread 0 is the cooker; never returns */
        }
        gaules((void *)(intptr_t)g);
    }
    printf("-----------------[Dinner is Over]----------------\n");
    return 0;
}
|
axpy_float.c | //axpy.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N_RUNS 20
#define N 102400000
// read timer in second
// Wall-clock time in seconds (millisecond resolution) for benchmarking.
double read_timer() {
    struct timeb now;
    ftime(&now);
    return (double) now.time + (double) now.millitm / 1000.0;
}
//Create a matrix and a vector and fill with random numbers
// Fill both N-element vectors with pseudo-random floats in [0, 10].
void init(float *X, float *Y) {
    for (int k = 0; k < N; k++) {
        X[k] = (float) rand() / (float) (RAND_MAX / 10.0);
        Y[k] = (float) rand() / (float) (RAND_MAX / 10.0);
    }
}
//Our sum function- what it does is pretty straight-forward.
// Y += a * X over all N elements; the loop is offered to the vectorizer
// via OpenMP SIMD.
void axpy(float *X, float *Y, float a) {
    #pragma omp simd
    for (int k = 0; k < N; k++)
        Y[k] += a * X[k];
}
// Debug functions
// Scalar reference implementation of Y += a * X, used to verify the
// SIMD variant.
void axpy_serial(float *X, float *Y, float a) {
    for (int k = 0; k < N; k++)
        Y[k] += a * X[k];
}
// Print the first 8 elements of vector, two decimals each, as a bracketed
// row on stdout.
void print_vector(float *vector) {
    printf("[");
    for (int k = 0; k < 8; k++)
        printf("%.2f ", vector[k]);
    puts("]");
}
// Sum of element-wise differences A[k] - B[k] over all N entries;
// approximately zero when the two result vectors agree.
float check(float *A, float *B) {
    float difference = 0;
    for (int k = 0; k < N; k++)
        difference += A[k] - B[k];
    return difference;
}
/*
 * Benchmark driver: run N_RUNS AXPY passes with the SIMD and serial
 * kernels and report runtimes, throughput, and a correctness check.
 * Fixes vs. the original:
 *  - the flops count was (2*N)*N*N_RUNS; AXPY does 2 flops (one multiply,
 *    one add) per element, so the total is 2*N*N_RUNS.
 *  - the three mallocs are now checked before use.
 */
int main(int argc, char **argv) {
    //Set everything up
    float *X = malloc(sizeof *X * N);
    float *Y = malloc(sizeof *Y * N);
    float *Y_serial = malloc(sizeof *Y_serial * N);
    if (X == NULL || Y == NULL || Y_serial == NULL) {
        fprintf(stderr, "allocation failed\n");
        free(X);
        free(Y);
        free(Y_serial);
        return 1;
    }
    float a = 3.14;
    srand(time(NULL));
    init(X, Y);
    for (int i = 0; i<N; i++) Y_serial[i] = Y[i];
    print_vector(Y);
    print_vector(X);
    printf("%.2f\n", a);
    puts("=\n");
    //warming up
    axpy(X, Y, a);
    axpy_serial(X, Y_serial, a);
    init(X, Y);
    for (int i = 0; i<N; i++) Y_serial[i] = Y[i];
    double t = 0;
    double start = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        axpy(X, Y, a);
    t += (read_timer() - start);
    double t_serial = 0;
    double start_serial = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        axpy_serial(X, Y_serial, a);
    t_serial += (read_timer() - start_serial);
    print_vector(Y);
    puts("---------------------------------");
    print_vector(Y_serial);
    /* BUG FIX: 2 flops per element -> 2 * N * N_RUNS total flops. */
    double gflops = (2.0 * N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = (2.0 * N * N_RUNS) / (1.0e9 * t_serial);
    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("AXPY (SIMD):\t\t%4f\t%4f\n", t/N_RUNS, gflops);
    printf("AXPY (Serial):\t\t%4f\t%4f\n", t_serial/N_RUNS, gflops_serial);
    printf("Correctness check: %f\n", check(Y,Y_serial));
    free(X);
    free(Y);
    free(Y_serial);
    return 0;
}
|
GB_unaryop__lnot_bool_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_bool_uint8
// op(A') function: GB_tran__lnot_bool_uint8
// C type: bool
// A type: uint8_t
// cast: bool cij = (bool) aij
// unaryop: cij = !aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !x ;
// casting
#define GB_CASTING(z, aij) \
bool z = (bool) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the LNOT operator entry-wise: Cx [p] = ! (bool) Ax [p], as assembled
// by the GB_GETA / GB_CASTING / GB_OP macros defined above.
GrB_Info GB_unop__lnot_bool_uint8
(
    bool *Cx, // Cx and Ax may be aliased
    uint8_t *Ax,
    int64_t anz,      // number of entries to process
    int nthreads      // OpenMP thread count for the parallel loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // kernel compiled out; caller uses generic path
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;   // expands to Cx [p] = ! (bool) Ax [p]
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transposed variant: C = op (cast (A')). The loop body lives in the
// included template GB_unaryop_transpose.c, specialized by the GB_* macros
// defined earlier in this file.
GrB_Info GB_tran__lnot_bool_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // NOTE(review): presumably per-slice row counts — confirm in template
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice, // slice boundaries for parallel work
    int naslice                         // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;             // kernel compiled out
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_3x3_pack8to4_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transform 3x3 convolution kernels into the Winograd F(6x6, 3x3) domain
// (8x8 tiles) and interleave them into the fp16 pack8-to-pack4 layout.
//   kernel             : inch*outch 3x3 float weights, densely packed
//   kernel_tm_pack8to4 : output, __fp16, layout 4b-8a-inch/8a-64-outch/4b
// NOTE(review): assumes inch is a multiple of 8 and outch a multiple of 4
// (the interleave loops only handle 8- and 4-wide output groups and 8-wide
// input groups) — confirm against the callers' packing guarantees.
static void conv3x3s1_winograd64_transform_kernel_pack8to4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to4, int inch, int outch)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);
    // G matrix of the transform: each 3-tap kernel k becomes G * k * G^T (8x8).
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };
    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);
            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;
            // h : tmp = (G * k)^T, one 8x3 intermediate per kernel
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }
            // v : kernel_tm = tmp * G^T, producing the final 8x8 tile
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];
                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }
    // interleave
    // src = 64-inch-outch
    // dst = 4b-8a-inch/8a-64-outch/4b
    kernel_tm_pack8to4.create(2 * inch / 8, 64, outch / 8 + (outch % 8) / 4, (size_t)2u * 32, 32);
    int p = 0;
    // Groups of 8 output channels: gather one fp16 value from each of the 8
    // channels for every (input-channel, tile-position) pair.
    for (; p + 7 < outch; p += 8)
    {
        const Mat k0 = kernel_tm.channel(p);
        const Mat k1 = kernel_tm.channel(p + 1);
        const Mat k2 = kernel_tm.channel(p + 2);
        const Mat k3 = kernel_tm.channel(p + 3);
        const Mat k4 = kernel_tm.channel(p + 4);
        const Mat k5 = kernel_tm.channel(p + 5);
        const Mat k6 = kernel_tm.channel(p + 6);
        const Mat k7 = kernel_tm.channel(p + 7);
        Mat g0 = kernel_tm_pack8to4.channel(p / 8);
        for (int k = 0; k < 64; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);
            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    g00[0] = (__fp16)k0.row(q + i)[k];
                    g00[1] = (__fp16)k1.row(q + i)[k];
                    g00[2] = (__fp16)k2.row(q + i)[k];
                    g00[3] = (__fp16)k3.row(q + i)[k];
                    g00[4] = (__fp16)k4.row(q + i)[k];
                    g00[5] = (__fp16)k5.row(q + i)[k];
                    g00[6] = (__fp16)k6.row(q + i)[k];
                    g00[7] = (__fp16)k7.row(q + i)[k];
                    g00 += 8;
                }
            }
        }
    }
    // Remaining group of 4 output channels (when outch % 8 == 4): same
    // gather, 4 values wide.
    for (; p + 3 < outch; p += 4)
    {
        const Mat k0 = kernel_tm.channel(p);
        const Mat k1 = kernel_tm.channel(p + 1);
        const Mat k2 = kernel_tm.channel(p + 2);
        const Mat k3 = kernel_tm.channel(p + 3);
        Mat g0 = kernel_tm_pack8to4.channel(p / 8 + (p % 8) / 4);
        for (int k = 0; k < 64; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);
            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    g00[0] = (__fp16)k0.row(q + i)[k];
                    g00[1] = (__fp16)k1.row(q + i)[k];
                    g00[2] = (__fp16)k2.row(q + i)[k];
                    g00[3] = (__fp16)k3.row(q + i)[k];
                    g00 += 4;
                }
            }
        }
    }
}
static void conv3x3s1_winograd64_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
const __fp16* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator);
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
__fp16 tmp[8][8][8];
// tile
for (int i = 0; i < h_tm / 8; i++)
{
for (int j = 0; j < w_tm / 8; j++)
{
const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 8;
for (int m = 0; m < 8; m++)
{
float16x8_t _r00 = vld1q_f16(r0);
float16x8_t _r01 = vld1q_f16(r0 + 8);
float16x8_t _r02 = vld1q_f16(r0 + 16);
float16x8_t _r03 = vld1q_f16(r0 + 24);
float16x8_t _r04 = vld1q_f16(r0 + 32);
float16x8_t _r05 = vld1q_f16(r0 + 40);
float16x8_t _r06 = vld1q_f16(r0 + 48);
float16x8_t _r07 = vld1q_f16(r0 + 56);
float16x8_t _tmp0m = vfmaq_n_f16(vsubq_f16(_r00, _r06), vsubq_f16(_r04, _r02), 5.25f);
float16x8_t _tmp7m = vfmaq_n_f16(vsubq_f16(_r07, _r01), vsubq_f16(_r03, _r05), 5.25f);
vst1q_f16(tmp[0][m], _tmp0m);
vst1q_f16(tmp[7][m], _tmp7m);
// tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
// tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;
float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_r02, _r06), _r04, 4.25f);
float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_r01, _r05), _r03, 4.25f);
// float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
// float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);
float16x8_t _tmp1m = vaddq_f16(_tmp12a, _tmp12b);
float16x8_t _tmp2m = vsubq_f16(_tmp12a, _tmp12b);
vst1q_f16(tmp[1][m], _tmp1m);
vst1q_f16(tmp[2][m], _tmp2m);
// tmp[1][m] = tmp12a + tmp12b;
// tmp[2][m] = tmp12a - tmp12b;
float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_r06, _r02, 0.25f), _r04, 1.25f);
float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);
// float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
// float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);
float16x8_t _tmp3m = vaddq_f16(_tmp34a, _tmp34b);
float16x8_t _tmp4m = vsubq_f16(_tmp34a, _tmp34b);
vst1q_f16(tmp[3][m], _tmp3m);
vst1q_f16(tmp[4][m], _tmp4m);
// tmp[3][m] = tmp34a + tmp34b;
// tmp[4][m] = tmp34a - tmp34b;
float16x8_t _tmp56a = vfmaq_n_f16(_r06, vfmsq_n_f16(_r02, _r04, 1.25f), 4.f);
float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);
// float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
// float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);
float16x8_t _tmp5m = vaddq_f16(_tmp56a, _tmp56b);
float16x8_t _tmp6m = vsubq_f16(_tmp56a, _tmp56b);
vst1q_f16(tmp[5][m], _tmp5m);
vst1q_f16(tmp[6][m], _tmp6m);
// tmp[5][m] = tmp56a + tmp56b;
// tmp[6][m] = tmp56a - tmp56b;
r0 += w * 8;
}
__fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * 8;
__fp16* r0_tm_1 = r0_tm_0 + tiles * 8;
__fp16* r0_tm_2 = r0_tm_0 + tiles * 16;
__fp16* r0_tm_3 = r0_tm_0 + tiles * 24;
__fp16* r0_tm_4 = r0_tm_0 + tiles * 32;
__fp16* r0_tm_5 = r0_tm_0 + tiles * 40;
__fp16* r0_tm_6 = r0_tm_0 + tiles * 48;
__fp16* r0_tm_7 = r0_tm_0 + tiles * 56;
for (int m = 0; m < 8; m++)
{
float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);
float16x8_t _tmp06 = vld1q_f16(tmp[m][6]);
float16x8_t _tmp07 = vld1q_f16(tmp[m][7]);
float16x8_t _r0tm0 = vfmaq_n_f16(vsubq_f16(_tmp00, _tmp06), vsubq_f16(_tmp04, _tmp02), 5.25f);
float16x8_t _r0tm7 = vfmaq_n_f16(vsubq_f16(_tmp07, _tmp01), vsubq_f16(_tmp03, _tmp05), 5.25f);
// r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
// r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;
float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_tmp02, _tmp06), _tmp04, 4.25f);
float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_tmp01, _tmp05), _tmp03, 4.25f);
// float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
// float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25);
float16x8_t _r0tm1 = vaddq_f16(_tmp12a, _tmp12b);
float16x8_t _r0tm2 = vsubq_f16(_tmp12a, _tmp12b);
// r0_tm[1] = tmp12a + tmp12b;
// r0_tm[2] = tmp12a - tmp12b;
float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);
// float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
// float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);
float16x8_t _r0tm3 = vaddq_f16(_tmp34a, _tmp34b);
float16x8_t _r0tm4 = vsubq_f16(_tmp34a, _tmp34b);
// r0_tm[3] = tmp34a + tmp34b;
// r0_tm[4] = tmp34a - tmp34b;
float16x8_t _tmp56a = vfmaq_n_f16(_tmp06, vfmsq_n_f16(_tmp02, _tmp04, 1.25f), 4.f);
float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);
// float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
// float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);
float16x8_t _r0tm5 = vaddq_f16(_tmp56a, _tmp56b);
float16x8_t _r0tm6 = vsubq_f16(_tmp56a, _tmp56b);
// r0_tm[5] = tmp56a + tmp56b;
// r0_tm[6] = tmp56a - tmp56b;
vst1q_f16(r0_tm_0, _r0tm0);
vst1q_f16(r0_tm_1, _r0tm1);
vst1q_f16(r0_tm_2, _r0tm2);
vst1q_f16(r0_tm_3, _r0tm3);
vst1q_f16(r0_tm_4, _r0tm4);
vst1q_f16(r0_tm_5, _r0tm5);
vst1q_f16(r0_tm_6, _r0tm6);
vst1q_f16(r0_tm_7, _r0tm7);
r0_tm_0 += tiles * 64;
r0_tm_1 += tiles * 64;
r0_tm_2 += tiles * 64;
r0_tm_3 += tiles * 64;
r0_tm_4 += tiles * 64;
r0_tm_5 += tiles * 64;
r0_tm_6 += tiles * 64;
r0_tm_7 += tiles * 64;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 7 < tiles; i += 8)
{
__fp16* tm2p = tm2.row<__fp16>(i / 8);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
"sub %0, %0, #64 \n"
"uzp1 v16.8h, v0.8h, v4.8h \n"
"uzp2 v20.8h, v0.8h, v4.8h \n"
"uzp1 v17.8h, v1.8h, v5.8h \n"
"uzp2 v21.8h, v1.8h, v5.8h \n"
"uzp1 v18.8h, v2.8h, v6.8h \n"
"uzp2 v22.8h, v2.8h, v6.8h \n"
"uzp1 v19.8h, v3.8h, v7.8h \n"
"uzp2 v23.8h, v3.8h, v7.8h \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 3 < tiles; i += 4)
{
__fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x4
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3");
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i < tiles; i++)
{
__fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4);
const __fp16* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.8h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0");
r0 += bottom_blob_tm.cstep * 8;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, 2u * 4, 4, opt.workspace_allocator);
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
__fp16* output0_tm = top_blob_tm.channel(p);
__fp16* output1_tm = top_blob_tm.channel(p + 1);
const Mat kernel01_tm = kernel_tm.channel(p / 2);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 7 < tiles; i += 8)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8);
const __fp16* kptr = kernel01_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n"
"fmla v24.8h, v16.8h, v0.h[0] \n"
"fmla v25.8h, v16.8h, v0.h[1] \n"
"fmla v26.8h, v16.8h, v0.h[2] \n"
"fmla v27.8h, v16.8h, v0.h[3] \n"
"fmla v28.8h, v16.8h, v0.h[4] \n"
"fmla v29.8h, v16.8h, v0.h[5] \n"
"fmla v30.8h, v16.8h, v0.h[6] \n"
"fmla v31.8h, v16.8h, v0.h[7] \n"
"fmla v24.8h, v17.8h, v1.h[0] \n"
"fmla v25.8h, v17.8h, v1.h[1] \n"
"fmla v26.8h, v17.8h, v1.h[2] \n"
"fmla v27.8h, v17.8h, v1.h[3] \n"
"fmla v28.8h, v17.8h, v1.h[4] \n"
"fmla v29.8h, v17.8h, v1.h[5] \n"
"fmla v30.8h, v17.8h, v1.h[6] \n"
"fmla v31.8h, v17.8h, v1.h[7] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v24.8h, v18.8h, v2.h[0] \n"
"fmla v25.8h, v18.8h, v2.h[1] \n"
"fmla v26.8h, v18.8h, v2.h[2] \n"
"fmla v27.8h, v18.8h, v2.h[3] \n"
"fmla v28.8h, v18.8h, v2.h[4] \n"
"fmla v29.8h, v18.8h, v2.h[5] \n"
"fmla v30.8h, v18.8h, v2.h[6] \n"
"fmla v31.8h, v18.8h, v2.h[7] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n"
"fmla v24.8h, v19.8h, v3.h[0] \n"
"fmla v25.8h, v19.8h, v3.h[1] \n"
"fmla v26.8h, v19.8h, v3.h[2] \n"
"fmla v27.8h, v19.8h, v3.h[3] \n"
"fmla v28.8h, v19.8h, v3.h[4] \n"
"fmla v29.8h, v19.8h, v3.h[5] \n"
"fmla v30.8h, v19.8h, v3.h[6] \n"
"fmla v31.8h, v19.8h, v3.h[7] \n"
"fmla v24.8h, v20.8h, v4.h[0] \n"
"fmla v25.8h, v20.8h, v4.h[1] \n"
"fmla v26.8h, v20.8h, v4.h[2] \n"
"fmla v27.8h, v20.8h, v4.h[3] \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v20.8h, v4.h[5] \n"
"fmla v30.8h, v20.8h, v4.h[6] \n"
"fmla v31.8h, v20.8h, v4.h[7] \n"
"fmla v24.8h, v21.8h, v5.h[0] \n"
"fmla v25.8h, v21.8h, v5.h[1] \n"
"fmla v26.8h, v21.8h, v5.h[2] \n"
"fmla v27.8h, v21.8h, v5.h[3] \n"
"fmla v28.8h, v21.8h, v5.h[4] \n"
"fmla v29.8h, v21.8h, v5.h[5] \n"
"fmla v30.8h, v21.8h, v5.h[6] \n"
"fmla v31.8h, v21.8h, v5.h[7] \n"
"fmla v24.8h, v22.8h, v6.h[0] \n"
"fmla v25.8h, v22.8h, v6.h[1] \n"
"fmla v26.8h, v22.8h, v6.h[2] \n"
"fmla v27.8h, v22.8h, v6.h[3] \n"
"fmla v28.8h, v22.8h, v6.h[4] \n"
"fmla v29.8h, v22.8h, v6.h[5] \n"
"fmla v30.8h, v22.8h, v6.h[6] \n"
"fmla v31.8h, v22.8h, v6.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.8h, v23.8h, v7.h[0] \n"
"fmla v25.8h, v23.8h, v7.h[1] \n"
"fmla v26.8h, v23.8h, v7.h[2] \n"
"fmla v27.8h, v23.8h, v7.h[3] \n"
"fmla v28.8h, v23.8h, v7.h[4] \n"
"fmla v29.8h, v23.8h, v7.h[5] \n"
"fmla v30.8h, v23.8h, v7.h[6] \n"
"fmla v31.8h, v23.8h, v7.h[7] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n"
"ext v24.16b, v24.16b, v24.16b, #8 \n"
"ext v25.16b, v25.16b, v25.16b, #8 \n"
"ext v26.16b, v26.16b, v26.16b, #8 \n"
"ext v27.16b, v27.16b, v27.16b, #8 \n"
"ext v28.16b, v28.16b, v28.16b, #8 \n"
"ext v29.16b, v29.16b, v29.16b, #8 \n"
"ext v30.16b, v30.16b, v30.16b, #8 \n"
"ext v31.16b, v31.16b, v31.16b, #8 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(kptr) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < tiles; i += 4)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
const __fp16* kptr = kernel01_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n"
"fmla v24.8h, v16.8h, v0.h[0] \n"
"fmla v25.8h, v16.8h, v0.h[1] \n"
"fmla v26.8h, v16.8h, v0.h[2] \n"
"fmla v27.8h, v16.8h, v0.h[3] \n"
"fmla v24.8h, v17.8h, v0.h[4] \n"
"fmla v25.8h, v17.8h, v0.h[5] \n"
"fmla v26.8h, v17.8h, v0.h[6] \n"
"fmla v27.8h, v17.8h, v0.h[7] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v24.8h, v18.8h, v1.h[0] \n"
"fmla v25.8h, v18.8h, v1.h[1] \n"
"fmla v26.8h, v18.8h, v1.h[2] \n"
"fmla v27.8h, v18.8h, v1.h[3] \n"
"fmla v24.8h, v19.8h, v1.h[4] \n"
"fmla v25.8h, v19.8h, v1.h[5] \n"
"fmla v26.8h, v19.8h, v1.h[6] \n"
"fmla v27.8h, v19.8h, v1.h[7] \n"
"fmla v24.8h, v20.8h, v2.h[0] \n"
"fmla v25.8h, v20.8h, v2.h[1] \n"
"fmla v26.8h, v20.8h, v2.h[2] \n"
"fmla v27.8h, v20.8h, v2.h[3] \n"
"fmla v24.8h, v21.8h, v2.h[4] \n"
"fmla v25.8h, v21.8h, v2.h[5] \n"
"fmla v26.8h, v21.8h, v2.h[6] \n"
"fmla v27.8h, v21.8h, v2.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.8h, v22.8h, v3.h[0] \n"
"fmla v25.8h, v22.8h, v3.h[1] \n"
"fmla v26.8h, v22.8h, v3.h[2] \n"
"fmla v27.8h, v22.8h, v3.h[3] \n"
"fmla v24.8h, v23.8h, v3.h[4] \n"
"fmla v25.8h, v23.8h, v3.h[5] \n"
"fmla v26.8h, v23.8h, v3.h[6] \n"
"fmla v27.8h, v23.8h, v3.h[7] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
"ext v24.16b, v24.16b, v24.16b, #8 \n"
"ext v25.16b, v25.16b, v25.16b, #8 \n"
"ext v26.16b, v26.16b, v26.16b, #8 \n"
"ext v27.16b, v27.16b, v27.16b, #8 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(kptr) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i < tiles; i++)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4);
const __fp16* kptr = kernel01_tm.row<const __fp16>(r);
float16x8_t _sum0 = vdupq_n_f16((__fp16)0.f);
for (int q = 0; q < inch; q++)
{
float16x8_t _r0 = vld1q_f16(r0);
float16x8_t _k0 = vld1q_f16(kptr);
float16x8_t _k1 = vld1q_f16(kptr + 8);
float16x8_t _k2 = vld1q_f16(kptr + 16);
float16x8_t _k3 = vld1q_f16(kptr + 24);
float16x8_t _k4 = vld1q_f16(kptr + 32);
float16x8_t _k5 = vld1q_f16(kptr + 40);
float16x8_t _k6 = vld1q_f16(kptr + 48);
float16x8_t _k7 = vld1q_f16(kptr + 56);
_sum0 = vfmaq_laneq_f16(_sum0, _k0, _r0, 0);
_sum0 = vfmaq_laneq_f16(_sum0, _k1, _r0, 1);
_sum0 = vfmaq_laneq_f16(_sum0, _k2, _r0, 2);
_sum0 = vfmaq_laneq_f16(_sum0, _k3, _r0, 3);
_sum0 = vfmaq_laneq_f16(_sum0, _k4, _r0, 4);
_sum0 = vfmaq_laneq_f16(_sum0, _k5, _r0, 5);
_sum0 = vfmaq_laneq_f16(_sum0, _k6, _r0, 6);
_sum0 = vfmaq_laneq_f16(_sum0, _k7, _r0, 7);
kptr += 64;
r0 += 8;
}
vst1_f16(output0_tm, vget_low_f16(_sum0));
vst1_f16(output1_tm, vget_high_f16(_sum0));
output0_tm += 4;
output1_tm += 4;
}
}
}
remain_outch_start += nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
__fp16* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 7 < tiles; i += 8)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8);
const __fp16* kptr = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n"
"fmla v24.4h, v16.4h, v0.h[0] \n"
"fmla v25.4h, v16.4h, v0.h[1] \n"
"fmla v26.4h, v16.4h, v0.h[2] \n"
"fmla v27.4h, v16.4h, v0.h[3] \n"
"fmla v28.4h, v16.4h, v0.h[4] \n"
"fmla v29.4h, v16.4h, v0.h[5] \n"
"fmla v30.4h, v16.4h, v0.h[6] \n"
"fmla v31.4h, v16.4h, v0.h[7] \n"
"fmla v24.4h, v17.4h, v1.h[0] \n"
"fmla v25.4h, v17.4h, v1.h[1] \n"
"fmla v26.4h, v17.4h, v1.h[2] \n"
"fmla v27.4h, v17.4h, v1.h[3] \n"
"fmla v28.4h, v17.4h, v1.h[4] \n"
"fmla v29.4h, v17.4h, v1.h[5] \n"
"fmla v30.4h, v17.4h, v1.h[6] \n"
"fmla v31.4h, v17.4h, v1.h[7] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n"
"fmla v24.4h, v18.4h, v2.h[0] \n"
"fmla v25.4h, v18.4h, v2.h[1] \n"
"fmla v26.4h, v18.4h, v2.h[2] \n"
"fmla v27.4h, v18.4h, v2.h[3] \n"
"fmla v28.4h, v18.4h, v2.h[4] \n"
"fmla v29.4h, v18.4h, v2.h[5] \n"
"fmla v30.4h, v18.4h, v2.h[6] \n"
"fmla v31.4h, v18.4h, v2.h[7] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n"
"fmla v24.4h, v19.4h, v3.h[0] \n"
"fmla v25.4h, v19.4h, v3.h[1] \n"
"fmla v26.4h, v19.4h, v3.h[2] \n"
"fmla v27.4h, v19.4h, v3.h[3] \n"
"fmla v28.4h, v19.4h, v3.h[4] \n"
"fmla v29.4h, v19.4h, v3.h[5] \n"
"fmla v30.4h, v19.4h, v3.h[6] \n"
"fmla v31.4h, v19.4h, v3.h[7] \n"
"fmla v24.4h, v20.4h, v4.h[0] \n"
"fmla v25.4h, v20.4h, v4.h[1] \n"
"fmla v26.4h, v20.4h, v4.h[2] \n"
"fmla v27.4h, v20.4h, v4.h[3] \n"
"fmla v28.4h, v20.4h, v4.h[4] \n"
"fmla v29.4h, v20.4h, v4.h[5] \n"
"fmla v30.4h, v20.4h, v4.h[6] \n"
"fmla v31.4h, v20.4h, v4.h[7] \n"
"fmla v24.4h, v21.4h, v5.h[0] \n"
"fmla v25.4h, v21.4h, v5.h[1] \n"
"fmla v26.4h, v21.4h, v5.h[2] \n"
"fmla v27.4h, v21.4h, v5.h[3] \n"
"fmla v28.4h, v21.4h, v5.h[4] \n"
"fmla v29.4h, v21.4h, v5.h[5] \n"
"fmla v30.4h, v21.4h, v5.h[6] \n"
"fmla v31.4h, v21.4h, v5.h[7] \n"
"fmla v24.4h, v22.4h, v6.h[0] \n"
"fmla v25.4h, v22.4h, v6.h[1] \n"
"fmla v26.4h, v22.4h, v6.h[2] \n"
"fmla v27.4h, v22.4h, v6.h[3] \n"
"fmla v28.4h, v22.4h, v6.h[4] \n"
"fmla v29.4h, v22.4h, v6.h[5] \n"
"fmla v30.4h, v22.4h, v6.h[6] \n"
"fmla v31.4h, v22.4h, v6.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4h, v23.4h, v7.h[0] \n"
"fmla v25.4h, v23.4h, v7.h[1] \n"
"fmla v26.4h, v23.4h, v7.h[2] \n"
"fmla v27.4h, v23.4h, v7.h[3] \n"
"fmla v28.4h, v23.4h, v7.h[4] \n"
"fmla v29.4h, v23.4h, v7.h[5] \n"
"fmla v30.4h, v23.4h, v7.h[6] \n"
"fmla v31.4h, v23.4h, v7.h[7] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < tiles; i += 4)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
const __fp16* kptr = kernel0_tm.row<const __fp16>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n"
"fmla v24.4h, v16.4h, v0.h[0] \n"
"fmla v25.4h, v16.4h, v0.h[1] \n"
"fmla v26.4h, v16.4h, v0.h[2] \n"
"fmla v27.4h, v16.4h, v0.h[3] \n"
"fmla v24.4h, v17.4h, v0.h[4] \n"
"fmla v25.4h, v17.4h, v0.h[5] \n"
"fmla v26.4h, v17.4h, v0.h[6] \n"
"fmla v27.4h, v17.4h, v0.h[7] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n"
"fmla v24.4h, v18.4h, v1.h[0] \n"
"fmla v25.4h, v18.4h, v1.h[1] \n"
"fmla v26.4h, v18.4h, v1.h[2] \n"
"fmla v27.4h, v18.4h, v1.h[3] \n"
"fmla v24.4h, v19.4h, v1.h[4] \n"
"fmla v25.4h, v19.4h, v1.h[5] \n"
"fmla v26.4h, v19.4h, v1.h[6] \n"
"fmla v27.4h, v19.4h, v1.h[7] \n"
"fmla v24.4h, v20.4h, v2.h[0] \n"
"fmla v25.4h, v20.4h, v2.h[1] \n"
"fmla v26.4h, v20.4h, v2.h[2] \n"
"fmla v27.4h, v20.4h, v2.h[3] \n"
"fmla v24.4h, v21.4h, v2.h[4] \n"
"fmla v25.4h, v21.4h, v2.h[5] \n"
"fmla v26.4h, v21.4h, v2.h[6] \n"
"fmla v27.4h, v21.4h, v2.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4h, v22.4h, v3.h[0] \n"
"fmla v25.4h, v22.4h, v3.h[1] \n"
"fmla v26.4h, v22.4h, v3.h[2] \n"
"fmla v27.4h, v22.4h, v3.h[3] \n"
"fmla v24.4h, v23.4h, v3.h[4] \n"
"fmla v25.4h, v23.4h, v3.h[5] \n"
"fmla v26.4h, v23.4h, v3.h[6] \n"
"fmla v27.4h, v23.4h, v3.h[7] \n"
"bne 0b \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
for (; i < tiles; i++)
{
const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4);
const __fp16* kptr = kernel0_tm.row<const __fp16>(r);
float16x4_t _sum0 = vdup_n_f16((__fp16)0.f);
for (int q = 0; q < inch; q++)
{
float16x8_t _r0 = vld1q_f16(r0);
float16x4_t _k0 = vld1_f16(kptr);
float16x4_t _k1 = vld1_f16(kptr + 4);
float16x4_t _k2 = vld1_f16(kptr + 8);
float16x4_t _k3 = vld1_f16(kptr + 12);
float16x4_t _k4 = vld1_f16(kptr + 16);
float16x4_t _k5 = vld1_f16(kptr + 20);
float16x4_t _k6 = vld1_f16(kptr + 24);
float16x4_t _k7 = vld1_f16(kptr + 28);
_sum0 = vfma_laneq_f16(_sum0, _k0, _r0, 0);
_sum0 = vfma_laneq_f16(_sum0, _k1, _r0, 1);
_sum0 = vfma_laneq_f16(_sum0, _k2, _r0, 2);
_sum0 = vfma_laneq_f16(_sum0, _k3, _r0, 3);
_sum0 = vfma_laneq_f16(_sum0, _k4, _r0, 4);
_sum0 = vfma_laneq_f16(_sum0, _k5, _r0, 5);
_sum0 = vfma_laneq_f16(_sum0, _k6, _r0, 6);
_sum0 = vfma_laneq_f16(_sum0, _k7, _r0, 7);
kptr += 32;
r0 += 8;
}
vst1_f16(output0_tm, _sum0);
output0_tm += 4;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 2u * 4, 4, opt.workspace_allocator);
}
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
float16x4_t _bias0 = bias ? vld1_f16((const __fp16*)bias + p * 4) : vdup_n_f16(0.f);
__fp16 tmp[6][8][4];
// tile
for (int i = 0; i < outh / 6; i++)
{
for (int j = 0; j < outw / 6; j++)
{
// top_blob_tm.create(tiles, 64, outch, elemsize, elempack);
const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * 4;
const __fp16* output0_tm_1 = output0_tm_0 + tiles * 4;
const __fp16* output0_tm_2 = output0_tm_0 + tiles * 8;
const __fp16* output0_tm_3 = output0_tm_0 + tiles * 12;
const __fp16* output0_tm_4 = output0_tm_0 + tiles * 16;
const __fp16* output0_tm_5 = output0_tm_0 + tiles * 20;
const __fp16* output0_tm_6 = output0_tm_0 + tiles * 24;
const __fp16* output0_tm_7 = output0_tm_0 + tiles * 28;
__fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 4;
// TODO neon optimize
for (int m = 0; m < 8; m++)
{
float16x4_t _out0tm0 = vld1_f16(output0_tm_0);
float16x4_t _out0tm1 = vld1_f16(output0_tm_1);
float16x4_t _out0tm2 = vld1_f16(output0_tm_2);
float16x4_t _out0tm3 = vld1_f16(output0_tm_3);
float16x4_t _out0tm4 = vld1_f16(output0_tm_4);
float16x4_t _out0tm5 = vld1_f16(output0_tm_5);
float16x4_t _out0tm6 = vld1_f16(output0_tm_6);
float16x4_t _out0tm7 = vld1_f16(output0_tm_7);
float16x4_t _tmp024a = vadd_f16(_out0tm1, _out0tm2);
float16x4_t _tmp135a = vsub_f16(_out0tm1, _out0tm2);
// float tmp024a = output0_tm[1] + output0_tm[2];
// float tmp135a = output0_tm[1] - output0_tm[2];
float16x4_t _tmp024b = vadd_f16(_out0tm3, _out0tm4);
float16x4_t _tmp135b = vsub_f16(_out0tm3, _out0tm4);
// float tmp024b = output0_tm[3] + output0_tm[4];
// float tmp135b = output0_tm[3] - output0_tm[4];
float16x4_t _tmp024c = vadd_f16(_out0tm5, _out0tm6);
float16x4_t _tmp135c = vsub_f16(_out0tm5, _out0tm6);
// float tmp024c = output0_tm[5] + output0_tm[6];
// float tmp135c = output0_tm[5] - output0_tm[6];
float16x4_t _tmp0m = vadd_f16(vadd_f16(_out0tm0, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f));
float16x4_t _tmp2m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
float16x4_t _tmp4m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
vst1_f16(tmp[0][m], _tmp0m);
vst1_f16(tmp[2][m], _tmp2m);
vst1_f16(tmp[4][m], _tmp4m);
// tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
// tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
// tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
float16x4_t _tmp1m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
float16x4_t _tmp3m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
float16x4_t _tmp5m = vadd_f16(vadd_f16(_out0tm7, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f));
vst1_f16(tmp[1][m], _tmp1m);
vst1_f16(tmp[3][m], _tmp3m);
vst1_f16(tmp[5][m], _tmp5m);
// tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
// tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
// tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += tiles * 32;
output0_tm_1 += tiles * 32;
output0_tm_2 += tiles * 32;
output0_tm_3 += tiles * 32;
output0_tm_4 += tiles * 32;
output0_tm_5 += tiles * 32;
output0_tm_6 += tiles * 32;
output0_tm_7 += tiles * 32;
}
for (int m = 0; m < 6; m++)
{
float16x4_t _tmp00 = vld1_f16(tmp[m][0]);
float16x4_t _tmp01 = vld1_f16(tmp[m][1]);
float16x4_t _tmp02 = vld1_f16(tmp[m][2]);
float16x4_t _tmp03 = vld1_f16(tmp[m][3]);
float16x4_t _tmp04 = vld1_f16(tmp[m][4]);
float16x4_t _tmp05 = vld1_f16(tmp[m][5]);
float16x4_t _tmp06 = vld1_f16(tmp[m][6]);
float16x4_t _tmp07 = vld1_f16(tmp[m][7]);
float16x4_t _tmp024a = vadd_f16(_tmp01, _tmp02);
float16x4_t _tmp135a = vsub_f16(_tmp01, _tmp02);
// float tmp024a = tmp0[1] + tmp0[2];
// float tmp135a = tmp0[1] - tmp0[2];
float16x4_t _tmp024b = vadd_f16(_tmp03, _tmp04);
float16x4_t _tmp135b = vsub_f16(_tmp03, _tmp04);
// float tmp024b = tmp0[3] + tmp0[4];
// float tmp135b = tmp0[3] - tmp0[4];
float16x4_t _tmp024c = vadd_f16(_tmp05, _tmp06);
float16x4_t _tmp135c = vsub_f16(_tmp05, _tmp06);
// float tmp024c = tmp0[5] + tmp0[6];
// float tmp135c = tmp0[5] - tmp0[6];
float16x4_t _out00 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp00, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f)));
float16x4_t _out02 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
float16x4_t _out04 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
vst1_f16(output0, _out00);
vst1_f16(output0 + 8, _out02);
vst1_f16(output0 + 16, _out04);
// output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
// output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
// output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
float16x4_t _out01 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
float16x4_t _out03 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
float16x4_t _out05 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp07, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f)));
vst1_f16(output0 + 4, _out01);
vst1_f16(output0 + 12, _out03);
vst1_f16(output0 + 20, _out05);
// output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
// output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
// output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw * 4;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
matrix.c | //
// Created by Javier Peralta on 9/16/17.
//
#include "matrix.h"
//#include <omp.h>
/* Scales every entry of v (length size) by d, in place. */
void vectorScalar (double *v, double d, int size){
    double *stop = v + size;
    while (v < stop) {
        *v++ *= d;
    }
}
/* Element-wise difference: out[k] = v1[k] - v2[k].
 * out may alias v1 or v2 (each slot depends only on the same index). */
void restaVector(double *v1, double *v2, double* out, int size){
    for (int k = size; k-- > 0; ) {
        out[k] = v1[k] - v2[k];
    }
}
/* Element-wise sum: out[k] = v1[k] + v2[k]. */
void sumaVector(double *v1, double *v2, double* out, int size){
    int k = 0;
    while (k < size) {
        out[k] = v1[k] + v2[k];
        ++k;
    }
}
/* Element-wise (Hadamard) product: out[k] = vec1[k] * vec2[k]. */
void multVector(double *vec1, double *vec2, double* out, int size){
    double *stop = out + size;
    while (out < stop) {
        *out++ = (*vec1++) * (*vec2++);
    }
}
/* Dot product of vec1 and vec2 (length size); accumulates in index order. */
double productoPunto(double *vec1, double *vec2, int size){
    double acc = 0.0;
    for (int k = 0; k < size; ++k) {
        acc += vec1[k] * vec2[k];
    }
    return acc;
}
/* Stores the element-wise product of vec1 and vec2 into vec3. */
void productoPuntoA(double *vec1, double *vec2, double* vec3, int size){
    int k = size;
    while (k--) {
        vec3[k] = vec1[k] * vec2[k];
    }
}
/* Matrix product res = mat1 * mat2.
 * mat1 is n x m, mat2 is p x q; requires m == p.  res must be a
 * pre-allocated n x q matrix.  On a dimension mismatch the function
 * reports the error on stderr and leaves res untouched. */
void multMatriz(double **mat1, double **mat2, int n, int m, int p, int q, double **res){
    //fila * columna
    if (m != p) {
        // fix: perror() appends strerror(errno), which is unrelated here
        // (no system call failed); report the mismatch plainly on stderr.
        fprintf(stderr, "Numero de filas de la primera matriz debe ser igual numero de columnas de la segunda\n");
        return;
    }
    for (int i = 0; i < n; ++i) {
        double *fila = res[i]; // cache the output row pointer
        for (int j = 0; j < q; ++j) {
            double c = 0;
            for (int k = 0; k < m; ++k) {
                c += mat1[i][k] * mat2[k][j];
            }
            fila[j] = c;
        }
    }
}
/* Matrix-vector product: res[r] = row r of mat (length m) dotted with vec. */
void multMatrizVect(double **mat, double *vec, int n, int m, double* res){
    int r = 0;
    while (r < n) {
        res[r] = productoPunto(mat[r], vec, m);
        ++r;
    }
}
//other
/* Prints n values on one line; a leading blank before non-negative values
 * keeps columns aligned with negative ones (which carry a '-' sign). */
void printVect(double * a, int n){
    for (int k = 0; k < n; ++k) {
        if (a[k] >= 0) {
            printf(" ");
        }
        printf("%3.3lf ", a[k]);
    }
    printf("\n");
}
/* Prints a row-major matrix, one row per line. */
void printMtx(double**a, int nr, int nc){
    int r = 0;
    while (r < nr) {
        printVect(a[r++], nc);
    }
}
/* Prints the transpose of a: column c of the matrix becomes output row c. */
void printMtxT(double**a, int nr, int nc){
    for (int c = 0; c < nc; ++c) {
        for (int r = 0; r < nr; ++r) {
            double val = a[r][c];
            if (val >= 0) {
                printf(" "); // align with negative entries' sign column
            }
            printf("%3.3lf ", val);
        }
        printf("\n");
    }
}
/* Reads a vector from a binary file: an int element count followed by that
 * many doubles.  Returns a malloc'd array (caller frees) and stores the
 * count in *sz, or NULL on open/read/allocation failure.
 * Fix: the original looped `fread(vect, sizeof(double), *sz, f)` *sz times,
 * re-reading into the array start on each pass (only "worked" because later
 * passes hit EOF and read nothing); it also never checked any fread result. */
double *readVector(char* name, int* sz){
    FILE *f = fopen(name, "rb");
    if (!f) return NULL;
    if (fread(sz, sizeof(int), 1, f) != 1 || *sz < 0) {
        fclose(f);
        return NULL;
    }
    double *vect = malloc(sizeof(double) * *sz);
    if (!vect) {
        fclose(f);
        return NULL;
    }
    if (fread(vect, sizeof(double), (size_t)*sz, f) != (size_t)*sz) {
        free(vect);
        fclose(f);
        return NULL;
    }
    fclose(f);
    return vect;
}
/* forward declarations (also provided by matrix.h; harmless duplicates) */
double **allocMtx(int nr, int nc);
void freeMtx(double **a);

/* Reads an nr x nc matrix from a binary file: two int dimensions followed by
 * the row-major double data.  Returns an allocMtx()-allocated matrix (free
 * with freeMtx) and stores the dimensions, or NULL on open/read failure.
 * Fix: the original never checked any fread result, returning a matrix full
 * of garbage for truncated or malformed files. */
double **readMtx(char* name, int* nr, int* nc){
    FILE *f = fopen(name, "rb");
    if (!f) return NULL;
    if (fread(nr, sizeof(int), 1, f) != 1 || fread(nc, sizeof(int), 1, f) != 1
        || *nr <= 0 || *nc <= 0) {
        fclose(f);
        return NULL;
    }
    double **mtx = allocMtx(*nr, *nc);
    for (int i = 0; i < *nr; ++i) {
        if (fread(mtx[i], sizeof(double), (size_t)*nc, f) != (size_t)*nc) {
            freeMtx(mtx); // short file: release the partly-filled matrix
            fclose(f);
            return NULL;
        }
    }
    fclose(f);
    return mtx;
}
/* Allocates an nr x nc matrix as an array of row pointers.
 * Layout trick: an int header is stored immediately BEFORE the returned
 * row-pointer array, recording how many row blocks were malloc'd
 * (0 => one contiguous data block, nr => one malloc per row).
 * freeMtx() reads this hidden header back, so the two functions must agree
 * on the layout.
 * NOTE(review): the returned pointer sits sizeof(int) bytes past the malloc'd
 * base, so the double* array may be misaligned on platforms where
 * _Alignof(double*) > sizeof(int); `(void*)indi + sizeof(int)` also relies on
 * GNU void*-arithmetic — confirm the target compilers accept both.
 * Allocation results are not checked — TODO confirm OOM policy with callers. */
double **allocMtx(int nr, int nc){
    double **mtx = malloc((sizeof(double*)*nr) + sizeof(int));
    int *indi = (int*)mtx;              // header lives at the malloc'd base
    mtx = (void*)indi+ sizeof(int);     // row-pointer array starts after it
    if(nr * nc * sizeof(double) < MTXMAXSIZE) {
        indi[0] = 0; //indicate 1 block
        // small matrix: one contiguous block, row i = row i-1 + nc doubles
        mtx[0] = malloc(sizeof(double) * nr*nc);
        for (int i = 1; i < nr; ++i) {
            mtx[i] = mtx[i-1] + nc;
        }
    } else {
        indi[0] = nr; //indicate nr block
        // large matrix: one separate allocation per row
        for (int i = 0; i < nr; ++i) {
            mtx[i] = malloc(sizeof(double) * nc);
        }
    }
    return mtx;
}
/* Allocates an n x n identity matrix (via allocMtx; free with freeMtx). */
double** allocMtxI(int n){
    double ** mtx = allocMtx(n, n);
    // zero everything, then place ones on the diagonal
    for (int r = 0; r < n; ++r) {
        for (int c = 0; c < n; ++c) {
            mtx[r][c] = 0.0;
        }
    }
    for (int d = 0; d < n; ++d) {
        mtx[d][d] = 1.0;
    }
    return mtx;
}
/* Frees a matrix created by allocMtx().
 * Reads the hidden int header stored just before the row-pointer array:
 * 0 means the data is one contiguous block (free a[0] only), otherwise it is
 * the number of per-row blocks to free.  Must stay in sync with allocMtx(). */
void freeMtx(double**a){
    if(a == NULL) return; //nothing to free...
    void *indi = (void*)a - sizeof(int); // step back to the malloc'd base (GNU void* arithmetic)
    int nr = ((int*)indi)[0];
    if(nr){
        for (int i = 0; i < nr; ++i) free(a[i]);
    }
    else free(a[0]);
    free(indi);
}
//
/* Euclidean (2-) norm of v: square root of the sum of squares. */
double norma2Vect(double* v, int size){
    double sq = norma2VectSq(v, size);
    return sqrt(sq);
}
/* Squared Euclidean norm of v: sum of v[k]*v[k]. */
double norma2VectSq(double* v, int size){
    double acc = 0.0;
    for (int k = 0; k < size; ++k) {
        acc += v[k] * v[k];
    }
    return acc;
}
/* Normalizes v to unit Euclidean length, in place.
 * Fix: the original divided by the norm unconditionally, so a zero vector
 * (norm == 0) filled v with NaN (0/0); now the zero vector is left as-is. */
void normalizaVect(double *v, int size){
    double norm = sqrt(norma2VectSq(v, size));
    if (norm == 0.0) return; // zero vector: nothing to normalize
    for (int i = 0; i < size; i++) v[i] /= norm;
}
/* Squared Euclidean distance between v1 and v2: sum of (v1[k]-v2[k])^2. */
double diffVectSq(double* v1, double* v2, int size){
    double acc = 0.0;
    for (int k = 0; k < size; ++k) {
        double delta = v1[k] - v2[k];
        acc += delta * delta;
    }
    return acc;
}
/* Squared Frobenius distance between m1 and m2 (both nr x nc).
 * NOTE(review): walks m1[0][i] linearly across all nr*nc entries, which is
 * only valid when both matrices were allocated as ONE contiguous block
 * (allocMtx's small-matrix path, nr*nc*sizeof(double) < MTXMAXSIZE).
 * For the per-row allocation path this reads out of bounds — confirm callers
 * never pass large matrices here. */
double diffMatrizSq(double** m1, double** m2, int nr, int nc){
    //#pragma omp parallel for reduction(+:c)
    double c = 0;
    int sz = nr*nc;
    for (int i = 0; i < sz; ++i) {
        double dif = m1[0][i] - m2[0][i];
        c += dif* dif;
    }
    return c;
}
/* Solves the diagonal system diag(a) * x = b.
 * Returns a malloc'd solution vector (caller frees), or NULL when a zero
 * diagonal entry faces a nonzero b entry (no solution).  A zero entry facing
 * a zero b entry yields x[i] = 0 (one of infinitely many solutions); both
 * degenerate cases are reported on stdout.
 * Fix: the original leaked `vect` when returning NULL on the no-solution
 * path; the malloc result is now also checked. */
double* diagSol(double*a , double*b, int n){
    double *vect = malloc(sizeof(double) * n);
    if (!vect) return NULL;
    for (int i = 0; i < n; ++i) {
        if (a[i] == 0){
            if (b[i] != 0){
                printf("Sin solución, X%d no tiene valor\n", i);
                free(vect); // fix: don't leak the partial solution
                return NULL;
            }
            printf("Multiples Soluciones, X%d puede tener cualquier valor\n", i);
            vect[i] = 0;
            continue;
        }
        vect[i] = b[i]/a[i];
    }
    return vect;
}
/* Back substitution: solves U * x = b for an upper-triangular matrix a.
 * Returns a malloc'd solution vector of length nc (caller frees). */
double* upperSol(double**a , double*b, int nr, int nc){
    double *x = malloc(sizeof(double) * nc);
    // bottom row first; each row uses the already-solved entries below it
    for (int row = nr - 1; row >= 0; --row) {
        double acc = b[row];
        for (int col = row + 1; col < nc; ++col) {
            acc -= x[col] * a[row][col];
        }
        x[row] = acc / a[row][row];
    }
    return x;
}
/* Forward substitution: solves L * x = b for a lower-triangular matrix a.
 * Returns a malloc'd solution vector of length nc (caller frees). */
double* lowerSol(double**a , double*b, int nr, int nc){
    double *x = malloc(sizeof(double) * nc);
    // top row first; each row uses the already-solved entries above it
    for (int row = 0; row < nr; ++row) {
        double acc = b[row];
        for (int col = 0; col < row && col < nc; ++col) {
            acc -= x[col] * a[row][col];
        }
        x[row] = acc / a[row][row];
    }
    return x;
}
/* Crout-style LU factorization: a = l * u, where l keeps its diagonal and
 * u gets an implicit unit diagonal.  l and u must be pre-allocated nr x nc
 * matrices; only the triangular halves actually used are written.
 * Returns 1 on success, 0 when a pivot |l[i][i]| falls below ZERO
 * (factorization abandoned; l/u are partially filled). */
int luFactor(double** a, double **l, double **u, int nr, int nc){
    for (int i = 0; i < nr; ++i) {
        u[i][i] = 1; // U carries a unit diagonal
        // L entries for this row: l[i][j], j <= i
        for (int j = 0; j <= i && j <nc; ++j) {
            double lij = a[i][j];
            for (int k = 0; k < j; ++k) {
                lij -= l[i][k]*u[k][j];
            }
            l[i][j] = lij;
        }
        // U entries for this row: u[i][j], j > i (divided by the pivot)
        for (int j = i+1; j < nc; ++j) {
            double lij = a[i][j];
            if(fabs(l[i][i]) < ZERO) // pivot too small: no pivoting is done, so bail out
                return 0;
            for (int k = 0; k < i; ++k) {
                lij -= l[i][k]*u[k][j];
            }
            lij /= l[i][i];
            u[i][j] = lij;
        }
    }
    return 1;
}
/* Solves A*x = b given the factorization A = L*U:
 * forward-substitute L*y = b, then back-substitute U*x = y.
 * Returns a malloc'd solution vector (caller frees). */
double* luSolver(double **l, double **u, double *b, int nr, int nc){
    double *y = lowerSol(l, b, nr, nc);
    double *x = upperSol(u, y, nr, nc);
    free(y); // intermediate vector is no longer needed
    return x;
}
//same as lu factor, but in 1 matrix
/* In-place variant of luFactor(): overwrites a with both factors —
 * L (including its diagonal) in the lower triangle, U (unit diagonal,
 * not stored) in the strict upper triangle.
 * Returns 1 on success, 0 when a pivot |a[i][i]| falls below ZERO
 * (a is left partially factored). */
int luFactor2(double **a, int nr, int nc){
    for (int i = 0; i < nr; ++i) {
        // L entries for this row: a[i][j], j <= i
        for (int j = 0; j <= i && j <nc; ++j) {
            double lij = a[i][j];
            for (int k = 0; k < j; ++k) {
                lij -= a[i][k]*a[k][j];
            }
            a[i][j] = lij;
        }
        // U entries for this row: a[i][j], j > i (divided by the pivot)
        for (int j = i+1; j < nc; ++j) {
            double lij = a[i][j];
            if(fabs(a[i][i]) < ZERO) // pivot too small: no pivoting is done, so bail out
                return 0;
            for (int k = 0; k < i; ++k) {
                lij -= a[i][k]*a[k][j];
            }
            lij /= a[i][i];
            a[i][j] = lij;
        }
    }
    return 1;
}
/* Solves A*x = b given the in-place factorization from luFactor2():
 * L occupies a's lower triangle (with diagonal), U its strict upper
 * triangle with an implicit unit diagonal.  Returns a malloc'd vector. */
double* luSolver2(double **a, double *b, int nr, int nc){
    // forward solve with L (diagonal belongs to L, so lowerSol divides by it)
    double* sol = lowerSol(a, b, nr, nc);
    // back substitution with U; its unit diagonal means no division here
    for (int row = nr - 1; row >= 0; --row) {
        double acc = sol[row];
        for (int col = row + 1; col < nc; ++col) {
            acc -= sol[col] * a[row][col];
        }
        sol[row] = acc;
    }
    return sol;
}
/* Thomas algorithm for a tridiagonal system.
 * a[0] = sub-diagonal, a[1] = main diagonal, a[2] = super-diagonal (each a
 * row of length nc), d = right-hand side.  Returns a malloc'd solution
 * vector of length nr.
 * WARNING: destroys its inputs — a[2] and d are overwritten by the forward
 * elimination.
 * NOTE(review): assumes nr == nc and nonzero pivots (no checks) — confirm
 * with callers. */
double* triDiagSol(double **a, double *d, int nr, int nc){
    double *xi = malloc(sizeof(double) * nr);
    double *ax = a[0], *bx = a[1], *cx = a[2];
    // forward sweep: normalize the first row, then eliminate the sub-diagonal
    cx[0] /= bx[0];
    d[0] /= bx[0];
    for (int i = 1; i < nc; ++i) {
        double ptemp = bx[i] - (ax[i] * cx[i-1]); // modified pivot
        cx[i] /= ptemp;
        d[i] = (d[i] - ax[i] * d[i-1])/ptemp;
    }
    // back substitution
    xi[nr-1] = d[nr-1];
    for (int i = nr-2; i >= 0; --i) {
        xi[i] = d[i] - cx[i] * xi[i+1];
    }
    return xi;
}
/* Power iteration: estimates the dominant eigenvalue of mat (nr x nc) and
 * leaves the corresponding normalized eigenvector in eigvec (length nr,
 * caller-allocated).  Stops after maxIter iterations or when the residual
 * estimate drops to toler. */
double potencia(double **mat, double *eigvec, int nr, int nc, int maxIter, double toler){
    double error;
    for (int i = 0; i < nr; ++i) eigvec[i] = 1; // start from the all-ones vector
    double *y = malloc(sizeof(double) * nr);
    double *vt = malloc(sizeof(double) * nr);
    double eigV = 0;
    int i = 0;
    do {
        multMatrizVect(mat, eigvec, nr, nc, y);  // y = A * v
        memcpy(eigvec, y, nr * sizeof(double));
        normalizaVect(eigvec, nr);               // v = y / ||y||
        multMatrizVect(mat, eigvec, nr, nc, vt); // vt = A * v
        eigV = productoPunto(eigvec, vt, nr);    // Rayleigh quotient (v has unit norm)
        memcpy(vt, eigvec, nr * sizeof(double));
        vectorScalar(vt, eigV, nr);              // vt = lambda * v
        restaVector(y, vt, vt, nr);
        // NOTE(review): y is the UN-normalized A*v from before the normalize,
        // so this is only an approximate residual ||A*v - lambda*v|| — confirm intent.
        error = norma2Vect(vt, nr);
    }
    while(++i < maxIter && error > toler);
    free(y); free(vt);
    // printf("Matriz tam %d x %d\n", nr, nc);
    // printf("Valor lambda %lf\n", eigV);
    // printf("Iteraciones realizadas %d\n", i);
    // printf("Error %g\n", error);
    return eigV;
}
/* Smallest-magnitude eigenvalue of mat, computed as the reciprocal of the
 * dominant eigenvalue of mat's inverse (inverse power method via explicit
 * inversion).  eigvec receives the associated eigenvector.
 * Fix: the original ignored its maxIter and toler parameters, passing
 * hard-coded 1000 / 0.0001 to potencia(); they are now forwarded. */
double smallestEigv(double **mat, double *eigvec, int n, int m, int maxIter, double toler){
    double **inv = allocMtx(m, n);
    inverseMtx(mat, inv, n, m);
    double lam = potencia(inv, eigvec, m, n, maxIter, toler);
    freeMtx(inv);
    // lambda_min(A) = 1 / lambda_max(A^-1); guard the near-zero case
    return fabs(lam) > ZERO ? 1/lam : lam;
}
/* Eigenvalue of mat closest to val (shift-and-invert): shifting the diagonal
 * by -val makes that eigenvalue the smallest in magnitude.  The shift is
 * undone before returning, so mat is left unchanged. */
double nearestEigv(double **mat, double *eigvec, double val, int n, int m, int maxIter, double toler){
    int d;
    for (d = 0; d < n; ++d) {
        mat[d][d] -= val;
    }
    double lam = smallestEigv(mat, eigvec, n, m, maxIter, toler);
    for (d = 0; d < n; ++d) {
        mat[d][d] += val;
    }
    return val + lam;
}
/* Shifted inverse power iteration: estimates the eigenvalue of mat nearest
 * to the shift val, returning the estimate mu and leaving the eigenvector
 * approximation in eigvec.  Outputs: *k accumulates the iteration count
 * (caller should zero it first), *err receives the final residual norm.
 * mat's diagonal is shifted by -val during the computation and restored
 * before returning. */
double potenciaInv(double **mat, double *eigvec, double val, int n, int m, int maxIter, double toler, int *k, double *err){
    for (int i = 0; i < n; ++i) {
        mat[i][i] -= val;                 // shift: work with (A - val*I)
    }
    double **inv = allocMtx(m, n);
    inverseMtx(mat, inv, n, m);           // explicit inverse of the shifted matrix
    for (int i = 0; i < n; ++i) eigvec[i] = 1; // start from the all-ones vector
    double *y = malloc(sizeof(double) * n);
    double *px = malloc(sizeof(double) * n);
    double mu = 0;
    do {
        multMatrizVect(inv, eigvec, n, m, y); // y = inv * w
        double norm = norma2Vect(y, n);
        vectorScalar(y, 1/norm, n); //x^
        vectorScalar(eigvec, 1/norm, n); //w
        mu = productoPunto(y, eigvec, n);     // eigenvalue estimate of the inverse
        memcpy(px, y, sizeof(double) * n);
        vectorScalar(px, mu, n);              // px = mu * x^
        mu += val;                            // un-shift the estimate
        restaVector(eigvec, px, px, n);       // residual w - mu*x^
        memcpy(eigvec, y, sizeof(double) *n);
        *k += 1;
        *err = norma2Vect(px, n);
    } while(*err > toler && maxIter > *k);
    for (int i = 0; i < n; ++i) {
        mat[i][i] += val;                 // restore mat's diagonal
    }
    free(px);
    free(y);
    freeMtx(inv);
    return mu;
}
/* Scans the interval [-||A||_inf, +||A||_inf] in `sections` steps, running
 * shifted inverse power iteration at each step, and collects the converged,
 * mutually distinct eigenvalue estimates.  Returns a malloc'd array of n
 * doubles (unused slots stay NAN); prints diagnostics for each accepted value.
 * Fix: eigvals has only n slots but the loop produces up to sections+1
 * candidates — the original could write past the end of the array when more
 * than n values were accepted; the scan now stops once the array is full. */
double* allEigv(double **mat, int n, int m, int maxIter, double toler, int sections){
    double d = normaInf(mat, n, m);
    double delta = 2*d/sections; // scan step across [-d, d]
    double *eigvals = malloc(sizeof(double) * n);
    for (int i = 0; i < n; ++i) eigvals[i] = NAN;
    double *eigVect = malloc(sizeof(double) * n);
    int i = 0;
    int k;
    double err;
    for (int t = 0; t <= sections && i < n; ++t) { // fix: stop when eigvals is full
        k = 0;
        double aprox = -d + t*delta;
        double val = potenciaInv(mat, eigVect, aprox, n, m, maxIter, toler, &k, &err);
        // accept only converged values that differ from the previous one
        if((i==0 || fabs(val - eigvals[i-1]) > 0.0001) && err < toler){
            eigvals[i++] = val;
            printf("----------------\nValor mu %lf\n", val);
            printf("Iteraciones realizadas %d\n", k);
            printf("||r|| %g\n----------------\n", err);
        }
    }
    free(eigVect);
    return eigvals;
}
/* Infinity norm of m1 (n x m): the largest absolute row sum. */
double normaInf(double **m1, int n, int m){
    double best = 0.0;
    for (int r = 0; r < n; ++r) {
        double rowSum = 0.0;
        for (int c = 0; c < m; ++c) {
            rowSum += fabs(m1[r][c]);
        }
        if (rowSum > best) {
            best = rowSum;
        }
    }
    return best;
}
/* Computes the inverse of mat (n x m, expected square) into the
 * caller-allocated matrix inv, via LU factorization: solves A*x = e_i for
 * each identity column e_i and stores x as column i of inv.
 * WARNING: if luFactor() fails (zero pivot), inv is left UNCHANGED and no
 * error is reported to the caller.
 * NOTE(review): luSolver's result is used without a NULL check and upperSol
 * can divide by a zero diagonal — confirm inputs are nonsingular. */
void inverseMtx(double **mat, double **inv, int n, int m){
    double **l = allocMtx(n, m);
    double **u = allocMtx(n, m);
    if (luFactor(mat, l, u, n, m)){
        double *b = malloc(sizeof(double) * m);
        for (int i = 0; i < n; ++i) {
            // b = e_i, the i-th column of the identity
            for (int j = 0; j < m; ++j) {
                b[j] = j == i;
            }
            double *sol = luSolver(l, u, b, n, m);
            // scatter the solution into column i of inv
            for (int j = 0; j < n; ++j) {
                inv[j][i] = sol[j];
            }
            free(sol);
        }
        free(b);
    }
    freeMtx(l); freeMtx(u);
}
//jacobi
/* Largest absolute OFF-diagonal entry of mat (n x m); its position is
 * returned through *x (row) and *y (column).  Returns 0 (and leaves *x/*y
 * unwritten) when every off-diagonal entry is zero. */
double valMayor(double **mat, int n, int m, int *x, int *y){
    double best = 0.0;
    for (int r = 0; r < n; ++r) {
        for (int c = 0; c < m; ++c) {
            if (r == c) continue; // skip the diagonal
            double mag = fabs(mat[r][c]);
            if (mag > best) {
                best = mag;
                *x = r;
                *y = c;
            }
        }
    }
    return best;
}
//GT * A * G
/* Two-sided Givens rotation G^T * mat * G on the (mi, mj) plane, with
 * cosine c and sine s.  The column pass runs over m row indices and the
 * row pass over n column indices, matching the callers' usage. */
void givensRotate(double **mat, int n, int m, int mi, int mj, double c, double s){
    // right multiply: mix columns mi and mj
    for (int r = 0; r < m; ++r) {
        double old_mi = mat[r][mi];
        double old_mj = mat[r][mj];
        mat[r][mi] = old_mi * c - s * old_mj;
        mat[r][mj] = old_mi * s + c * old_mj;
    }
    // left multiply: mix rows mi and mj
    for (int col = 0; col < n; ++col) {
        double old_mi = mat[mi][col];
        double old_mj = mat[mj][col];
        mat[mi][col] = old_mi * c - s * old_mj;
        mat[mj][col] = old_mi * s + c * old_mj;
    }
}
/* One-sided Givens rotation mat * G: mixes columns mi and mj with cosine c
 * and sine s.  (n is unused; kept for signature parity with givensRotate.) */
void givensM(double **mat, int n, int m, int mi, int mj, double c, double s){
    (void)n;
    for (int r = 0; r < m; ++r) {
        double old_mi = mat[r][mi];
        double old_mj = mat[r][mj];
        mat[r][mi] = old_mi * c - s * old_mj;
        mat[r][mj] = old_mi * s + c * old_mj;
    }
}
/*
  Classical Jacobi eigenvalue iteration: repeatedly annihilates the largest
  off-diagonal entry with a Givens rotation until it drops below toler or
  maxIter sweeps are reached.
  Returns a malloc'd array of the n diagonal entries (eigenvalue estimates);
  caller frees it. eigVec accumulates the rotations (eigenvector estimates).
  Returns NULL when the matrix is already diagonal to within toler.
  NOTE(review): assumes mat is symmetric and eigVec starts as the identity —
  verify at the call site; neither is checked here.
*/
double* jacobiEig(double **mat, double**eigVec, int n, int m, int maxIter, double toler){
int x, y;
double max = valMayor(mat, n, m, &x, &y);
if(max < toler) return NULL; //eigvs in diag
// work on a copy so the caller's matrix is preserved for the AV/VD check below
double **eigvalsM = allocMtx(n, m);
for (int i = 0; i < n; ++i) memcpy(eigvalsM[i], mat[i], sizeof(double) * m);
int iter = 0;
while (max > toler && ++iter < maxIter){
// standard Jacobi angle: t = tan(theta) chosen to zero entry (x,y)
double d = (eigvalsM[y][y] - eigvalsM[x][x])/(2 * eigvalsM[x][y]);
double t = 1 / (fabs(d) + sqrt(1 + d*d));
t = d > 0 ? t : -t;
double c = 1/(sqrt(1 + t * t));
double s = c * t;
givensRotate(eigvalsM, n, m, x, y, c, s);
// accumulate the rotation into the eigenvector matrix (column update only)
givensM(eigVec, n, n, x, y, c, s);
max = valMayor(eigvalsM, n, m, &x, &y);
}
//printf("--------\n");printMtx(eigvalsM, n, m);
//printf("--------\n");printMtx(eigVec, n, m);
printf("Iteraciones: %d\n", iter);
// residual check: ||A*V - V*D|| should be ~0 if the decomposition converged
double **AV = allocMtx(n, m);
multMatriz(mat, eigVec, n, m, m, n, AV);
double **VD = allocMtx(n, m);
multMatriz(eigVec, eigvalsM, n, m, m, n, VD);
printf("||AV - VD|| = %g\n", sqrt(diffMatrizSq(AV, VD, n, m)));
freeMtx(VD); freeMtx(AV);
// eigenvalue estimates are the diagonal of the rotated matrix
double *eigvals = malloc(sizeof(double) * n);
for (int i = 0; i < n; ++i) {
eigvals[i] = eigvalsM[i][i];
}
freeMtx(eigvalsM);
return eigvals;
}
GB_binop__bor_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_uint16)
// A*D function (colscale): GB (_AxD__bor_uint16)
// D*A function (rowscale): GB (_DxB__bor_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__bor_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_uint16)
// C=scalar+B GB (_bind1st__bor_uint16)
// C=scalar+B' GB (_bind1st_tran__bor_uint16)
// C=A+scalar GB (_bind2nd__bor_uint16)
// C=A'+scalar GB (_bind2nd_tran__bor_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) | (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_UINT16 || GxB_NO_BOR_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NOTE: compiled out (#if 0) — BOR is not in that list, so the code
// generator emits this placeholder named "(none)".
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense; the numeric loop is supplied by the
// included template, specialized here for z = x | y on uint16_t.
GrB_Info GB (_Cdense_ewise3_noaccum__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// kernel disabled at compile time; caller falls back to the generic path
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C, with B
// divided among threads by the precomputed B_ek_slicing tasks.
GrB_Info GB (_Cdense_accumB__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b (passed type-erased via p_bwork)
// into every entry of the dense matrix C.
GrB_Info GB (_Cdense_accumb__bor_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above already returned; harmless
// artifact of the code generator (this file is auto-generated, do not edit).
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D; the pattern of C
// matches A, so only C->x is written here.
GrB_Info GB (_AxD__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D; only C->x is
// written here (pattern of C matches B).
GrB_Info GB (_DxB__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (entries in either A or B appear in C), optionally under
// mask M or its complement. Slicing workspaces are declared here and released
// by GB_FREE_WORK after the template runs.
GrB_Info GB (_AaddB__bor_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B where C is sparse or hypersparse,
// optionally under mask M or its complement; work is driven by TaskList.
GrB_Info GB (_AemultB_08__bor_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP is 0 for BOR (commutative), so only the
// unflipped branch below is compiled in.
GrB_Info GB (_AemultB_02__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full; iteration is over the entries of M.
GrB_Info GB (_AemultB_04__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__bor_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x | Bx [p] for every entry present in B: the scalar is bound as
// the first operand. Bb is B's bitmap (NULL-tolerant via the GBB macro).
GrB_Info GB (_bind1st__bor_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only positions present in the bitmap are computed
        if (GBB (Bb, p))
        {
            uint16_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x) | (bij) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] | y for every entry present in A: the scalar is bound as
// the second operand. Ab is A's bitmap (NULL-tolerant via the GBB macro).
GrB_Info GB (_bind2nd__bor_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only positions present in the bitmap are computed
        if (GBB (Ab, p))
        {
            uint16_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij) | (y) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) | (aij) ; \
}
// C = op (x, A'): transpose A and apply cij = x | aij (scalar bound first).
GrB_Info GB (_bind1st_tran__bor_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for code after this function (same type here, since
// A and B types coincide for BOR on uint16)
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) | (y) ; \
}
// C = op (A', y): transpose A and apply cij = aij | y (scalar bound second);
// the per-entry work is done by the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_subassign_04.c | //------------------------------------------------------------------------------
// GB_subassign_04: C(I,J) += A ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Method 04: C(I,J) += A ; using S
// M: NULL
// Mask_comp: false
// C_replace: false
// accum: present
// A: matrix
// S: constructed
#define GB_FREE_WORK GB_FREE_TWO_SLICE
#include "GB_subassign_methods.h"
// C(I,J) += A, using the extracted pattern S of C(I,J); no mask and no
// C_replace. Entries present in both S and A are updated via the accum
// operator (phase 1); entries only in A become pending tuples (phase 2);
// entries only in S are left untouched.
GrB_Info GB_subassign_04
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_BinaryOp accum,
const GrB_Matrix A,
const GrB_Matrix S,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
// NOTE(review): these GB_GET_* macros (GB_subassign_methods.h) declare the
// locals used throughout the method body (Ap, Ai, Ax, asize, Sp, Si,
// nzombies, nthreads, ntasks, Zh, Z_to_X, Z_to_S, ...).
GB_GET_C ;
GB_GET_A ;
GB_GET_S ;
GB_GET_ACCUM ;
//--------------------------------------------------------------------------
// Method 04: C(I,J) += A ; using S
//--------------------------------------------------------------------------
// Time: Close to Optimal. Every entry in A must be visited, and the
// corresponding entry in S must then be found. Time for this phase is
// Omega(nnz(A)), but S has already been constructed, in Omega(nnz(S))
// time. This method simply traverses all of A+S (like GB_add for
// computing A+S), the same as Method 02. Time taken is O(nnz(A)+nnz(S)).
// The only difference is that the traversal of A+S can terminate if A is
// exhausted. Entries in S but not A do not actually require any work
// (unlike Method 02, which must visit all entries in A+S).
// Method 02 and Method 04 are somewhat similar. They differ on how C is
// modified when the entry is present in S but not A.
// Compare with Method 16, which computes C(I,J)<!M> += A, using S.
//--------------------------------------------------------------------------
// Parallel: Z=A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
//--------------------------------------------------------------------------
GB_SUBASSIGN_TWO_SLICE (A, S) ;
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE1 ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get A(:,j) and S(:,j)
//------------------------------------------------------------------
int64_t j = (Zh == NULL) ? k : Zh [k] ;
GB_GET_MAPPED_VECTOR (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X) ;
GB_GET_MAPPED_VECTOR (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S) ;
//------------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//------------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
// int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = Si [pS] ;
int64_t iA = Ai [pA] ;
if (iS < iA)
{
// ----[C . 1] or [X . 1]-----------------------------------
// S (i,j) is present but A (i,j) is not
// [C . 1]: action: ( C ): no change, with accum
// [X . 1]: action: ( X ): still a zombie
GB_NEXT (S) ;
}
else if (iA < iS)
{
// ----[. A 1]----------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
task_pending++ ;
GB_NEXT (A) ;
}
else
{
// ----[C A 1] or [X A 1]-----------------------------------
// both S (i,j) and A (i,j) present
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_C_S_LOOKUP ;
GB_withaccum_C_A_1_matrix ;
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
// ignore the remainder of S (:,j)
// List A (:,j) has entries. List S (:,j) exhausted.
// every leftover entry of A becomes a pending tuple
task_pending += (pA_end - pA) ;
}
GB_PHASE1_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
GB_PENDING_CUMSUM ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE2 ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get A(:,j) and S(:,j)
//------------------------------------------------------------------
int64_t j = (Zh == NULL) ? k : Zh [k] ;
GB_GET_MAPPED_VECTOR (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X) ;
GB_GET_MAPPED_VECTOR (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S) ;
//------------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//------------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = Si [pS] ;
int64_t iA = Ai [pA] ;
if (iS < iA)
{
GB_NEXT (S) ;
}
else if (iA < iS)
{
// ----[. A 1]----------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT (Ax +(pA*asize)) ;
GB_NEXT (A) ;
}
else
{
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
// ignore the remainder of S (:,j)
// while list A (:,j) has entries. List S (:,j) exhausted.
while (pA < pA_end)
{
// ----[. A 1]--------------------------------------------------
// S (i,j) is not present, A (i,j) is present
// [. A 1]: action: ( insert )
int64_t iA = Ai [pA] ;
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT (Ax +(pA*asize)) ;
GB_NEXT (A) ;
}
}
GB_PHASE2_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
convolution_3x3_pack8to1_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transform 3x3 int8 convolution kernels into the Winograd F(4x4, 3x3)
// domain (6x6 tiles) and interleave them into the pack8-to-pack1 layout
// consumed by conv3x3s1_winograd42_pack8to1_int8_sse.
static void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt)
{
// runtime dispatch: prefer VNNI-specialized builds when the CPU supports
// them but this translation unit was not compiled with them
#if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
if (ncnn::cpu_support_x86_avx512_vnni())
{
extern void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avx512vnni(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt);
conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avx512vnni(kernel, kernel_tm_pack8to1, inch, outch, opt);
return;
}
#endif
#if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
if (ncnn::cpu_support_x86_avx_vnni())
{
extern void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avxvnni(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt);
conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avxvnni(kernel, kernel_tm_pack8to1, inch, outch, opt);
return;
}
#endif
// winograd42 transform kernel
Mat kernel_tm(6 * 6, inch, outch, (size_t)2u);
// G matrix of the Winograd kernel transform, scaled to integers
const short ktm[6][3] = {
{6, 0, 0},
{-4, -4, -4},
{-4, 4, -4},
{1, 2, 4},
{1, -2, 4},
{0, 0, 6}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h = G * g : transform the three kernel rows
short tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U = h * G^T : complete the 6x6 transformed tile
for (int j = 0; j < 6; j++)
{
short* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = 4b-8a-inch/8a-36-outch/4b
kernel_tm_pack8to1.create(8 * inch / 8, 36, outch / 4 + outch % 4, (size_t)2u * 4, 4);
// main loop: pack groups of 4 output channels together
int p = 0;
for (; p + 3 < outch; p += 4)
{
const Mat k0 = kernel_tm.channel(p);
const Mat k1 = kernel_tm.channel(p + 1);
const Mat k2 = kernel_tm.channel(p + 2);
const Mat k3 = kernel_tm.channel(p + 3);
Mat g0 = kernel_tm_pack8to1.channel(p / 4);
for (int k = 0; k < 36; k++)
{
short* g00 = g0.row<short>(k);
for (int q = 0; q + 7 < inch; q += 8)
{
#if __AVXVNNI__ || __AVX512VNNI__
// VNNI layout: interleave pairs of input channels per output channel
for (int i = 0; i < 4; i++)
{
const short* k00 = k0.row<const short>(q + i * 2);
const short* k10 = k1.row<const short>(q + i * 2);
const short* k20 = k2.row<const short>(q + i * 2);
const short* k30 = k3.row<const short>(q + i * 2);
const short* k01 = k0.row<const short>(q + i * 2 + 1);
const short* k11 = k1.row<const short>(q + i * 2 + 1);
const short* k21 = k2.row<const short>(q + i * 2 + 1);
const short* k31 = k3.row<const short>(q + i * 2 + 1);
g00[0] = k00[k];
g00[1] = k01[k];
g00[2] = k10[k];
g00[3] = k11[k];
g00[4] = k20[k];
g00[5] = k21[k];
g00[6] = k30[k];
g00[7] = k31[k];
g00 += 8;
}
#else
for (int i = 0; i < 8; i++)
{
g00[0] = k0.row<const short>(q + i)[k];
g00[1] = k1.row<const short>(q + i)[k];
g00[2] = k2.row<const short>(q + i)[k];
g00[3] = k3.row<const short>(q + i)[k];
g00 += 4;
}
#endif
}
}
}
// tail: remaining output channels, one per destination channel
for (; p < outch; p++)
{
const Mat k0 = kernel_tm.channel(p);
// p / 4 skips the 4-wide groups above; p % 4 indexes the leftovers
Mat g0 = kernel_tm_pack8to1.channel(p / 4 + p % 4);
for (int k = 0; k < 36; k++)
{
short* g00 = g0.row<short>(k);
for (int q = 0; q + 7 < inch; q += 8)
{
for (int i = 0; i < 8; i++)
{
g00[0] = k0.row<const short>(q + i)[k];
g00 += 1;
}
}
}
}
}
static void conv3x3s1_winograd42_pack8to1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
#if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
if (ncnn::cpu_support_x86_avx512_vnni())
{
extern void conv3x3s1_winograd42_pack8to1_int8_sse_avx512vnni(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
conv3x3s1_winograd42_pack8to1_int8_sse_avx512vnni(bottom_blob, top_blob, kernel_tm, opt);
return;
}
#endif
#if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
if (ncnn::cpu_support_x86_avx_vnni())
{
extern void conv3x3s1_winograd42_pack8to1_int8_sse_avxvnni(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
conv3x3s1_winograd42_pack8to1_int8_sse_avxvnni(bottom_blob, top_blob, kernel_tm, opt);
return;
}
#endif
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
// size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator);
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r04 + r03
// 2 = 4 * (r01 - r02) + r04 - r03
// 3 = -2 * (r01 - r03) + r04 - r02
// 4 = 2 * (r01 - r03) + r04 - r02
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
short tmp[6][6][8];
// tile
for (int i = 0; i < h_tm / 6; i++)
{
for (int j = 0; j < w_tm / 6; j++)
{
const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8;
for (int m = 0; m < 6; m++)
{
// TODO use _mm_cvtepi8_epi16 on sse4.1
__m128i _r00_01 = _mm_loadu_si128((const __m128i*)r0);
__m128i _r02_03 = _mm_loadu_si128((const __m128i*)(r0 + 16));
__m128i _r04_05 = _mm_loadu_si128((const __m128i*)(r0 + 32));
__m128i _extr0001 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r00_01);
__m128i _extr0203 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r02_03);
__m128i _extr0405 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r04_05);
__m128i _r00 = _mm_unpacklo_epi8(_r00_01, _extr0001);
__m128i _r01 = _mm_unpackhi_epi8(_r00_01, _extr0001);
__m128i _r02 = _mm_unpacklo_epi8(_r02_03, _extr0203);
__m128i _r03 = _mm_unpackhi_epi8(_r02_03, _extr0203);
__m128i _r04 = _mm_unpacklo_epi8(_r04_05, _extr0405);
__m128i _r05 = _mm_unpackhi_epi8(_r04_05, _extr0405);
__m128i _v5 = _mm_set1_epi16(5);
__m128i _tmp0m = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_r00, 2), _r04), _mm_mullo_epi16(_r02, _v5));
__m128i _tmp1m = _mm_sub_epi16(_mm_add_epi16(_r04, _r03), _mm_slli_epi16(_mm_add_epi16(_r01, _r02), 2));
__m128i _tmp2m = _mm_add_epi16(_mm_sub_epi16(_r04, _r03), _mm_slli_epi16(_mm_sub_epi16(_r01, _r02), 2));
__m128i _tmp3m = _mm_sub_epi16(_mm_sub_epi16(_r04, _r02), _mm_slli_epi16(_mm_sub_epi16(_r01, _r03), 1));
__m128i _tmp4m = _mm_add_epi16(_mm_sub_epi16(_r04, _r02), _mm_slli_epi16(_mm_sub_epi16(_r01, _r03), 1));
__m128i _tmp5m = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_r01, 2), _r05), _mm_mullo_epi16(_r03, _v5));
_mm_storeu_si128((__m128i*)tmp[0][m], _tmp0m);
_mm_storeu_si128((__m128i*)tmp[1][m], _tmp1m);
_mm_storeu_si128((__m128i*)tmp[2][m], _tmp2m);
_mm_storeu_si128((__m128i*)tmp[3][m], _tmp3m);
_mm_storeu_si128((__m128i*)tmp[4][m], _tmp4m);
_mm_storeu_si128((__m128i*)tmp[5][m], _tmp5m);
r0 += w * 8;
}
short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8;
short* r0_tm_1 = r0_tm_0 + tiles * 8;
short* r0_tm_2 = r0_tm_0 + tiles * 16;
short* r0_tm_3 = r0_tm_0 + tiles * 24;
short* r0_tm_4 = r0_tm_0 + tiles * 32;
short* r0_tm_5 = r0_tm_0 + tiles * 40;
for (int m = 0; m < 6; m++)
{
__m128i _tmp00 = _mm_loadu_si128((const __m128i*)tmp[m][0]);
__m128i _tmp01 = _mm_loadu_si128((const __m128i*)tmp[m][1]);
__m128i _tmp02 = _mm_loadu_si128((const __m128i*)tmp[m][2]);
__m128i _tmp03 = _mm_loadu_si128((const __m128i*)tmp[m][3]);
__m128i _tmp04 = _mm_loadu_si128((const __m128i*)tmp[m][4]);
__m128i _tmp05 = _mm_loadu_si128((const __m128i*)tmp[m][5]);
__m128i _v5 = _mm_set1_epi16(5);
__m128i _r0tm0 = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_tmp00, 2), _tmp04), _mm_mullo_epi16(_tmp02, _v5));
__m128i _r0tm1 = _mm_sub_epi16(_mm_add_epi16(_tmp04, _tmp03), _mm_slli_epi16(_mm_add_epi16(_tmp01, _tmp02), 2));
__m128i _r0tm2 = _mm_add_epi16(_mm_sub_epi16(_tmp04, _tmp03), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp02), 2));
__m128i _r0tm3 = _mm_sub_epi16(_mm_sub_epi16(_tmp04, _tmp02), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp03), 1));
__m128i _r0tm4 = _mm_add_epi16(_mm_sub_epi16(_tmp04, _tmp02), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp03), 1));
__m128i _r0tm5 = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_tmp01, 2), _tmp05), _mm_mullo_epi16(_tmp03, _v5));
_mm_storeu_si128((__m128i*)r0_tm_0, _r0tm0);
_mm_storeu_si128((__m128i*)r0_tm_1, _r0tm1);
_mm_storeu_si128((__m128i*)r0_tm_2, _r0tm2);
_mm_storeu_si128((__m128i*)r0_tm_3, _r0tm3);
_mm_storeu_si128((__m128i*)r0_tm_4, _r0tm4);
_mm_storeu_si128((__m128i*)r0_tm_5, _r0tm5);
r0_tm_0 += tiles * 48;
r0_tm_1 += tiles * 48;
r0_tm_2 += tiles * 48;
r0_tm_3 += tiles * 48;
r0_tm_4 += tiles * 48;
r0_tm_5 += tiles * 48;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
#if __AVX2__
if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);
#else
if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
#if __AVX2__
for (; i + 3 < tiles; i += 4)
{
short* tmpptr = tm2.row<short>(i / 4);
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
__m256i _r0 = _mm256_loadu_si256((const __m256i*)r0);
__m256i _r1 = _mm256_loadu_si256((const __m256i*)(r0 + 16));
_mm256_storeu_si256((__m256i*)tmpptr, _r0);
_mm256_storeu_si256((__m256i*)(tmpptr + 16), _r1);
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 32;
}
}
#endif
for (; i + 1 < tiles; i += 2)
{
#if __AVX2__
short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2);
#else
short* tmpptr = tm2.row<short>(i / 2);
#endif
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
__m128i _r0 = _mm_loadu_si128((const __m128i*)r0);
__m128i _r1 = _mm_loadu_si128((const __m128i*)(r0 + 8));
_mm_storeu_si128((__m128i*)tmpptr, _r0);
_mm_storeu_si128((__m128i*)(tmpptr + 8), _r1);
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 16;
}
}
for (; i < tiles; i++)
{
#if __AVX2__
short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2 + i % 2);
#else
short* tmpptr = tm2.row<short>(i / 2 + i % 2);
#endif
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
__m128i _r0 = _mm_loadu_si128((const __m128i*)r0);
_mm_storeu_si128((__m128i*)tmpptr, _r0);
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 8;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator);
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
const Mat kernel0_tm = kernel_tm.channel(p / 4);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __AVX2__
for (; i + 3 < tiles; i += 4)
{
const short* r0 = bb2.row<const short>(i / 4);
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
__m256i _sum0_1 = _mm256_setzero_si256();
__m256i _sum2_3 = _mm256_setzero_si256();
__m256i _sum4_5 = _mm256_setzero_si256();
__m256i _sum6_7 = _mm256_setzero_si256();
for (int j = 0; j < nn; j++)
{
// 0 1 2 3 4 5 6 7 8 9 a b c d e f
__m256i _val0 = _mm256_loadu_si256((const __m256i*)r0);
__m256i _w01 = _mm256_loadu_si256((const __m256i*)k0);
__m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16));
#if __AVXVNNI__ || __AVX512VNNI__
__m256i _val0_0123 = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0));
__m256i _val0_4567 = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2));
__m256i _val0_89ab = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4));
__m256i _val0_cdef = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6));
_sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val0_0123);
_sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w01, _val0_89ab);
_sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val0_4567);
_sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w23, _val0_cdef);
#else
// 0 0 1 1 2 2 3 3 8 8 9 9 a a b b
// 4 4 5 5 6 6 7 7 c c d d e e f f
__m256i _val0_0123_89ab = _mm256_unpacklo_epi16(_val0, _val0);
__m256i _val0_4567_cdef = _mm256_unpackhi_epi16(_val0, _val0);
__m256i _val0_0123 = _mm256_permutevar8x32_epi32(_val0_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
__m256i _val0_4567 = _mm256_permutevar8x32_epi32(_val0_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
__m256i _val0_89ab = _mm256_permutevar8x32_epi32(_val0_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
__m256i _val0_cdef = _mm256_permutevar8x32_epi32(_val0_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
__m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val0_0123);
__m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val0_0123);
__m256i _sl10_11 = _mm256_mullo_epi16(_w01, _val0_89ab);
__m256i _sh10_11 = _mm256_mulhi_epi16(_w01, _val0_89ab);
__m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val0_4567);
__m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val0_4567);
__m256i _sl12_13 = _mm256_mullo_epi16(_w23, _val0_cdef);
__m256i _sh12_13 = _mm256_mulhi_epi16(_w23, _val0_cdef);
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl10_11, _sh10_11));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl12_13, _sh12_13));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl10_11, _sh10_11));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl12_13, _sh12_13));
#endif
__m256i _val1 = _mm256_loadu_si256((const __m256i*)(r0 + 16));
#if __AVXVNNI__ || __AVX512VNNI__
__m256i _val1_0123 = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0));
__m256i _val1_4567 = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2));
__m256i _val1_89ab = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4));
__m256i _val1_cdef = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6));
_sum4_5 = _mm256_dpwssd_epi32(_sum4_5, _w01, _val1_0123);
_sum6_7 = _mm256_dpwssd_epi32(_sum6_7, _w01, _val1_89ab);
_sum4_5 = _mm256_dpwssd_epi32(_sum4_5, _w23, _val1_4567);
_sum6_7 = _mm256_dpwssd_epi32(_sum6_7, _w23, _val1_cdef);
#else
__m256i _val1_0123_89ab = _mm256_unpacklo_epi16(_val1, _val1);
__m256i _val1_4567_cdef = _mm256_unpackhi_epi16(_val1, _val1);
__m256i _val1_0123 = _mm256_permutevar8x32_epi32(_val1_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
__m256i _val1_4567 = _mm256_permutevar8x32_epi32(_val1_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
__m256i _val1_89ab = _mm256_permutevar8x32_epi32(_val1_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
__m256i _val1_cdef = _mm256_permutevar8x32_epi32(_val1_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
__m256i _sl04_05 = _mm256_mullo_epi16(_w01, _val1_0123);
__m256i _sh04_05 = _mm256_mulhi_epi16(_w01, _val1_0123);
__m256i _sl14_15 = _mm256_mullo_epi16(_w01, _val1_89ab);
__m256i _sh14_15 = _mm256_mulhi_epi16(_w01, _val1_89ab);
__m256i _sl06_07 = _mm256_mullo_epi16(_w23, _val1_4567);
__m256i _sh06_07 = _mm256_mulhi_epi16(_w23, _val1_4567);
__m256i _sl16_17 = _mm256_mullo_epi16(_w23, _val1_cdef);
__m256i _sh16_17 = _mm256_mulhi_epi16(_w23, _val1_cdef);
_sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpacklo_epi16(_sl04_05, _sh04_05));
_sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpacklo_epi16(_sl14_15, _sh14_15));
_sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpacklo_epi16(_sl06_07, _sh06_07));
_sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpacklo_epi16(_sl16_17, _sh16_17));
_sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpackhi_epi16(_sl04_05, _sh04_05));
_sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpackhi_epi16(_sl14_15, _sh14_15));
_sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpackhi_epi16(_sl06_07, _sh06_07));
_sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpackhi_epi16(_sl16_17, _sh16_17));
#endif
r0 += 32;
k0 += 32;
}
__m256i _sum0_2 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 2, 0, 0));
__m256i _sum1_3 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 3, 0, 1));
_sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3);
__m256i _sum4_6 = _mm256_permute2x128_si256(_sum4_5, _sum6_7, _MM_SHUFFLE(0, 2, 0, 0));
__m256i _sum5_7 = _mm256_permute2x128_si256(_sum4_5, _sum6_7, _MM_SHUFFLE(0, 3, 0, 1));
_sum4_6 = _mm256_add_epi32(_sum4_6, _sum5_7);
int sum[16];
_mm256_storeu_si256((__m256i*)sum, _sum0_2);
_mm256_storeu_si256((__m256i*)(sum + 8), _sum4_6);
output0_tm[0] = sum[0];
output1_tm[0] = sum[1];
output2_tm[0] = sum[2];
output3_tm[0] = sum[3];
output0_tm[1] = sum[4];
output1_tm[1] = sum[5];
output2_tm[1] = sum[6];
output3_tm[1] = sum[7];
output0_tm[2] = sum[8];
output1_tm[2] = sum[9];
output2_tm[2] = sum[10];
output3_tm[2] = sum[11];
output0_tm[3] = sum[12];
output1_tm[3] = sum[13];
output2_tm[3] = sum[14];
output3_tm[3] = sum[15];
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
}
#endif
for (; i + 1 < tiles; i += 2)
{
#if __AVX2__
const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2);
#else
const short* r0 = bb2.row<const short>(i / 2);
#endif
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
#if __AVX2__
__m256i _sum0_1 = _mm256_setzero_si256();
__m256i _sum2_3 = _mm256_setzero_si256();
#else
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
__m128i _sum2 = _mm_setzero_si128();
__m128i _sum3 = _mm_setzero_si128();
#endif
for (int j = 0; j < nn; j++)
{
#if __AVX2__
// 0 1 2 3 4 5 6 7 8 9 a b c d e f
__m256i _val = _mm256_loadu_si256((const __m256i*)r0);
__m256i _w01 = _mm256_loadu_si256((const __m256i*)k0);
__m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16));
#if __AVXVNNI__ || __AVX512VNNI__
__m256i _val_0123 = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0));
__m256i _val_4567 = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2));
__m256i _val_89ab = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4));
__m256i _val_cdef = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6));
_sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val_0123);
_sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w01, _val_89ab);
_sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val_4567);
_sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w23, _val_cdef);
#else
__m256i _val_0123_89ab = _mm256_unpacklo_epi16(_val, _val);
__m256i _val_4567_cdef = _mm256_unpackhi_epi16(_val, _val);
__m256i _val_0123 = _mm256_permutevar8x32_epi32(_val_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
__m256i _val_4567 = _mm256_permutevar8x32_epi32(_val_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
__m256i _val_89ab = _mm256_permutevar8x32_epi32(_val_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
__m256i _val_cdef = _mm256_permutevar8x32_epi32(_val_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
__m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val_0123);
__m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val_0123);
__m256i _sl10_11 = _mm256_mullo_epi16(_w01, _val_89ab);
__m256i _sh10_11 = _mm256_mulhi_epi16(_w01, _val_89ab);
__m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val_4567);
__m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val_4567);
__m256i _sl12_13 = _mm256_mullo_epi16(_w23, _val_cdef);
__m256i _sh12_13 = _mm256_mulhi_epi16(_w23, _val_cdef);
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl10_11, _sh10_11));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl12_13, _sh12_13));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl10_11, _sh10_11));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl12_13, _sh12_13));
#endif
#else
// 0 1 2 3 4 5 6 7
__m128i _val0 = _mm_loadu_si128((const __m128i*)r0);
__m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8));
__m128i _w0 = _mm_loadu_si128((const __m128i*)k0);
__m128i _w1 = _mm_loadu_si128((const __m128i*)(k0 + 8));
__m128i _w2 = _mm_loadu_si128((const __m128i*)(k0 + 16));
__m128i _w3 = _mm_loadu_si128((const __m128i*)(k0 + 24));
// 0 0 1 1 2 2 3 3
// 4 4 5 5 6 6 7 7
__m128i _val0_0123 = _mm_unpacklo_epi16(_val0, _val0);
__m128i _val0_4567 = _mm_unpackhi_epi16(_val0, _val0);
__m128i _val1_0123 = _mm_unpacklo_epi16(_val1, _val1);
__m128i _val1_4567 = _mm_unpackhi_epi16(_val1, _val1);
__m128i _val0_01 = _mm_unpacklo_epi32(_val0_0123, _val0_0123);
__m128i _val0_23 = _mm_unpackhi_epi32(_val0_0123, _val0_0123);
__m128i _val0_45 = _mm_unpacklo_epi32(_val0_4567, _val0_4567);
__m128i _val0_67 = _mm_unpackhi_epi32(_val0_4567, _val0_4567);
__m128i _val1_01 = _mm_unpacklo_epi32(_val1_0123, _val1_0123);
__m128i _val1_23 = _mm_unpackhi_epi32(_val1_0123, _val1_0123);
__m128i _val1_45 = _mm_unpacklo_epi32(_val1_4567, _val1_4567);
__m128i _val1_67 = _mm_unpackhi_epi32(_val1_4567, _val1_4567);
__m128i _sl00 = _mm_mullo_epi16(_w0, _val0_01);
__m128i _sh00 = _mm_mulhi_epi16(_w0, _val0_01);
__m128i _sl10 = _mm_mullo_epi16(_w0, _val1_01);
__m128i _sh10 = _mm_mulhi_epi16(_w0, _val1_01);
__m128i _sl01 = _mm_mullo_epi16(_w1, _val0_23);
__m128i _sh01 = _mm_mulhi_epi16(_w1, _val0_23);
__m128i _sl11 = _mm_mullo_epi16(_w1, _val1_23);
__m128i _sh11 = _mm_mulhi_epi16(_w1, _val1_23);
__m128i _sl02 = _mm_mullo_epi16(_w2, _val0_45);
__m128i _sh02 = _mm_mulhi_epi16(_w2, _val0_45);
__m128i _sl12 = _mm_mullo_epi16(_w2, _val1_45);
__m128i _sh12 = _mm_mulhi_epi16(_w2, _val1_45);
__m128i _sl03 = _mm_mullo_epi16(_w3, _val0_67);
__m128i _sh03 = _mm_mulhi_epi16(_w3, _val0_67);
__m128i _sl13 = _mm_mullo_epi16(_w3, _val1_67);
__m128i _sh13 = _mm_mulhi_epi16(_w3, _val1_67);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl10, _sh10));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl10, _sh10));
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl01, _sh01));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl01, _sh01));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl11, _sh11));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl11, _sh11));
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl02, _sh02));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl02, _sh02));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl12, _sh12));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl12, _sh12));
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl03, _sh03));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl03, _sh03));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl13, _sh13));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl13, _sh13));
#endif
r0 += 16;
k0 += 32;
}
#if __AVX2__
__m256i _sum0_2 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 2, 0, 0));
__m256i _sum1_3 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 3, 0, 1));
_sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3);
int sum[8];
_mm256_storeu_si256((__m256i*)sum, _sum0_2);
#else
_sum0 = _mm_add_epi32(_sum0, _sum1);
_sum2 = _mm_add_epi32(_sum2, _sum3);
int sum[8];
_mm_storeu_si128((__m128i*)sum, _sum0);
_mm_storeu_si128((__m128i*)(sum + 4), _sum2);
#endif
output0_tm[0] = sum[0];
output1_tm[0] = sum[1];
output2_tm[0] = sum[2];
output3_tm[0] = sum[3];
output0_tm[1] = sum[4];
output1_tm[1] = sum[5];
output2_tm[1] = sum[6];
output3_tm[1] = sum[7];
output0_tm += 2;
output1_tm += 2;
output2_tm += 2;
output3_tm += 2;
}
for (; i < tiles; i++)
{
#if __AVX2__
const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2);
#else
const short* r0 = bb2.row<const short>(i / 2 + i % 2);
#endif
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
#if __AVX2__
__m256i _sum0_1 = _mm256_setzero_si256();
#else
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
#endif
for (int j = 0; j < nn; j++)
{
// 0 1 2 3 4 5 6 7
__m128i _val = _mm_loadu_si128((const __m128i*)r0);
#if __AVX2__
__m256i _w01 = _mm256_loadu_si256((const __m256i*)k0);
__m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16));
#if __AVXVNNI__ || __AVX512VNNI__
// 0 1 0 1 x x x x
// 0 1 0 1 0 1 0 1
__m128i _val_01 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(0, 0, 0, 0));
__m128i _val_23 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(1, 1, 1, 1));
__m128i _val_45 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(2, 2, 2, 2));
__m128i _val_67 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(3, 3, 3, 3));
__m256i _val_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(_val_01), _val_23, 1);
__m256i _val_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(_val_45), _val_67, 1);
_sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val_0123);
_sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val_4567);
#else
// 0 0 1 1 2 2 3 3
// 4 4 5 5 6 6 7 7
__m256i _val_0123 = _mm256_castsi128_si256(_mm_unpacklo_epi16(_val, _val));
__m256i _val_4567 = _mm256_castsi128_si256(_mm_unpackhi_epi16(_val, _val));
_val_0123 = _mm256_permutevar8x32_epi32(_val_0123, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
_val_4567 = _mm256_permutevar8x32_epi32(_val_4567, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
__m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val_0123);
__m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val_0123);
__m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val_4567);
__m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val_4567);
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03));
#endif
#else
__m128i _w0 = _mm_loadu_si128((const __m128i*)k0);
__m128i _w1 = _mm_loadu_si128((const __m128i*)(k0 + 8));
__m128i _w2 = _mm_loadu_si128((const __m128i*)(k0 + 16));
__m128i _w3 = _mm_loadu_si128((const __m128i*)(k0 + 24));
// 0 0 1 1 2 2 3 3
// 4 4 5 5 6 6 7 7
__m128i _val_0123 = _mm_unpacklo_epi16(_val, _val);
__m128i _val_4567 = _mm_unpackhi_epi16(_val, _val);
__m128i _val01 = _mm_unpacklo_epi32(_val_0123, _val_0123);
__m128i _val23 = _mm_unpackhi_epi32(_val_0123, _val_0123);
__m128i _val45 = _mm_unpacklo_epi32(_val_4567, _val_4567);
__m128i _val67 = _mm_unpackhi_epi32(_val_4567, _val_4567);
__m128i _sl0 = _mm_mullo_epi16(_w0, _val01);
__m128i _sh0 = _mm_mulhi_epi16(_w0, _val01);
__m128i _sl1 = _mm_mullo_epi16(_w1, _val23);
__m128i _sh1 = _mm_mulhi_epi16(_w1, _val23);
__m128i _sl2 = _mm_mullo_epi16(_w2, _val45);
__m128i _sh2 = _mm_mulhi_epi16(_w2, _val45);
__m128i _sl3 = _mm_mullo_epi16(_w3, _val67);
__m128i _sh3 = _mm_mulhi_epi16(_w3, _val67);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0));
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl1, _sh1));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl1, _sh1));
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl2, _sh2));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl2, _sh2));
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl3, _sh3));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl3, _sh3));
#endif
r0 += 8;
k0 += 32;
}
#if __AVX2__
__m128i _sum0 = _mm256_extracti128_si256(_sum0_1, 0);
__m128i _sum1 = _mm256_extracti128_si256(_sum0_1, 1);
#endif
_sum0 = _mm_add_epi32(_sum0, _sum1);
int sum[4];
_mm_storeu_si128((__m128i*)sum, _sum0);
output0_tm[0] = sum[0];
output1_tm[0] = sum[1];
output2_tm[0] = sum[2];
output3_tm[0] = sum[3];
output0_tm += 1;
output1_tm += 1;
output2_tm += 1;
output3_tm += 1;
}
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
int* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p / 4 + p % 4);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __AVX2__
for (; i + 3 < tiles; i += 4)
{
    const short* r0 = bb2.row<const short>(i / 4);
    const short* k0 = kernel0_tm.row<const short>(r);
    // Two accumulators per tile (lo/hi halves of the widened 16x16->32 products):
    //   _sum0/_sum1 -> tile i+0, _sum2/_sum3 -> tile i+1,
    //   _sum4/_sum5 -> tile i+2, _sum6/_sum7 -> tile i+3
    __m128i _sum0 = _mm_setzero_si128();
    __m128i _sum1 = _mm_setzero_si128();
    __m128i _sum2 = _mm_setzero_si128();
    __m128i _sum3 = _mm_setzero_si128();
    __m128i _sum4 = _mm_setzero_si128();
    __m128i _sum5 = _mm_setzero_si128();
    __m128i _sum6 = _mm_setzero_si128();
    __m128i _sum7 = _mm_setzero_si128();
    for (int q = 0; q < inch; q++)
    {
        __m128i _val0 = _mm_loadu_si128((const __m128i*)r0);
        __m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8));
        __m128i _val2 = _mm_loadu_si128((const __m128i*)(r0 + 16));
        __m128i _val3 = _mm_loadu_si128((const __m128i*)(r0 + 24));
        __m128i _w0 = _mm_loadu_si128((const __m128i*)k0);
        // Widen the int16 products to int32 via mullo/mulhi + unpack.
        __m128i _sl0 = _mm_mullo_epi16(_val0, _w0);
        __m128i _sh0 = _mm_mulhi_epi16(_val0, _w0);
        __m128i _sl1 = _mm_mullo_epi16(_val1, _w0);
        __m128i _sh1 = _mm_mulhi_epi16(_val1, _w0);
        __m128i _sl2 = _mm_mullo_epi16(_val2, _w0);
        __m128i _sh2 = _mm_mulhi_epi16(_val2, _w0);
        __m128i _sl3 = _mm_mullo_epi16(_val3, _w0);
        __m128i _sh3 = _mm_mulhi_epi16(_val3, _w0);
        _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0));
        _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0));
        _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl1, _sh1));
        _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl1, _sh1));
        // BUG FIX: accumulate into _sum4.._sum7 themselves. The previous code
        // read _sum0.._sum3 here, which both discarded the running sums for
        // tiles 2/3 on every iteration and mixed in tiles 0/1's partial sums.
        _sum4 = _mm_add_epi32(_sum4, _mm_unpacklo_epi16(_sl2, _sh2));
        _sum5 = _mm_add_epi32(_sum5, _mm_unpackhi_epi16(_sl2, _sh2));
        _sum6 = _mm_add_epi32(_sum6, _mm_unpacklo_epi16(_sl3, _sh3));
        _sum7 = _mm_add_epi32(_sum7, _mm_unpackhi_epi16(_sl3, _sh3));
        k0 += 8;
        r0 += 16;
    }
    // Fold lo/hi accumulators, then horizontally reduce each tile to a scalar.
    _sum0 = _mm_add_epi32(_sum0, _sum1);
    _sum2 = _mm_add_epi32(_sum2, _sum3);
    _sum4 = _mm_add_epi32(_sum4, _sum5);
    _sum6 = _mm_add_epi32(_sum6, _sum7);
    output0_tm[0] = _mm_reduce_add_epi32(_sum0);
    output0_tm[1] = _mm_reduce_add_epi32(_sum2);
    output0_tm[2] = _mm_reduce_add_epi32(_sum4);
    output0_tm[3] = _mm_reduce_add_epi32(_sum6);
    output0_tm += 4;
}
#endif
for (; i + 1 < tiles; i += 2)
{
#if __AVX2__
const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2);
#else
const short* r0 = bb2.row<const short>(i / 2);
#endif
const short* k0 = kernel0_tm.row<const short>(r);
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
__m128i _sum2 = _mm_setzero_si128();
__m128i _sum3 = _mm_setzero_si128();
for (int q = 0; q < inch; q++)
{
__m128i _val0 = _mm_loadu_si128((const __m128i*)r0);
__m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8));
__m128i _w0 = _mm_loadu_si128((const __m128i*)k0);
__m128i _sl0 = _mm_mullo_epi16(_val0, _w0);
__m128i _sh0 = _mm_mulhi_epi16(_val0, _w0);
__m128i _sl1 = _mm_mullo_epi16(_val1, _w0);
__m128i _sh1 = _mm_mulhi_epi16(_val1, _w0);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl1, _sh1));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl1, _sh1));
k0 += 8;
r0 += 16;
}
_sum0 = _mm_add_epi32(_sum0, _sum1);
_sum2 = _mm_add_epi32(_sum2, _sum3);
output0_tm[0] = _mm_reduce_add_epi32(_sum0);
output0_tm[1] = _mm_reduce_add_epi32(_sum2);
output0_tm += 2;
}
for (; i < tiles; i++)
{
#if __AVX2__
const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2);
#else
const short* r0 = bb2.row<const short>(i / 2 + i % 2);
#endif
const short* k0 = kernel0_tm.row<const short>(r);
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
for (int q = 0; q < inch; q++)
{
__m128i _val = _mm_loadu_si128((const __m128i*)r0);
__m128i _w0 = _mm_loadu_si128((const __m128i*)k0);
__m128i _sl0 = _mm_mullo_epi16(_val, _w0);
__m128i _sh0 = _mm_mulhi_epi16(_val, _w0);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0));
k0 += 8;
r0 += 8;
}
_sum0 = _mm_add_epi32(_sum0, _sum1);
output0_tm[0] = _mm_reduce_add_epi32(_sum0);
output0_tm++;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 4u, 1, opt.workspace_allocator);
}
{
// const float otm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
int tmp[4][6];
// tile
for (int i = 0; i < outh / 4; i++)
{
for (int j = 0; j < outw / 4; j++)
{
// top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator);
const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 1;
const int* output0_tm_1 = output0_tm_0 + tiles * 1;
const int* output0_tm_2 = output0_tm_0 + tiles * 2;
const int* output0_tm_3 = output0_tm_0 + tiles * 3;
const int* output0_tm_4 = output0_tm_0 + tiles * 4;
const int* output0_tm_5 = output0_tm_0 + tiles * 5;
int* output0 = out0.row<int>(i * 4) + j * 4;
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
// TODO sse optimize
for (int m = 0; m < 5; m++)
{
int tmp02a = output0_tm_1[0] + output0_tm_2[0];
int tmp13a = output0_tm_1[0] - output0_tm_2[0];
int tmp02b = output0_tm_3[0] + output0_tm_4[0];
int tmp13b = output0_tm_3[0] - output0_tm_4[0];
tmp[0][m] = output0_tm_0[0] + tmp02a + tmp02b;
tmp[1][m] = tmp13a + tmp13b * 2;
tmp[2][m] = tmp02a + tmp02b * 4;
tmp[3][m] = output0_tm_5[0] * 4 + tmp13a + tmp13b * 8;
output0_tm_0 += tiles * 6;
output0_tm_1 += tiles * 6;
output0_tm_2 += tiles * 6;
output0_tm_3 += tiles * 6;
output0_tm_4 += tiles * 6;
output0_tm_5 += tiles * 6;
}
for (int m = 5; m < 6; m++)
{
int tmp02a = output0_tm_1[0] + output0_tm_2[0];
int tmp13a = output0_tm_1[0] - output0_tm_2[0];
int tmp02b = output0_tm_3[0] + output0_tm_4[0];
int tmp13b = output0_tm_3[0] - output0_tm_4[0];
tmp[0][m] = (output0_tm_0[0] + tmp02a + tmp02b) * 4;
tmp[1][m] = (tmp13a + tmp13b * 2) * 4;
tmp[2][m] = (tmp02a + tmp02b * 4) * 4;
tmp[3][m] = (output0_tm_5[0] * 4 + tmp13a + tmp13b * 8) * 4;
output0_tm_0 += tiles * 6;
output0_tm_1 += tiles * 6;
output0_tm_2 += tiles * 6;
output0_tm_3 += tiles * 6;
output0_tm_4 += tiles * 6;
output0_tm_5 += tiles * 6;
}
for (int m = 0; m < 4; m++)
{
const int* tmp0 = tmp[m];
int tmp02a = tmp0[1] + tmp0[2];
int tmp13a = tmp0[1] - tmp0[2];
int tmp02b = tmp0[3] + tmp0[4];
int tmp13b = tmp0[3] - tmp0[4];
output0[0] = (tmp0[0] + tmp02a + tmp02b) / 576;
output0[1] = (tmp13a + tmp13b * 2) / 576;
output0[2] = (tmp02a + tmp02b * 4) / 576;
output0[3] = (tmp0[5] + tmp13a + tmp13b * 8) / 576;
output0 += outw;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
measure.c | #include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
int collapse_cluster(FILE *input_fptr, FILE *output_fptr, int rank, int subcircuit_idx, int num_instance, int cluster_circ_size, int **correspondece_map, int num_effective_qubits, int num_collapsed);
float* measure_instance(int subcircuit_circ_size, char** meas, float *unmeasured_prob, int **correspondece_map, int num_effective);
void measure(char *eval_folder, char *eval_mode, int subcircuit_idx, int num_eval_files, int *eval_files, int rank);
int** effective_full_state_correspondence(int cluster_circ_size, char **meas);
int* decToBinary(int num, int num_digits);
int binaryToDec(int *bin_num, int num_digits);
void print_int_arr(int *arr, int num_elements);
void print_float_arr(float *arr, int num_elements);
int search_element(int *arr, int arr_size, int element);
int combine_effective_O_state(int *bin_effective_state, int num_effective_qubits, int *bin_O_state, int num_O_qubits, int *O_qubit_positions);
float print_log(double log_time, double elapsed_time, int num_finished_jobs, int num_total_jobs, double log_frequency, int rank,int subcircuit_idx);
double get_sec();
/*
 * Entry point.  Expected argv layout:
 *   argv[1] rank            argv[2] eval_folder   argv[3] eval_mode
 *   argv[4] full_circ_size  argv[5] subcircuit_idx
 *   argv[6] num_eval_files  argv[7..] one eval file id per file
 */
int main(int argc, char** argv) {
    if (argc < 7) {
        fprintf(stderr,
                "Usage: %s rank eval_folder eval_mode full_circ_size subcircuit_idx num_eval_files [eval_file_ids...]\n",
                argv[0]);
        return 1;
    }
    int rank = atoi(argv[1]);
    char *eval_folder = argv[2];
    char *eval_mode = argv[3];
    int full_circ_size = atoi(argv[4]); /* parsed for CLI compatibility; unused here */
    (void)full_circ_size;
    int subcircuit_idx = atoi(argv[5]);
    int num_eval_files = atoi(argv[6]);
    /* Make sure the caller actually supplied num_eval_files trailing ids. */
    if (num_eval_files < 0 || argc < 7 + num_eval_files) {
        fprintf(stderr, "%s: expected %d eval file ids, got %d\n",
                argv[0], num_eval_files, argc - 7);
        return 1;
    }
    int *eval_files = calloc(num_eval_files, sizeof(int));
    if (eval_files == NULL && num_eval_files > 0) {
        fprintf(stderr, "%s: out of memory\n", argv[0]);
        return 1;
    }
    int i;
    for (i = 0; i < num_eval_files; i++) {
        eval_files[i] = atoi(argv[7 + i]);
    }
    measure(eval_folder, eval_mode, subcircuit_idx, num_eval_files, eval_files, rank);
    free(eval_files);
    // printf("%s subcircuit %d (%d instances) measure rank %d DONE\n",eval_folder,subcircuit_idx,num_eval_files,rank);
    return 0;
}
/* Free the per-qubit label strings scanned from a raw file header. */
static void free_labels(char **labels, int count) {
    int q;
    for (q = 0; q < count; q++) {
        free(labels[q]);
    }
}

/*
 * Post-process raw subcircuit evaluation files.
 * For each id in eval_files, reads <eval_folder>/raw_<subcircuit_idx>_<id>.txt,
 * collapses the probability vector onto the effective qubits (except in
 * "runtime" mode, which just copies a single scalar through), writes
 * <eval_folder>/measured_<subcircuit_idx>_<id>.txt, and deletes the raw file.
 * A per-rank summary file records the total wall-clock time.
 */
void measure(char *eval_folder, char *eval_mode, int subcircuit_idx, int num_eval_files, int *eval_files, int rank) {
    if (num_eval_files <= 0) {
        return; /* nothing to do; also guards the eval_files[0] read below */
    }
    /* First pass over the first file's header only: learn circuit sizes and
       measurement bases so the state-correspondence map can be built once. */
    char *eval_file = malloc(256 * sizeof(char));
    snprintf(eval_file, 256, "%s/raw_%d_%d.txt", eval_folder, subcircuit_idx, eval_files[0]);
    FILE *eval_fptr = fopen(eval_file, "r");
    if (eval_fptr == NULL) {
        fprintf(stderr, "measure: cannot open %s\n", eval_file);
        free(eval_file);
        return;
    }
    int subcircuit_circ_size, num_effective;
    /* Header line format: "d=<num qubits> effective=<num effective qubits>" */
    if (fscanf(eval_fptr, "d=%d effective=%d\n", &subcircuit_circ_size, &num_effective) != 2
        || subcircuit_circ_size <= 0) {
        fprintf(stderr, "measure: malformed header in %s\n", eval_file);
        fclose(eval_fptr);
        free(eval_file);
        return;
    }
    char *init[subcircuit_circ_size], *meas[subcircuit_circ_size];
    int qubit_ctr;
    for (qubit_ctr = 0; qubit_ctr < subcircuit_circ_size; qubit_ctr++) {
        init[qubit_ctr] = malloc(16 * sizeof(char));
        fscanf(eval_fptr, "%15s ", init[qubit_ctr]); /* bounded: buffer is 16 bytes */
    }
    for (qubit_ctr = 0; qubit_ctr < subcircuit_circ_size; qubit_ctr++) {
        meas[qubit_ctr] = malloc(16 * sizeof(char));
        fscanf(eval_fptr, "%15s ", meas[qubit_ctr]);
    }
    free(eval_file);
    fclose(eval_fptr);
    int **correspondece_map;
    if (strcmp(eval_mode, "runtime") == 0) {
        /* Placeholder allocation: the map is never dereferenced in runtime mode. */
        correspondece_map = (int **)malloc(sizeof(int *) * 1);
    }
    else {
        correspondece_map = effective_full_state_correspondence(subcircuit_circ_size, meas);
    }
    /* The header-scan buffers are not needed past this point.
       NOTE(review): assumes effective_full_state_correspondence copies what it
       needs from meas rather than retaining the pointers — confirm. */
    free_labels(init, subcircuit_circ_size);
    free_labels(meas, subcircuit_circ_size);
    int eval_file_ctr;
    double total_measure_time = 0;
    double log_time = 0;
    for (eval_file_ctr = 0; eval_file_ctr < num_eval_files; eval_file_ctr++) {
        double measure_begin = get_sec();
        char *eval_file = malloc(256 * sizeof(char));
        snprintf(eval_file, 256, "%s/raw_%d_%d.txt", eval_folder, subcircuit_idx, eval_files[eval_file_ctr]);
        FILE *eval_fptr = fopen(eval_file, "r");
        if (eval_fptr == NULL) {
            /* Skip missing/unreadable inputs instead of crashing in fscanf(NULL). */
            fprintf(stderr, "measure: cannot open %s, skipping\n", eval_file);
            free(eval_file);
            continue;
        }
        int subcircuit_circ_size, num_effective;
        if (fscanf(eval_fptr, "d=%d effective=%d\n", &subcircuit_circ_size, &num_effective) != 2
            || subcircuit_circ_size <= 0) {
            fprintf(stderr, "measure: malformed header in %s, skipping\n", eval_file);
            fclose(eval_fptr);
            free(eval_file);
            continue;
        }
        char *init[subcircuit_circ_size], *meas[subcircuit_circ_size];
        int qubit_ctr;
        for (qubit_ctr = 0; qubit_ctr < subcircuit_circ_size; qubit_ctr++) {
            init[qubit_ctr] = malloc(16 * sizeof(char));
            fscanf(eval_fptr, "%15s ", init[qubit_ctr]);
        }
        for (qubit_ctr = 0; qubit_ctr < subcircuit_circ_size; qubit_ctr++) {
            meas[qubit_ctr] = malloc(16 * sizeof(char));
            fscanf(eval_fptr, "%15s ", meas[qubit_ctr]);
        }
        long long int state_ctr;
        long long int num_effective_states = (long long int)pow(2, num_effective);
        char *meas_file = malloc(256 * sizeof(char));
        snprintf(meas_file, 256, "%s/measured_%d_%d.txt", eval_folder, subcircuit_idx, eval_files[eval_file_ctr]);
        FILE *meas_fptr = fopen(meas_file, "w");
        if (meas_fptr == NULL) {
            fprintf(stderr, "measure: cannot create %s, skipping\n", meas_file);
            free_labels(init, subcircuit_circ_size);
            free_labels(meas, subcircuit_circ_size);
            fclose(eval_fptr);
            free(eval_file);
            free(meas_file);
            continue;
        }
        if (strcmp(eval_mode, "runtime") == 0) {
            /* Runtime mode: the raw file carries a single probability scalar. */
            float measured_prob;
            fscanf(eval_fptr, "%f ", &measured_prob);
            fprintf(meas_fptr, "%e ", measured_prob);
        }
        else {
            long long int unmeasured_len = (long long int)pow(2, subcircuit_circ_size);
            float *unmeasured_prob = malloc(unmeasured_len * sizeof(float));
            if (unmeasured_prob == NULL) {
                fprintf(stderr, "measure: out of memory for %s\n", eval_file);
            }
            else {
                for (state_ctr = 0; state_ctr < unmeasured_len; state_ctr++) {
                    fscanf(eval_fptr, "%f ", &unmeasured_prob[state_ctr]);
                }
                float *measured_prob = measure_instance(subcircuit_circ_size, meas, unmeasured_prob, correspondece_map, num_effective);
                for (state_ctr = 0; state_ctr < num_effective_states; state_ctr++) {
                    fprintf(meas_fptr, "%e ", measured_prob[state_ctr]);
                }
                /* measure_instance returns unmeasured_prob itself when no qubit
                   is collapsed; guard against a double free in that case. */
                if (measured_prob != unmeasured_prob) {
                    free(measured_prob);
                }
                free(unmeasured_prob);
            }
        }
        free_labels(init, subcircuit_circ_size);
        free_labels(meas, subcircuit_circ_size);
        remove(eval_file);
        free(eval_file);
        fclose(eval_fptr);
        free(meas_file);
        fclose(meas_fptr);
        log_time += get_sec() - measure_begin;
        total_measure_time += get_sec() - measure_begin;
        // NOTE: log_frequency is hard coded here
        log_time = print_log(log_time, total_measure_time, eval_file_ctr + 1, num_eval_files, 300, rank, subcircuit_idx);
    }
    char *summary_file = malloc(256 * sizeof(char));
    snprintf(summary_file, 256, "%s/rank_%d_summary.txt", eval_folder, rank);
    FILE *summary_fptr = fopen(summary_file, "w");
    if (summary_fptr == NULL) {
        fprintf(stderr, "measure: cannot create %s\n", summary_file);
    }
    else {
        fprintf(summary_fptr, "Total measure time = %e\n", total_measure_time);
        fprintf(summary_fptr, "measure DONE\n");
        fclose(summary_fptr);
    }
    free(summary_file);
    if (strcmp(eval_mode, "runtime") == 0) {
        /* Only the runtime-mode placeholder is freed here: the row count and
           ownership of the real correspondence map belong to
           effective_full_state_correspondence (definition not in view). */
        free(correspondece_map);
    }
    return;
}
/*
 * Collapse a raw probability vector over all 2^subcircuit_circ_size basis
 * states onto the 2^num_effective effective qubits.
 *
 * For each effective state, contributions from every traced-out ("O")
 * qubit configuration are accumulated with a sign factor sigma: each 1-bit
 * on a qubit whose measurement basis is neither "I" nor "comp" flips the
 * sign (a -1 Pauli eigenvalue).
 *
 * Ownership: when num_effective == subcircuit_circ_size the input buffer is
 * returned as-is (caller keeps ownership); otherwise a freshly calloc'd
 * buffer of length 2^num_effective is returned and the caller must free it.
 */
float* measure_instance(int subcircuit_circ_size, char** meas, float *unmeasured_prob, int **correspondece_map, int num_effective) {
    int num_O_qubits = subcircuit_circ_size - num_effective;
    if (num_effective == subcircuit_circ_size) {
        /* Nothing is traced out; the input already is the answer. */
        return unmeasured_prob;
    }
    else {
        long long int measured_len = (long long int) pow(2, num_effective);
        float *measured_prob = calloc(measured_len, sizeof(float));
        long long int measured_state_ctr;
        for (measured_state_ctr = 0; measured_state_ctr < measured_len; measured_state_ctr++) {
            int O_state_ctr;
            int num_O_states = (int) pow(2, num_O_qubits);
            for (O_state_ctr = 0; O_state_ctr < num_O_states; O_state_ctr++) {
                int full_state = correspondece_map[measured_state_ctr][O_state_ctr];
                int *bin_full_state = decToBinary(full_state, subcircuit_circ_size);
                int sigma = 1;
                int qubit_ctr;
                for (qubit_ctr = 0; qubit_ctr < subcircuit_circ_size; qubit_ctr++) {
                    /* meas[] is indexed from the other end of the bit string,
                       hence the subcircuit_circ_size-1-qubit_ctr reversal. */
                    if (bin_full_state[qubit_ctr] == 1 &&
                        strcmp(meas[subcircuit_circ_size - 1 - qubit_ctr], "I") != 0 &&
                        strcmp(meas[subcircuit_circ_size - 1 - qubit_ctr], "comp") != 0) {
                        sigma *= -1;
                    }
                }
                free(bin_full_state); /* BUG FIX: decToBinary allocates; this was leaked on every iteration */
                measured_prob[measured_state_ctr] += sigma * unmeasured_prob[full_state];
            }
            /* Sanity guard: a (signed) probability sum should never be this large. */
            if (measured_prob[measured_state_ctr] > 10) {
                printf("Something Wrong\n");
                exit(0);
            }
        }
        return measured_prob;
    }
}
/*
 * Build the lookup table mapping each effective basis state to the set of
 * full-register basis states it aggregates over.
 *
 * meas[q] == "comp" marks qubit q as effective (kept); any other basis
 * label marks it as a traced-out "O" qubit, whose position is recorded.
 *
 * Returns a malloc'd table of shape [2^num_effective][2^num_O]; the caller
 * owns both the row array and each row.
 */
int** effective_full_state_correspondence(int cluster_circ_size, char **meas) {
    int num_effective_qubits = 0;
    int num_O_qubits = 0;
    int qubit_ctr;
    int O_qubit_positions[cluster_circ_size];
    for (qubit_ctr = 0; qubit_ctr < cluster_circ_size; qubit_ctr++) {
        if (strcmp(meas[qubit_ctr], "comp") == 0) {
            num_effective_qubits++;
        }
        else {
            O_qubit_positions[num_O_qubits] = qubit_ctr;
            num_O_qubits++;
        }
    }
    int num_O_states = (int) pow(2, num_O_qubits);
    int num_effective_states = (int) pow(2, num_effective_qubits);
    int effective_state;
    int **correspondece_map = (int **)malloc(sizeof(int *)*num_effective_states);
    for (effective_state = 0; effective_state < num_effective_states; effective_state++) {
        int *bin_effective_state = decToBinary(effective_state, num_effective_qubits);
        int O_state;
        correspondece_map[effective_state] = (int *)malloc(sizeof(int)*num_O_states);
        for (O_state = 0; O_state < num_O_states; O_state++) {
            int *bin_O_state = decToBinary(O_state, num_O_qubits);
            int full_state = combine_effective_O_state(bin_effective_state, num_effective_qubits, bin_O_state, num_O_qubits, O_qubit_positions);
            correspondece_map[effective_state][O_state] = full_state;
            free(bin_O_state); /* BUG FIX: was leaked on every inner iteration */
        }
        free(bin_effective_state); /* BUG FIX: was leaked on every outer iteration */
    }
    return correspondece_map;
}
/* Decompose num into a malloc'd array of num_digits bits, most significant
   bit first. The caller owns (and must free) the returned array. */
int* decToBinary(int num, int num_digits) {
    int *bits = malloc(num_digits * sizeof *bits);
    for (int pos = 0; pos < num_digits; pos++) {
        bits[pos] = (num >> (num_digits - 1 - pos)) & 1;
    }
    return bits;
}
/* Interpret bin_num (most significant bit first) as an unsigned binary
   number; entries other than exactly 1 contribute nothing. */
int binaryToDec(int *bin_num, int num_digits) {
    int value = 0;
    for (int pos = 0; pos < num_digits; pos++) {
        value = (value << 1) | (bin_num[pos] == 1);
    }
    return value;
}
/* Print an int array to stdout, eliding the middle (first 5 " ... " last 5)
   when it has more than 10 entries; always ends with an element count. */
void print_int_arr(int *arr, int num_elements) {
    if (num_elements <= 10) {
        for (int idx = 0; idx < num_elements; idx++)
            printf("%d ", arr[idx]);
    } else {
        for (int idx = 0; idx < 5; idx++)
            printf("%d ", arr[idx]);
        printf(" ... ");
        for (int idx = num_elements - 5; idx < num_elements; idx++)
            printf("%d ", arr[idx]);
    }
    printf(" = %d elements\n", num_elements);
}
/* Print a float array to stdout in %e format, eliding the middle
   (first 5 " ... " last 5) beyond 10 entries; ends with an element count. */
void print_float_arr(float *arr, int num_elements) {
    if (num_elements <= 10) {
        for (int idx = 0; idx < num_elements; idx++)
            printf("%e ", arr[idx]);
    } else {
        for (int idx = 0; idx < 5; idx++)
            printf("%e ", arr[idx]);
        printf(" ... ");
        for (int idx = num_elements - 5; idx < num_elements; idx++)
            printf("%e ", arr[idx]);
    }
    printf(" = %d elements\n", num_elements);
}
/* Linear search: return the index of the first occurrence of element in
   arr (length arr_size), or -1 if it is absent. */
int search_element(int *arr, int arr_size, int element) {
    int idx = 0;
    while (idx < arr_size) {
        if (arr[idx] == element)
            return idx;
        idx++;
    }
    return -1;
}
/*
 * Merge an effective-qubit bit pattern with an O-(traced-out-)qubit bit
 * pattern into a single full-register basis-state index.
 *
 * bin_effective_state : bits of the effective state (from decToBinary, MSB first)
 * bin_O_state         : bits of the O state (MSB first)
 * O_qubit_positions   : qubit positions (as produced by
 *                       effective_full_state_correspondence) occupied by O qubits
 *
 * Returns the decimal index of the combined full state.
 */
int combine_effective_O_state(int *bin_effective_state, int num_effective_qubits, int *bin_O_state, int num_O_qubits, int *O_qubit_positions) {
// printf("effective_state : ");
// print_int_arr(bin_effective_state,num_effective_qubits);
// printf(", inserting O_state ");
// print_int_arr(bin_O_state,num_O_qubits);
// printf(" at O positions ");
// print_int_arr(O_qubit_positions,num_O_qubits);
// printf("\n");
int bin_full_state[num_effective_qubits+num_O_qubits];
int full_state_ctr;
int effective_state_ctr = 0;
int O_state_ctr = 0;
/* Walk qubit positions low-to-high; note bin_full_state is filled from the
   END backwards (index num_qubits-1-full_state_ctr) because the bit arrays
   are MSB-first while positions count from the low end. */
for (full_state_ctr=0;full_state_ctr<num_effective_qubits+num_O_qubits;full_state_ctr++) {
int O_qubit_position = search_element(O_qubit_positions, num_O_qubits, full_state_ctr);
if (O_qubit_position==-1) {
/* Not an O position: consume the next effective bit (also taken from the
   tail of its MSB-first array). */
bin_full_state[num_effective_qubits+num_O_qubits-1-full_state_ctr] = bin_effective_state[num_effective_qubits - 1 - effective_state_ctr];
effective_state_ctr++;
}
else {
/* O position: take the O bit whose index matches this position's rank in
   O_qubit_positions. NOTE(review): O_state_ctr is declared but never
   advanced/used — indexing relies on search_element's rank instead. */
bin_full_state[num_effective_qubits+num_O_qubits-1-full_state_ctr] = bin_O_state[O_qubit_position];
}
}
int full_state = binaryToDec(bin_full_state,num_effective_qubits+num_O_qubits);
// printf("Full state:");
// print_int_arr(bin_full_state,num_effective_qubits+num_O_qubits);
// printf(" --> %d\n",full_state);
return full_state;
}
/* Emit a progress/ETA line once the accumulated log_time exceeds
   log_frequency and reset the accumulator to 0; otherwise return the
   accumulator unchanged. The caller stores the return value back into its
   running log_time. */
float print_log(double log_time, double elapsed_time, int num_finished_jobs, int num_total_jobs, double log_frequency, int rank,int subcircuit_idx) {
    if (log_time <= log_frequency) {
        return log_time; /* not yet due: pass the accumulator through */
    }
    double eta = elapsed_time / num_finished_jobs * num_total_jobs - elapsed_time;
    printf("Meas_rank %d measured subcircuit %d %d/%d, elapsed = %e, ETA = %e\n",
           rank, subcircuit_idx, num_finished_jobs, num_total_jobs, elapsed_time, eta);
    return 0;
}
/* Wall-clock time in seconds since the epoch, with microsecond resolution
   (gettimeofday-based). */
double get_sec() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec + 1e-6 * now.tv_usec;
}
|
verlet_stream_and_noise.c |
#include "cloud.h"
#include "verlet.h"
/*
 * Advance Np particle positions X by one step: deterministic streaming
 * along the velocities U, plus (when dt_noise is nonzero) an additive
 * random kick of magnitude dt_noise per coordinate.
 *
 * NOTE(review): the kick dt_noise*(2*r - 1) assumes cse6230rand_hash fills
 * rval with draws in [0,1) — confirm against the cse6230rand docs.
 * Positions are updated in place; U is read-only.
 */
void
verlet_step_stream_and_noise (int Np, double dt_stream, double dt_noise,
                              cse6230rand_t *rand,
                              double *restrict X[3], const double *restrict U[3])
{
  if (!dt_noise) {
    /* Noise-free path: pure streaming, no RNG work at all. */
    #pragma omp parallel for
    for (int p = 0; p < Np; p++) {
      for (int dim = 0; dim < 3; dim++) {
        X[dim][p] += dt_stream * U[dim][p];
      }
    }
    return;
  }

  /* One tag per step; each particle hashes (tag, index) so draws are
     independent of the thread schedule. */
  size_t tag = cse6230rand_get_tag (rand);
  #pragma omp parallel for
  for (int p = 0; p < Np; p++) {
    double draw[4];
    cse6230rand_hash (rand, tag, p, 0, 0, draw);
    for (int dim = 0; dim < 3; dim++) {
      X[dim][p] += dt_stream * U[dim][p] + dt_noise * (2. * draw[dim] - 1.);
    }
  }
}
|
gradfm_adj_mex.c | #include <inttypes.h>
#include <omp.h>
#include "mex.h"
void gradfm_adjf(float *du,
const float *x, const float *y, const float *z,
const uint8_t *G, const double *h, const size_t *sz);
void gradfm_adjd(double *du,
const double *x, const double *y, const double *z,
const uint8_t *G, const double *h, const size_t *sz);
/*
 * MEX gateway: gradfm_adj_mex(du, x, y, z, G, h)
 *
 * Dispatches to the single- or double-precision adjoint-gradient kernel
 * based on the class of du (prhs[0]).
 *
 * NOTE(review): du is an INPUT argument that is written in place — MEX
 * inputs are normally treated as const, so callers must guarantee du is
 * not shared (e.g. not a copy-on-write alias); confirm call sites.
 * NOTE(review): x, y, z are assumed to have the same class (single/double)
 * as du, and du is assumed to be 3-D — neither is validated here.
 */
void
mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
if ((nrhs != 6) || (nlhs > 1)) {
mexErrMsgTxt("Usage: gradfm_adj_mex(du, x, y, z, G, h);");
return;
}
/* Mask, spacings, and output dimensions are precision-independent. */
const uint8_t *G = (const uint8_t *)mxGetData(prhs[4]);
const double *h = (const double *)mxGetData(prhs[5]);
const size_t *sz = (const size_t *)mxGetDimensions(prhs[0]);
if (mxIsSingle(prhs[0])) {
float *du = (float *)mxGetData(prhs[0]);
const float *x = (const float *)mxGetData(prhs[1]);
const float *y = (const float *)mxGetData(prhs[2]);
const float *z = (const float *)mxGetData(prhs[3]);
gradfm_adjf(du, x, y, z, G, h, sz);
} else {
double *du = (double *)mxGetData(prhs[0]);
const double *x = (const double *)mxGetData(prhs[1]);
const double *y = (const double *)mxGetData(prhs[2]);
const double *z = (const double *)mxGetData(prhs[3]);
gradfm_adjd(du, x, y, z, G, h, sz);
}
/* Optional scalar return (1.0) so callers can request an output. */
if (nlhs == 1) {
plhs[0] = mxCreateDoubleScalar(1.0);
}
return;
}
/*
 * Single-precision adjoint of the masked forward-difference gradient
 * (negative divergence) over an nx*ny*nz volume.
 *
 * du      : output; written only at voxels where G[l] != 0.
 * x, y, z : gradient components along dimensions 0, 1, 2.
 * G       : inclusion mask; differences are taken only across faces whose
 *           neighbouring voxel is also inside the mask.
 * h       : grid spacing per dimension (3 doubles); each term is scaled
 *           by -1/h[d].
 * sz      : volume dimensions {nx, ny, nz}.
 *
 * Per component: use the backward neighbour if masked-in, else the forward
 * neighbour, else contribute zero.
 *
 * BUG FIX: dx/dy/dz were function-scope variables missing from the OpenMP
 * private() clause, so all threads raced on them and could corrupt du.
 * They are now local to the voxel loop body (implicitly private).
 */
void
gradfm_adjf(float *du,
    const float *x, const float *y, const float *z,
    const uint8_t *G, const double *h, const size_t *sz)
{
  size_t i, j, k;
  size_t l;
  const size_t nx = sz[0];
  const size_t ny = sz[1];
  const size_t nz = sz[2];
  const size_t nxny = nx*ny;
  const size_t nxnynz = nx*ny*nz;
  const size_t NX = nx-1;               /* last in-bounds i index */
  const size_t NY = nx*(ny-1);          /* last in-bounds j offset */
  const size_t NZ = nxny*(nz-1);        /* last in-bounds k offset */
  const float hx = (float)(-1.0/h[0]);
  const float hy = (float)(-1.0/h[1]);
  const float hz = (float)(-1.0/h[2]);
#pragma omp parallel for private(i,j,k,l) schedule(static) \
    if(nxnynz > 16*16*16)
  for(k = 0; k < nxnynz; k += nxny) {
    for(j = 0; j < nxny; j += nx) {
      l = j + k;
      for(i = 0; i < nx; ++i, ++l) {
        if (G[l]) {
          const float dz =
            (k > 0)  && G[l-nxny] ? hz*(z[l]-z[l-nxny]) :
            (k < NZ) && G[l+nxny] ? hz*(z[l+nxny]-z[l]) :
            0.0f;
          const float dy =
            (j > 0)  && G[l-nx]   ? hy*(y[l]-y[l-nx]) :
            (j < NY) && G[l+nx]   ? hy*(y[l+nx]-y[l]) :
            0.0f;
          const float dx =
            (i > 0)  && G[l-1]    ? hx*(x[l]-x[l-1]) :
            (i < NX) && G[l+1]    ? hx*(x[l+1]-x[l]) :
            0.0f;
          du[l] = dx + dy + dz;
        }
      }
    }
  }
  return;
}
/*
 * Double-precision adjoint of the masked forward-difference gradient
 * (negative divergence); see gradfm_adjf for the full contract — this is
 * the same computation with double arithmetic.
 *
 * BUG FIX: dx/dy/dz were function-scope variables missing from the OpenMP
 * private() clause, so all threads raced on them and could corrupt du.
 * They are now local to the voxel loop body (implicitly private).
 */
void
gradfm_adjd(double *du,
    const double *x, const double *y, const double *z,
    const uint8_t *G, const double *h, const size_t *sz)
{
  size_t i, j, k;
  size_t l;
  const size_t nx = sz[0];
  const size_t ny = sz[1];
  const size_t nz = sz[2];
  const size_t nxny = nx*ny;
  const size_t nxnynz = nx*ny*nz;
  const size_t NX = nx-1;               /* last in-bounds i index */
  const size_t NY = nx*(ny-1);          /* last in-bounds j offset */
  const size_t NZ = nxny*(nz-1);        /* last in-bounds k offset */
  const double hx = -1.0/h[0];
  const double hy = -1.0/h[1];
  const double hz = -1.0/h[2];
#pragma omp parallel for private(i,j,k,l) schedule(static) \
    if(nxnynz > 16*16*16)
  for(k = 0; k < nxnynz; k += nxny) {
    for(j = 0; j < nxny; j += nx) {
      l = j + k;
      for(i = 0; i < nx; ++i, ++l) {
        if (G[l]) {
          const double dz =
            (k > 0)  && G[l-nxny] ? hz*(z[l]-z[l-nxny]) :
            (k < NZ) && G[l+nxny] ? hz*(z[l+nxny]-z[l]) :
            0.0;
          const double dy =
            (j > 0)  && G[l-nx]   ? hy*(y[l]-y[l-nx]) :
            (j < NY) && G[l+nx]   ? hy*(y[l+nx]-y[l]) :
            0.0;
          const double dx =
            (i > 0)  && G[l-1]    ? hx*(x[l]-x[l-1]) :
            (i < NX) && G[l+1]    ? hx*(x[l+1]-x[l]) :
            0.0;
          du[l] = dx + dy + dz;
        }
      }
    }
  }
  return;
}
|
main.h | #ifndef _MAIN_CK_H
#define _MAIN_CK_H
#include <util/time_cps.h>
#include <alg/a2a/grid_lanczos.h>
#include <alg/ktopipi_jobparams.h>
//Useful functions for main programs
CPS_START_NAMESPACE
void ReadGaugeField(const MeasArg &meas_arg, bool double_latt = false){
double time = -dclock();
const char *cname = "main";
const char *fname = "ReadGaugeField";
GwilsonFdwf lat;
std::ostringstream os;
os << meas_arg.GaugeStem << '.' << meas_arg.TrajCur;
std::string lat_file = os.str();
ReadLatticeParallel rl;
if(double_latt) rl.disableGparityReconstructUstarField();
rl.read(lat,lat_file.c_str());
if(!rl.good())ERR.General(cname,fname,"Failed read lattice %s",lat_file.c_str());
time += dclock();
print_time(cname,fname,time);
}
//! Restore the global RNG state from "<RNGStem>.<TrajCur>", timing the read.
//! (double_latt is unused here; kept for interface symmetry with
//! ReadGaugeField.)
void ReadRngFile(const MeasArg &meas_arg, bool double_latt = false){
  const char *cname = "main";
  const char *fname = "ReadRngFile";
  double time = -dclock();

  std::ostringstream path;
  path << meas_arg.RNGStem << '.' << meas_arg.TrajCur;
  const std::string rng_file = path.str();
  if(!LRG.Read(rng_file.c_str())) ERR.General(cname,fname,"Failed read rng file %s",rng_file.c_str());

  time += dclock();
  print_time(cname,fname,time);
}
#ifdef USE_BFM
// Change the quark mass on an existing BFM solver instance.
// The 5d operator caches mass-dependent data, so it must be torn down and
// re-initialised AFTER the assignment for the new mass to take effect —
// do not reorder these calls.
template<typename mf_Float>
void setMass(bfm_evo<mf_Float> &dwf, const double &mass){
dwf.mass = mass;
dwf.GeneralisedFiveDimEnd(); // reinitialising since using a new mass
dwf.GeneralisedFiveDimInit();
}
// Populate a bfmarg solver-argument struct from the global job parameters
// (GJP): local lattice, node grid, G-parity flags, comms locality,
// preconditioning, and solver defaults (mass/residual are placeholders —
// callers such as setMass/Lanczos override the mass later).
void setup_bfmargs(bfmarg &dwfa, int nthread, const BfmSolver &solver = HmCayleyTanh, const double mobius_scale = 1.){
if(!UniqueID()) printf("Setting up bfmargs\n");
omp_set_num_threads(nthread);
// Local (per-node) lattice extent in each direction.
dwfa.node_latt[0] = GJP.XnodeSites();
dwfa.node_latt[1] = GJP.YnodeSites();
dwfa.node_latt[2] = GJP.ZnodeSites();
dwfa.node_latt[3] = GJP.TnodeSites();
multi1d<int> ncoor(4);
multi1d<int> procs(4);
for(int i=0;i<4;i++){ ncoor[i] = GJP.NodeCoor(i); procs[i] = GJP.Nodes(i); }
if(GJP.Gparity()){
dwfa.gparity = 1;
// NOTE(review): the banner below is boss-only but the per-direction
// printf("%d ",d) runs on every rank — confirm this is intended.
if(!UniqueID()) printf("G-parity directions: ");
for(int d=0;d<3;d++)
if(GJP.Bc(d) == BND_CND_GPARITY){ dwfa.gparity_dir[d] = 1; printf("%d ",d); }
else dwfa.gparity_dir[d] = 0;
// Node grid and this node's coordinate, needed by BFM for G-parity comms.
for(int d=0;d<4;d++){
dwfa.nodes[d] = procs[d];
dwfa.ncoor[d] = ncoor[d];
}
if(!UniqueID()) printf("\n");
}
// Verbose, non-reproducibility-checked mode; fix the global thread count.
dwfa.verbose=1;
dwfa.reproduce=0;
bfmarg::Threads(nthread);
bfmarg::Reproduce(0);
bfmarg::ReproduceChecksum(0);
bfmarg::ReproduceMasterCheck(0);
bfmarg::Verbose(1);
// A direction is "local" (no inter-node comms) iff only one node spans it.
for(int mu=0;mu<4;mu++){
if ( procs[mu]>1 ) {
dwfa.local_comm[mu] = 0;
if(!UniqueID()) printf("Non-local comms in direction %d\n",mu);
} else {
dwfa.local_comm[mu] = 1;
if(!UniqueID()) printf("Local comms in direction %d\n",mu);
}
}
dwfa.precon_5d = 1;
if(solver == HmCayleyTanh){
dwfa.precon_5d = 0; //mobius uses 4d preconditioning
dwfa.mobius_scale = mobius_scale;
}
dwfa.Ls = GJP.SnodeSites();
dwfa.solver = solver;
dwfa.M5 = toDouble(GJP.DwfHeight());
// Placeholder mass/CSW/iteration/residual defaults; mass is reset per use.
dwfa.mass = toDouble(0.01);
dwfa.Csw = 0.0;
dwfa.max_iter = 20000;
dwfa.residual = 1e-08;
if(!UniqueID()) printf("Finished setting up bfmargs\n");
}
// Check the residual ||M M^dag v - lambda v|| for every eigenpair produced
// by a BFM Lanczos run and print it (one "index norm" line per vector).
// singleprec_evecs indicates eig.bq stores floats that must be widened to
// double before applying the operator.
void test_eigenvectors(BFM_Krylov::Lanczos_5d<double> &eig, bfm_evo<double> & dwf, bool singleprec_evecs){
// Number of real components in a checkerboarded 5d fermion on this node.
// NOTE(review): 24 presumably = 12 complex (3 colour x 4 spin) — confirm.
const int len = 24 * dwf.node_cbvol * (1 + dwf.gparity) * dwf.cbLs;
omp_set_num_threads(bfmarg::threads);
Fermion_t bq_tmp = singleprec_evecs ? dwf.allocCompactFermion() : dwf.allocFermion();
Fermion_t tmp1 = dwf.allocFermion();
Fermion_t tmp2 = dwf.allocFermion();
Fermion_t tmp3 = dwf.allocFermion();
if(!UniqueID()) printf("Computing eigenvector residuals\n");
for(int i=0;i<eig.get;i++){
if(singleprec_evecs){ // eig->bq is in single precision
// Element-wise float -> double copy into bq_tmp.
#pragma omp parallel for //Bet I could reduce the threading overheads by parallelizing this entire method
for(int j = 0; j < len; j++) {
((double*)bq_tmp)[j] = ((float*)(eig.bq[i][1]))[j];
}
}else{
// Double-precision copy via axpy with zero coefficient (y = x + 0*x).
#pragma omp parallel
{
dwf.axpy(bq_tmp, eig.bq[i][1], eig.bq[i][1], 0.);
}
}
// NOTE(review): nrm_boss is only assigned when dwf.isBoss(); on non-boss
// ranks the printf below is gated by UniqueID() so this is presumably
// safe — confirm isBoss() is true exactly on the printing rank.
double nrm_boss;
#pragma omp parallel
{
dwf.Mprec(bq_tmp,tmp1,tmp3, 0);
dwf.Mprec(tmp1, tmp2, tmp3, 1); //tmp2 = M M^dag v
//M M^dag v = lambda v
dwf.set_zero(tmp1);
dwf.axpy(tmp3, bq_tmp, tmp1, eig.evals[i]); //tmp3 = lambda v
double nrm = dwf.axpy_norm(tmp1, tmp2, tmp3, -1.); //tmp1 = tmp3 - tmp2
if(dwf.isBoss()) nrm_boss = sqrt(nrm); //includes global sum
}
if(!UniqueID()) printf("%d %g\n",i,nrm_boss);
}
// Release all scratch fermions.
dwf.freeFermion(bq_tmp);
dwf.freeFermion(tmp1);
dwf.freeFermion(tmp2);
dwf.freeFermion(tmp3);
}
#endif
#if defined(USE_GRID_LANCZOS)
// Grid-backend analogue of test_eigenvectors: rebuild the preconditioned
// Dirac operator at the given mass and print ||M^dag M v - lambda v|| for
// each eigenpair.
template<typename GridPolicies>
void test_eigenvectors(const std::vector<typename GridPolicies::GridFermionField> &evec, const std::vector<Grid::RealD> &eval, const double mass, typename GridPolicies::FgridGFclass &lattice){
typedef typename GridPolicies::GridFermionField GridFermionField;
typedef typename GridPolicies::FgridFclass FgridFclass;
typedef typename GridPolicies::GridDirac GridDirac;
// All grids and the gauge field are borrowed from the lattice object.
Grid::GridCartesian *UGrid = lattice.getUGrid();
Grid::GridRedBlackCartesian *UrbGrid = lattice.getUrbGrid();
Grid::GridCartesian *FGrid = lattice.getFGrid();
Grid::GridRedBlackCartesian *FrbGrid = lattice.getFrbGrid();
Grid::QCD::LatticeGaugeFieldD *Umu = lattice.getUmu();
double mob_b = lattice.get_mob_b();
double mob_c = mob_b - 1.; //b-c = 1
double M5 = GJP.DwfHeight();
typename GridDirac::ImplParams params;
lattice.SetParams(params);
GridDirac Ddwf(*Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,mob_b,mob_c, params);
Grid::SchurDiagMooeeOperator<GridDirac, GridFermionField> HermOp(Ddwf);
GridFermionField tmp1(FrbGrid);
GridFermionField tmp2(FrbGrid);
GridFermionField tmp3(FrbGrid);
// NOTE(review): `int i` vs unsigned evec.size() triggers a sign-compare
// warning; harmless for realistic eigenvector counts.
for(int i=0;i<evec.size();i++){
HermOp.Mpc(evec[i], tmp1);
HermOp.MpcDag(tmp1, tmp2); //tmp2 = M^dag M v
tmp3 = eval[i] * evec[i]; //tmp3 = lambda v
double nrm = sqrt(axpy_norm(tmp1, -1., tmp2, tmp3)); //tmp1 = tmp3 - tmp2
if(!UniqueID()) printf("%d %g\n",i,nrm);
}
}
#endif
//Keep code clean by wrapping BFM or Grid underlay
// Owns the backend solver state so main programs are agnostic to the
// BFM-vs-Grid choice (selected at compile time by the USE_BFM_* flags).
struct LatticeSolvers{
#if !defined(USE_BFM_LANCZOS) && !defined(USE_BFM_A2A)
// Grid-only build: no persistent solver objects needed, just fix the
// OpenMP thread count.
LatticeSolvers(const JobParams &jp, const int nthreads){
omp_set_num_threads(nthreads);
}
#else
// BFM build: paired double- and single-precision solver instances sharing
// one argument set.
bfm_evo<double> dwf_d;
bfm_evo<float> dwf_f;
bfmarg dwfa;
LatticeSolvers(const JobParams &jp, const int nthreads){
//Initialize both a double and single precision instance of BFM
BfmSolver solver;
switch(jp.solver){
case BFM_DWF:
solver = DWF; break;
case BFM_HmCayleyTanh:
solver = HmCayleyTanh; break;
default:
ERR.General("LatticeSolvers","constructor","Unknown solver\n");
}
setup_bfmargs(dwfa,nthreads,solver,jp.mobius_scale);
// NOTE(review): comms appear to be exclusive between instances — dwf_d's
// comms are ended while dwf_f initialises, then restored. Preserve order.
dwf_d.init(dwfa);
dwf_d.comm_end(); dwf_f.init(dwfa); dwf_f.comm_end(); dwf_d.comm_init();
}
~LatticeSolvers(){
dwf_d.end();
dwf_f.end();
}
#endif
};
// Owns the CPS lattice object and pushes the gauge field into whichever
// backend(s) the build enables (Grid import and/or BFM import).
template<typename LattType>
struct LatticeSetup{
# if defined(USE_BFM_LANCZOS) || defined(USE_BFM_A2A)
// Copy the (boundary-condition-applied) CPS gauge field into both BFM
// instances, juggling comms so only one instance holds them at a time.
static void importBFMlattice(Lattice *lat, LatticeSolvers &solvers){
lat->BondCond(); //Apply the boundary conditions!
Float* gauge = (Float*) lat->GaugeField();
solvers.dwf_d.cps_importGauge(gauge);
solvers.dwf_d.comm_end();
solvers.dwf_f.comm_init(); solvers.dwf_f.cps_importGauge(gauge); solvers.dwf_f.comm_end();
solvers.dwf_d.comm_init();
lat->BondCond(); //Un-apply the boundary conditions!
}
#endif
typedef LattType LatticeType;
LatticeType *lat;
//Grid or Grid/BFM mixed
#if defined(USE_GRID_LANCZOS) || defined(USE_GRID_A2A)
LatticeSetup(const JobParams &jp, LatticeSolvers &solvers){
// Only the Mobius (HmCayleyTanh) solver is supported on the Grid path.
assert(jp.solver == BFM_HmCayleyTanh);
FgridParams grid_params;
grid_params.mobius_scale = jp.mobius_scale;
lat = new LatticeType(grid_params);
//lat->ImportGauge(); //lattice -> Grid (applied APRD - signs internally then reverses)
NullObject null_obj;
// Export the CPS gauge field (with BCs applied) into Grid's Umu, then
// undo the BCs on the CPS side.
lat->BondCond();
CPSfield<cps::ComplexD,4*9,FourDpolicy,OneFlavorPolicy> cps_gauge((cps::ComplexD*)lat->GaugeField(),null_obj);
cps_gauge.exportGridField(*lat->getUmu());
lat->BondCond();
# if defined(USE_BFM_LANCZOS) || defined(USE_BFM_A2A)
importBFMlattice(lat,solvers);
# endif
}
#else
//BFM only
LatticeSetup(const JobParams &jp, LatticeSolvers &solvers){
lat = new LatticeType; //doesn't actually matter
importBFMlattice(lat,solvers);
}
#endif
LatticeType & getLattice(){ return *lat; }
~LatticeSetup(){
delete lat;
}
};
//Generates and stores evecs and evals
// Computes and stores Lanczos eigenvalues/eigenvectors, with backend chosen
// at compile time (Grid or BFM). Also handles double->single precision
// conversion of the eigenvectors and their release.
template<typename GridPolicies = void>
struct Lanczos{
#if defined(USE_GRID_LANCZOS)
std::vector<Grid::RealD> eval;
// Double- and single-precision eigenvector stores; at most one is
// populated at a time (toSingle moves evec into evec_f).
std::vector<typename GridPolicies::GridFermionField> evec;
std::vector<typename GridPolicies::GridFermionFieldF> evec_f;
double mass;
double resid;
//For precision change
Grid::GridCartesian *UGrid_f;
Grid::GridRedBlackCartesian *UrbGrid_f;
Grid::GridCartesian *FGrid_f;
Grid::GridRedBlackCartesian *FrbGrid_f;
Lanczos(): UGrid_f(NULL), UrbGrid_f(NULL), FGrid_f(NULL), FrbGrid_f(NULL){}
// Run the Lanczos algorithm at the mass/residual given in lanc_arg.
void compute(const LancArg &lanc_arg, LatticeSolvers &solvers, typename GridPolicies::FgridGFclass &lat){
mass = lanc_arg.mass;
resid = lanc_arg.stop_rsd;
#ifdef A2A_LANCZOS_SINGLE
//Make single precision Grids
int Ls = GJP.Snodes()*GJP.SnodeSites();
std::vector<int> nodes(4);
std::vector<int> vol(4);
for(int i=0;i<4;i++){
vol[i]= GJP.NodeSites(i)*GJP.Nodes(i);;
nodes[i]= GJP.Nodes(i);
}
std::vector<int> simd_layout = Grid::GridDefaultSimd(Grid::QCD::Nd,Grid::vComplexF::Nsimd());
if(!UniqueID()) printf("Created single-prec Grids: nodes (%d,%d,%d,%d) vol (%d,%d,%d,%d) and SIMD layout (%d,%d,%d,%d)\n",nodes[0],nodes[1],nodes[2],nodes[3],vol[0],vol[1],vol[2],vol[3],simd_layout[0],simd_layout[1],simd_layout[2],simd_layout[3]);
UGrid_f = Grid::QCD::SpaceTimeGrid::makeFourDimGrid(vol,simd_layout,nodes);
UrbGrid_f = Grid::QCD::SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid_f);
FGrid_f = Grid::QCD::SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid_f);
FrbGrid_f = Grid::QCD::SpaceTimeGrid::makeFiveDimRedBlackGrid(GJP.SnodeSites()*GJP.Snodes(),UGrid_f);
gridSinglePrecLanczos<GridPolicies>(eval,evec_f,lanc_arg,lat,UGrid_f,UrbGrid_f,FGrid_f,FrbGrid_f);
#else
gridLanczos<GridPolicies>(eval,evec,lanc_arg,lat);
# ifndef MEMTEST_MODE
test_eigenvectors<GridPolicies>(evec,eval,lanc_arg.mass,lat);
# endif
#endif
}
// Convert stored double-precision eigenvectors to single precision,
// releasing the double copies one at a time to bound peak memory.
void toSingle(){
typedef typename GridPolicies::GridFermionField GridFermionField;
typedef typename GridPolicies::GridFermionFieldF GridFermionFieldF;
//Make a single precision 5D checkerboarded Grid
std::vector<int> nodes(4);
std::vector<int> vol(4);
for(int i=0;i<4;i++){
vol[i]= GJP.NodeSites(i)*GJP.Nodes(i);;
nodes[i]= GJP.Nodes(i);
}
// NOTE(review): overwrites UGrid_f/FrbGrid_f without freeing any grids a
// prior compute() may have created — potential leak; confirm usage.
UGrid_f = Grid::QCD::SpaceTimeGrid::makeFourDimGrid(vol,Grid::GridDefaultSimd(Grid::QCD::Nd,Grid::vComplexF::Nsimd()),nodes);
FrbGrid_f = Grid::QCD::SpaceTimeGrid::makeFiveDimRedBlackGrid(GJP.SnodeSites()*GJP.Snodes(),UGrid_f);
int nev = evec.size();
// Pop from the back of evec so memory is released as we go.
for(int i=0;i<nev;i++){
GridFermionFieldF tmp_f(FrbGrid_f);
#ifndef MEMTEST_MODE
precisionChange(tmp_f, evec.back());
#endif
evec.pop_back();
evec_f.push_back(std::move(tmp_f));
}
//These are in reverse order!
std::reverse(evec_f.begin(), evec_f.end());
}
// Free all eigenvector storage (swap-with-temporary releases capacity)
// and delete any grids created for the precision change.
void freeEvecs(){
std::vector<typename GridPolicies::GridFermionField>().swap(evec); //evec.clear();
std::vector<typename GridPolicies::GridFermionFieldF>().swap(evec_f);
// NOTE(review): pointers are not reset to NULL after delete, so calling
// freeEvecs twice would double-delete — confirm single-call usage.
if(UGrid_f != NULL) delete UGrid_f;
if(UrbGrid_f != NULL) delete UrbGrid_f;
if(FGrid_f != NULL) delete FGrid_f;
if(FrbGrid_f != NULL) delete FrbGrid_f;
}
#else
// BFM backend: the Lanczos_5d object owns the eigen-data.
BFM_Krylov::Lanczos_5d<double> *eig;
Lanczos(): eig(NULL){}
void compute(const LancArg &lanc_arg, LatticeSolvers &solvers, Lattice &lat){
eig = new BFM_Krylov::Lanczos_5d<double>(solvers.dwf_d,const_cast<LancArg&>(lanc_arg)); //sets up the mass of dwf_d correctly
eig->Run();
solvers.dwf_f.mass = solvers.dwf_d.mass; //keep the single-prec solver in sync
solvers.dwf_f.GeneralisedFiveDimEnd(); // reinitialising since using a new mass
solvers.dwf_f.GeneralisedFiveDimInit();
test_eigenvectors(*eig,solvers.dwf_d,false);
}
void toSingle(){
eig->toSingle();
//Test the single-prec converted eigenvectors to make sure we haven't dropped too much precision
test_eigenvectors(*eig,eig->dop,true);
}
void freeEvecs(){
eig->free_bq();
}
~Lanczos(){
if(eig != NULL)
delete eig;
}
#endif
};
// Thin dispatch wrapper: compute the A2A V and W vectors using either the
// BFM Lanczos eigen-data or the Grid eigenvectors (single- or
// double-precision variant), hiding the backend difference from callers.
template<typename mf_Policies, typename LanczosPolicies>
struct computeA2Avectors{
static void compute(A2AvectorV<mf_Policies> &V, A2AvectorW<mf_Policies> &W, bool mixed_solve, bool evecs_single_prec, Lattice &lat, Lanczos<LanczosPolicies> &eig, LatticeSolvers &solvers){
#ifdef USE_BFM_LANCZOS
W.computeVW(V, lat, *eig.eig, evecs_single_prec, solvers.dwf_d, mixed_solve ? & solvers.dwf_f : NULL);
#else
// NOTE(review): 10000 is a hard-coded max-iteration count — confirm.
if(evecs_single_prec)
W.computeVW(V, lat, eig.evec_f, eig.eval, eig.mass, eig.resid, 10000);
else
W.computeVW(V, lat, eig.evec, eig.eval, eig.mass, eig.resid, 10000);
#endif
}
};
// No-op overload: scalar (non-SIMD) field policies carry no layout
// parameters to set up.
template<typename ComplexType>
void setupFieldParams(cps::NullObject &n){}
#ifdef USE_GRID
// Choose and report the default 4D SIMD layout for this complex type,
// splitting the nsimd lanes over the spatial directions only.
template<typename ComplexType>
void setupFieldParams(typename FourDSIMDPolicy::ParamType &p){
  const int nsimd = ComplexType::Nsimd();
  FourDSIMDPolicy::SIMDdefaultLayout(p, nsimd, 2); //only divide over spatial directions

  printf("4D field params: Nsimd = %d, SIMD dimensions:\n", nsimd);
  for(int d = 0; d < 4; d++) printf("%d ", p[d]);
  printf("\n");
}
// Choose and report the default 3D SIMD layout for this complex type.
template<typename ComplexType>
void setupFieldParams(typename ThreeDSIMDPolicy::ParamType &p){
  const int nsimd = ComplexType::Nsimd();
  ThreeDSIMDPolicy::SIMDdefaultLayout(p, nsimd);

  printf("3D field params: Nsimd = %d, SIMD dimensions:\n", nsimd);
  for(int d = 0; d < 3; d++) printf("%d ", p[d]);
  printf("\n");
}
#endif
CPS_END_NAMESPACE
#endif
|
CoarseAndFine.c | #include <omp.h>
#include <stdio.h>
#include <math.h>
#include<stdlib.h>
#define ArraySize 32678
float Array[ArraySize];
/* Pseudo-random float uniformly distributed in [min, max], driven by rand().
   Seed with srand() for reproducible sequences. */
float Ranf(float min, float max) {
    float span = max - min;
    return min + ((float) rand() * span) / (float) RAND_MAX;
}
/*
 * Benchmark: prefix-products over a random array under OpenMP, reporting
 * MegaMults/sec. NUMT (threads), TYPE (schedule kind) and CHUNK (chunk
 * size) are supplied on the compile line via -D.
 *
 * BUG FIXES:
 *  - The inner loop counter j was implicitly SHARED in the parallel region
 *    (only prod was privatized), a data race; j is now loop-local.
 *  - Both loops stopped at ArraySize-1, skipping the last element, while
 *    numMuled assumed the full triangular count ArraySize*(ArraySize+1)/2;
 *    the loops now cover the whole array so the reported rate matches the
 *    work actually performed.
 */
int main( int argc, char *argv[ ] ) {
    double prod; /* privatized per thread via the pragma's private clause */

    omp_set_num_threads( NUMT );

    /* Fill the whole array with uniform values in [-1, 1]. */
    for (int i = 0; i < ArraySize; i++) {
        Array[i] = Ranf( -1.f, 1.f );
    }

    double time0 = omp_get_wtime( );
#pragma omp parallel for shared(Array) private(prod) schedule(TYPE,CHUNK)
    for (int i = 0; i < ArraySize; i++) {
        prod = 1.0;
        for (int j = 0; j <= i; j++) { /* j loop-local: no cross-thread race */
            prod *= Array[j];
        }
    }
    double time1 = omp_get_wtime( );

    /* Total multiplications = 1 + 2 + ... + ArraySize (triangular number). */
    long int numMuled = (long int)ArraySize * (long int)(ArraySize+1) / 2;
    printf("MegaMults/sec = %10.2lf\n", (double)numMuled/(time1-time0)/1000000.);
    return 0;
}
|
mkl_util.h | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#ifdef INTEL_MKL
#include <string>
#include <vector>
#include "mkl_dnn.h"
#include "mkl_dnn_types.h"
#include "mkl_service.h"
#include "mkl_trans.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
#ifndef INTEL_MKL_ML
#include "mkldnn.hpp"
using mkldnn::engine;
using mkldnn::memory;
using mkldnn::padding_kind;
using mkldnn::primitive;
using mkldnn::reorder;
#endif
// The file contains a number of utility classes and functions used by MKL
// enabled kernels
namespace tensorflow {
// This class encapsulates all the meta data that is associated with an MKL
// tensor. A tensor is an MKL tensor if it was created as the result of an
// MKL operation, and did not go through a conversion to a standard
// Tensorflow tensor.
// Legacy MKL dimension indices. NOTE(review): ordering is reversed relative
// to TF (W innermost at index 0, N at index 3) — consistent with the
// SetTfDimOrder mapping below; confirm against MKL layout docs.
typedef enum { W = 0, H = 1, C = 2, N = 3 } MklDims;
// MKL-DNN logical dimension indices: N,C,H,W for activations; O (output
// channels) and I (input channels) alias indices 0/1 for filter tensors.
typedef enum {
Dim_N = 0,
Dim_C = 1,
Dim_H = 2,
Dim_W = 3,
Dim_O = 0,
Dim_I = 1
} MklDnnDims;
// Metadata for an MKL tensor: its MKL-side and TF-side layouts, sizes,
// strides, and the TF->MKL dimension-order map. Non-copyable; owns the
// arrays and layout handles it creates.
class MklShape {
 public:
MklShape() {}
TF_DISALLOW_COPY_AND_ASSIGN(MklShape); // Cannot copy
// Releases owned arrays and destroys any MKL layout handles that were
// created (handles must be deleted through dnnLayoutDelete_F32).
~MklShape() {
if (sizes_) delete[] sizes_;
if (strides_) delete[] strides_;
if (mklLayout_) CHECK_EQ(dnnLayoutDelete_F32(mklLayout_), E_SUCCESS);
if (tfLayout_) CHECK_EQ(dnnLayoutDelete_F32(tfLayout_), E_SUCCESS);
if (tf_to_mkl_dim_map_) delete[] tf_to_mkl_dim_map_;
}
// Mark/query whether this tensor carries MKL-layout data.
const bool IsMklTensor() const { return isMklTensor_; }
void SetMklTensor(const bool isMklTensor) { isMklTensor_ = isMklTensor; }
void SetDimensions(const size_t dimension) { dimension_ = dimension; }
// Adopt an existing MKL layout handle (ownership transfers to this object).
void SetMklLayout(dnnLayout_t mklLayout) { mklLayout_ = mklLayout; }
// Derive the MKL layout from an MKL primitive + resource type (e.g. the
// output layout of a convolution primitive).
void SetMklLayout(const void* primitive, size_t resourceType) {
CHECK_EQ(
dnnLayoutCreateFromPrimitive_F32(&mklLayout_, (dnnPrimitive_t)primitive,
(dnnResourceType_t)resourceType),
E_SUCCESS);
}
void SetTfLayout(const size_t dimension, const size_t* sizes,
const size_t* strides) {
dimension_ = dimension;
if (dimension > 0) { // MKl doesn't support zero dimension tensors
sizes_ = new size_t[dimension];
strides_ = new size_t[dimension];
for (int ii = 0; ii < dimension; ii++) {
sizes_[ii] = sizes[ii];
strides_[ii] = strides[ii];
}
CHECK_EQ(dnnLayoutCreate_F32(&tfLayout_, dimension, sizes, strides),
E_SUCCESS);
}
}
// Default case - MKL dim ordering is opposite of TF dim ordering
// MKL -> (DIMS-1)...0 where (DIMS-1) is outermost dim and 0 is innermost dim
// TF -> 0...(DIMS-1) where 0 is outermost dim and (DIMS-1) is innermost dim
// For layers that rely on data_format semantics (conv, pooling etc.)
// or operate only on certain dimensions (relu, concat, split etc.),
// Mkl APIs might require us to reorder these dimensions. In such cases,
// kernels should explicitly set this map
// Default TF->MKL map: plain reversal (TF dim ii maps to MKL dim
// dimension-1-ii), matching the opposite orderings described above.
void SetTfDimOrder(const size_t dimension) {
CHECK(dimension == dimension_);
if (tf_to_mkl_dim_map_ == nullptr) {
tf_to_mkl_dim_map_ = new size_t[dimension];
}
for (size_t ii = 0; ii < dimension; ii++) {
tf_to_mkl_dim_map_[ii] = dimension - (ii + 1);
}
}
// Explicit map: caller supplies the full TF->MKL dimension mapping.
void SetTfDimOrder(const size_t dimension, const size_t* tf_to_mkl_dim_map) {
CHECK(dimension == dimension_);
if (tf_to_mkl_dim_map_ == nullptr) {
tf_to_mkl_dim_map_ = new size_t[dimension];
}
for (size_t ii = 0; ii < dimension; ii++) {
tf_to_mkl_dim_map_[ii] = tf_to_mkl_dim_map[ii];
}
}
// Data-format map: derive the TF->MKL mapping for a 4-D tensor from its
// TensorFormat (NCHW/NHWC), placing each of N,C,H,W at its MKL index.
void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
CHECK_EQ(dimension, 4);
CHECK(dimension == dimension_);
if (tf_to_mkl_dim_map_ == nullptr) {
tf_to_mkl_dim_map_ = new size_t[dimension];
}
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDims::W;
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDims::H;
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDims::C;
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDims::N;
}
// Layout accessors; GetCurLayout returns whichever layout currently
// describes the stored data.
const dnnLayout_t GetMklLayout() const { return mklLayout_; }
const dnnLayout_t GetTfLayout() const { return tfLayout_; }
const dnnLayout_t GetCurLayout() const {
return isMklTensor_ ? mklLayout_ : tfLayout_;
}
// Size accessors: dim_size indexes in MKL order; tf_dim_size translates a
// TF dimension index through the TF->MKL map first.
size_t GetDimension() const { return dimension_; }
const size_t* GetSizes() const { return sizes_; }
int64 dim_size(int index) const { return sizes_[index]; }
int64 tf_dim_size(int index) const {
return sizes_[tf_to_mkl_dim_map_[index]];
}
const size_t* GetStrides() const { return strides_; }
const size_t* GetTfToMklDimMap() const { return tf_to_mkl_dim_map_; }
size_t tf_dim_idx(int index) const { return tf_to_mkl_dim_map_[index]; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Channel dimension.
bool IsMklChannelDim(int d) const { return tf_dim_idx(d) == MklDims::C; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Batch dimension.
bool IsMklBatchDim(int d) const { return tf_dim_idx(d) == MklDims::N; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Width dimension.
bool IsMklWidthDim(int d) const { return tf_dim_idx(d) == MklDims::W; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Height dimension.
bool IsMklHeightDim(int d) const { return tf_dim_idx(d) == MklDims::H; }
// Check if the TF-Mkl dimension ordering map specifies if the input
// tensor is in NCHW format.
bool IsTensorInNCHWFormat() const {
TensorFormat data_format = FORMAT_NCHW;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
// Check if the TF-Mkl dimension ordering map specifies if the input
// tensor is in NHWC format.
bool IsTensorInNHWCFormat() const {
TensorFormat data_format = FORMAT_NHWC;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
// Converts the flat data in 'input' from this shape's current layout
// (MKL layout for MKL tensors, TF layout otherwise) into 'targetLayout',
// writing the converted data to 'output'.  CHECK-fails on any MKL error.
void GetConvertedFlatData(dnnLayout_t targetLayout, void* input,
                          void* output) const {
  const dnnLayout_t source_layout = isMklTensor_ ? mklLayout_ : tfLayout_;
  dnnPrimitive_t conversion = nullptr;
  CHECK_EQ(dnnConversionCreate_F32(&conversion, source_layout, targetLayout),
           E_SUCCESS);
  CHECK_EQ(dnnConversionExecute_F32(conversion, input, output), E_SUCCESS);
  CHECK_EQ(dnnDelete_F32(conversion), E_SUCCESS);
}
// The following methods are used for serializing and de-serializing the
// contents of the mklshape object.
// The data is serialized in this order
// isMklTensor_
// dimension_
// sizes_
// strides_
// mklLayout_
// tfLayout_
// tf_to_mkl_dim_map_

#define SIZE_OF_MKL_DNN_BUF \
  (dnnLayoutSerializationBufferSize_F32())  // Size of buffer needed to
                                            // serialize dnn_layout pointer

// Size of buffer to hold the serialized object, the size is computed as
// follows sizeof(isMklTensor_) + sizeof(dimension_) + sizeof(sizes_) +
// sizeof(strides_)
// + sizeof(mklLayout_ buffer) + sizeof(tfLayout_ buffer)
// + sizeof(tf_to_mkl_dim_map_)
#define SIZE_OF_MKL_SERIAL_DATA(dims) \
  (2 * sizeof(size_t) + 3 * dims * sizeof(size_t) + 2 * SIZE_OF_MKL_DNN_BUF)

// First we need to define some macros for offsets into the serial buffer
// where different elements of Mklshape are written/read from.

// Location from start of buffer where isMklTensor_ is serialized
#define IS_MKL_TENSOR_OFFSET 0

// Location of dimension_
#define DIMS_OFFSET \
  (IS_MKL_TENSOR_OFFSET + sizeof(size_t))

// Location of sizes. Note dim is not used here, left here
// to make macros consistent.
#define SIZES_OFFSET(dims) (DIMS_OFFSET + sizeof(size_t))

// Location of strides
#define STRIDES_OFFSET(dims) \
  (SIZES_OFFSET(dims) + dims * sizeof(size_t))

// Location of mklLayout_
#define MKL_LAYOUT_OFFSET(dims) \
  (STRIDES_OFFSET(dims) + dims * sizeof(size_t))

// Location of tfLayout_
#define TF_LAYOUT_OFFSET(dims) \
  (MKL_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)

// Location of tf_to_mkl_dim_map_
#define TF_TO_MKL_DIM_MAP_OFFSET(dims) \
  (TF_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)

// TODO(agramesh1) make sure to create a const to share with rewrite pass
// for min size of MKL metadata tensor.
// Reconstructs this MklShape from the buffer written by SerializeMklShape.
// Only the isMklTensor_ flag is read unconditionally; the remaining fields
// are read only when the serialized tensor was an MKL tensor.
// NOTE(review): sizes_/strides_/tf_to_mkl_dim_map_ are allocated with new[]
// without releasing previous arrays, so deserializing twice into the same
// object leaks — verify callers only deserialize into fresh objects.
void DeSerializeMklShape(const unsigned char* buf, size_t buf_size) {
  // Buffer must hold at least the isMklTensor_ flag.
  CHECK(buf_size >= sizeof(size_t)) << "Bufsize too small in DeSerialize";
  // Make sure buffer holds at least isMklTensor_
  isMklTensor_ =
      *reinterpret_cast<const size_t*>(buf + IS_MKL_TENSOR_OFFSET) != 0;

  if (isMklTensor_) {  // If it is an MKL Tensor then read the rest
    dimension_ = *(reinterpret_cast<const size_t*>(buf + DIMS_OFFSET));
    // Re-validate size now that the real dimension count is known.
    CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
        << "Bufsize too small in DeSerialize";
    sizes_ = new size_t[dimension_];
    strides_ = new size_t[dimension_];
    tf_to_mkl_dim_map_ = new size_t[dimension_];
    for (int i = 0; i < dimension_; i++) {
      sizes_[i] =
          reinterpret_cast<const size_t*>(buf + SIZES_OFFSET(dimension_))[i];
      strides_[i] = reinterpret_cast<const size_t*>(
          buf + STRIDES_OFFSET(dimension_))[i];
      tf_to_mkl_dim_map_[i] = reinterpret_cast<const size_t*>(
          buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i];
    }
    // Layout handles are rebuilt by MKL from their serialized form.
    CHECK_EQ(dnnLayoutDeserialize_F32(&mklLayout_,
                                      buf + MKL_LAYOUT_OFFSET(dimension_)),
             E_SUCCESS);
    CHECK_EQ(dnnLayoutDeserialize_F32(&tfLayout_,
                                      buf + TF_LAYOUT_OFFSET(dimension_)),
             E_SUCCESS);
  }
}
// Writes this MklShape into 'buf' in the field order documented above.
// 'buf' must hold at least SIZE_OF_MKL_SERIAL_DATA(dimension_) bytes.
// Fields beyond isMklTensor_ are written only for MKL tensors.
void SerializeMklShape(unsigned char* buf, size_t buf_size) const {
  CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
      << "Bufsize too small to Serialize";
  // isMklTensor_ is stored as a size_t so offsets stay word-aligned.
  *reinterpret_cast<size_t*>(buf + IS_MKL_TENSOR_OFFSET) =
      isMklTensor_ ? 1 : 0;
  if (isMklTensor_) {
    *(reinterpret_cast<size_t*>(buf + DIMS_OFFSET)) = dimension_;
    for (int i = 0; i < dimension_; i++) {
      reinterpret_cast<size_t*>(buf + SIZES_OFFSET(dimension_))[i] =
          sizes_[i];
      reinterpret_cast<size_t*>(buf + STRIDES_OFFSET(dimension_))[i] =
          strides_[i];
      reinterpret_cast<size_t*>(buf +
                                TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i] =
          tf_to_mkl_dim_map_[i];
    }
    // Layout handles are opaque; MKL serializes them into fixed-size blobs.
    CHECK_EQ(dnnLayoutSerialize_F32(mklLayout_,
                                    buf + MKL_LAYOUT_OFFSET(dimension_)),
             E_SUCCESS);
    CHECK_EQ(
        dnnLayoutSerialize_F32(tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)),
        E_SUCCESS);
  }
}
 private:
  bool isMklTensor_ =
      false;  // Flag to indicate if the tensor is an MKL tensor or not
  dnnLayout_t mklLayout_ = nullptr;  // Pointer to the MKL layout
  dnnLayout_t tfLayout_ = nullptr;   // Pointer to layout of corresponding
  // Tensorflow tensor, used when conversion from MKL to standard tensor
  size_t dimension_ = 0;       // Number of dimensions
  size_t* sizes_ = nullptr;    // Required by MKL for conversions
  size_t* strides_ = nullptr;  // Required by MKL for conversions
  size_t* tf_to_mkl_dim_map_ =
      nullptr;  // TF dimension corresponding to this MKL dimension
};
#ifndef INTEL_MKL_ML
// Forward decl
TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format);
memory::dims CalculateTFStrides(const memory::dims& dims_tf_order);
memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
const memory::dims& strides,
memory::data_type dtype);
// MKL-DNN analogue of MklShape: records whether a tensor carries an MKL
// layout and, if so, how its dimensions map between Tensorflow and MKL
// order.  All state lives in a single POD struct so (de)serialization is a
// plain memory copy.
class MklDnnShape {
 private:
  typedef struct {
    /// Flag to indicate if the tensor is an MKL tensor or not
    bool is_mkl_tensor_ = false;
    /// Number of dimensions in Tensorflow format
    size_t dimension_ = 0;
    /// Required by MKLDNN for conversions
    mkldnn_dims_t sizes_;  // Required by MKL for conversions
    memory::format tf_data_format_ = memory::format::format_undef;
    memory::data_type T_ = memory::data_type::data_undef;
    // MKL layout
    mkldnn_memory_desc_t mkl_md_;
    /// TF dimension corresponding to this MKL dimension
    mkldnn_dims_t map_;
  } MklShapeData;
  MklShapeData data_;

  typedef std::remove_extent<mkldnn_dims_t>::type mkldnn_dim_t;

// Sentinel stored in unused sizes_/map_ slots.
#define INVALID_DIM_SIZE -1

 public:
  // Initialize all sizes_ and map_ slots to the INVALID_DIM_SIZE sentinel.
  MklDnnShape() {
    for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
         ++i) {
      data_.sizes_[i] = -1;
    }
    for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) {
      data_.map_[i] = -1;
    }
  }

  ~MklDnnShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklDnnShape);  // Cannot copy

  /// Helper function to compare memory::desc objects for MklDnn.
  /// May be this should go into MklDnn directly.
  /// Compares the two underlying C descriptor structs byte by byte.
  inline bool CompareMklDnnLayouts(const memory::desc& md1,
                                   const memory::desc& md2) const {
    mkldnn_memory_desc_t mdd1 = md1.data;
    mkldnn_memory_desc_t mdd2 = md2.data;
    const char* d1 = reinterpret_cast<const char*>(&mdd1);
    const char* d2 = reinterpret_cast<const char*>(&mdd2);

    size_t md_size = sizeof(mdd1);
    for (size_t i = 0; i < md_size; i++) {
      if (*d1++ != *d2++) {
        return false;
      }
    }
    return true;
  }

  /// Equality function for MklDnnShape objects
  /// @return true if both are equal; false otherwise.
  inline bool operator==(const MklDnnShape& input_shape) const {
    if (this->IsMklTensor() != input_shape.IsMklTensor()) {
      return false;
    }

    // If input tensors are in Mkl layout, then we check for dimensions and
    // sizes.
    if (this->IsMklTensor()) {
      return this->GetTfShape() == input_shape.GetTfShape() &&
             CompareMklDnnLayouts(this->GetMklLayout(),
                                  input_shape.GetMklLayout());
    }

    return true;
  }

  /// Equality operator for MklDnnShape and TFShape.
  /// Returns: true if TF shapes for both are the same, false otherwise
  inline bool operator==(const TensorShape& input_shape) const {
    if (!this->IsMklTensor()) {
      return false;
    }

    return this->GetTfShape() == input_shape;
  }

  inline const bool IsMklTensor() const { return data_.is_mkl_tensor_; }
  inline void SetMklTensor(bool is_mkl_tensor) {
    data_.is_mkl_tensor_ = is_mkl_tensor;
  }

  inline void SetDimensions(const size_t dimension) {
    data_.dimension_ = dimension;
  }

  // Size of the dimension named 'N'/'C'/'H'/'W'; CHECK-fails for an index
  // outside the current dimension count.
  inline size_t GetDimension(char dimension) const {
    int index = GetMklDnnTensorDimIndex(dimension);
    CHECK(index >= 0 && index < this->GetDimension())
        << "Invalid index from the dimension: " << index << ", " << dimension;
    return this->DimSize(index);
  }

  // Maps a dimension name to its MklDnnDims index; LOG(FATAL)s on anything
  // other than 'N', 'C', 'H', 'W'.
  inline int32 GetMklDnnTensorDimIndex(char dimension) const {
    switch (dimension) {
      case 'N':
        return MklDnnDims::Dim_N;
      case 'C':
        return MklDnnDims::Dim_C;
      case 'H':
        return MklDnnDims::Dim_H;
      case 'W':
        return MklDnnDims::Dim_W;
      default:
        LOG(FATAL) << "Invalid dimension: " << dimension;
        return -1;  // Avoid compiler warning about missing return value
    }
  }

  inline size_t GetDimension() const { return data_.dimension_; }
  inline const int* GetSizes() const {
    return reinterpret_cast<const int*>(&data_.sizes_[0]);
  }

  // Returns an mkldnn::memory::dims object that contains the sizes of this
  // MklDnnShape object.
  inline memory::dims GetSizesAsMklDnnDims() const {
    memory::dims retVal;
    if (data_.is_mkl_tensor_) {
      size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
      for (size_t i = 0; i < dimensions; i++) {
        if (data_.sizes_[i] != INVALID_DIM_SIZE)
          retVal.push_back(data_.sizes_[i]);
      }
    } else {
      // Reached only for non-MKL tensors; CHECK_EQ(false, true) always fails.
      CHECK_EQ(data_.is_mkl_tensor_, true);
    }
    return retVal;
  }

  inline int64 DimSize(int index) const {
    CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0]));
    return data_.sizes_[index];
  }

  /// Return TensorShape that describes the Tensorflow shape of the tensor
  /// represented by this MklShape.
  inline TensorShape GetTfShape() const {
    CHECK_EQ(data_.is_mkl_tensor_, true);

    std::vector<int32> shape(data_.dimension_, -1);
    if (data_.tf_data_format_ != memory::format::blocked) {
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[TfDimIdx(idx)];
      }
    } else {
      // If Tensorflow shape is in Blocked format, then we don't have dimension
      // map for it. So we just create Tensorflow shape from sizes in the
      // specified order.
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[idx];
      }
    }

    TensorShape ts;
    bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok();
    CHECK_EQ(ret, true);
    return ts;
  }

  inline void SetElemType(memory::data_type dt) { data_.T_ = dt; }
  inline const memory::data_type GetElemType() { return data_.T_; }

  inline void SetMklLayout(memory::primitive_desc* pd) {
    CHECK_NOTNULL(pd);
    data_.mkl_md_ = pd->desc().data;
  }

  inline void SetMklLayout(memory::desc* md) {
    CHECK_NOTNULL(md);
    data_.mkl_md_ = md->data;
  }

  inline const memory::desc GetMklLayout() const {
    return memory::desc(data_.mkl_md_);
  }

  inline memory::format GetTfDataFormat() const {
    return data_.tf_data_format_;
  }

  /// We don't create primitive_descriptor for TensorFlow layout now.
  /// We use lazy evaluation and create it only when needed. Input format can
  /// also be Blocked format.
  inline void SetTfLayout(size_t dims, const memory::dims& sizes,
                          memory::format format) {
    CHECK_EQ(dims, sizes.size());
    data_.dimension_ = dims;
    for (size_t ii = 0; ii < dims; ii++) {
      data_.sizes_[ii] = sizes[ii];
    }
    data_.tf_data_format_ = format;
    // Blocked format carries no named dimension order, so no TF<->MKL
    // dimension map is built for it.
    if (format != memory::format::blocked) {
      SetTfDimOrder(dims, format);
    }
  }

  inline const memory::desc GetTfLayout() const {
    memory::dims dims;
    for (size_t ii = 0; ii < data_.dimension_; ii++) {
      dims.push_back(data_.sizes_[ii]);
    }

    // Create Blocked memory desc if input TF format was set like that.
    if (data_.tf_data_format_ == memory::format::blocked) {
      auto strides = CalculateTFStrides(dims);
      return CreateBlockedMemDescHelper(dims, strides, data_.T_);
    } else {
      return memory::desc(dims, data_.T_, data_.tf_data_format_);
    }
  }

  inline const memory::desc GetCurLayout() const {
    return IsMklTensor() ? GetMklLayout() : GetTfLayout();
  }

  // nhasabni - I've removed SetTfDimOrder that was setting default order in
  // case of MKL-ML. We don't need a case of default dimension order because
  // when an operator that does not get data_format attribute gets all inputs
  // in Tensorflow format, it will produce output in Tensorflow format.
  inline void SetTfDimOrder(const size_t dimension, const mkldnn_dims_t map) {
    CHECK(dimension == data_.dimension_);
    for (size_t ii = 0; ii < dimension; ii++) {
      data_.map_[ii] = map[ii];
    }
  }

  inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
    // TODO(nhasabni): Why do we restrict this to 4D?
    CHECK_EQ(dimension, 4);
    CHECK(dimension == data_.dimension_);
    data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W;
    data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H;
    data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C;
    data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N;
  }

  inline void SetTfDimOrder(const size_t dimension, memory::format format) {
    TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format);
    SetTfDimOrder(dimension, data_format);
  }

  inline const mkldnn_dim_t* GetTfToMklDimMap() const { return &data_.map_[0]; }
  inline size_t TfDimIdx(int index) const { return data_.map_[index]; }
  inline int64 TfDimSize(int index) const {
    return data_.sizes_[TfDimIdx(index)];
  }

  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Channel dimension.
  inline bool IsMklChannelDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_C;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Batch dimension.
  inline bool IsMklBatchDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_N;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Width dimension.
  inline bool IsMklWidthDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_W;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Height dimension.
  inline bool IsMklHeightDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_H;
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NCHW format.
  inline bool IsTensorInNCHWFormat() const {
    TensorFormat data_format = FORMAT_NCHW;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NHWC format.
  inline bool IsTensorInNHWCFormat() const {
    TensorFormat data_format = FORMAT_NHWC;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// The following methods are used for serializing and de-serializing the
  /// contents of the mklshape object.
  /// The data is serialized in this order
  /// is_mkl_tensor_ : dimension_ : sizes_ : map_: format_ : T_ : mkl_pd_;

  /// Size of buffer to hold the serialized object, the size is computed by
  /// following above mentioned order
  inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); }

  // POD copy of data_ into 'buf'; buf must hold GetSerializeBufferSize()
  // bytes.
  void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const {
    CHECK(buf_size >= GetSerializeBufferSize())
        << "Buffer size is too small to SerializeMklDnnShape";
    *reinterpret_cast<MklShapeData*>(buf) = data_;
  }

  // Inverse of SerializeMklDnnShape; the full struct is copied only when the
  // serialized is_mkl_tensor_ flag is set.
  void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) {
    // Make sure buffer holds at least is_mkl_tensor_.
    CHECK(buf_size >= sizeof(data_.is_mkl_tensor_))
        << "Buffer size is too small in DeSerializeMklDnnShape";

    const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf);
    if (is_mkl_tensor) {  // If it is an MKL Tensor then read the rest
      CHECK(buf_size >= GetSerializeBufferSize())
          << "Buffer size is too small in DeSerializeMklDnnShape";
      data_ = *reinterpret_cast<const MklShapeData*>(buf);
    }
  }
};
#endif
// List of MklShape objects. Used in Concat/Split layers.
typedef std::vector<MklShape> MklShapeList;
#ifndef INTEL_MKL_ML
// List of MklDnnShape objects; MKL-DNN analogue of MklShapeList.
typedef std::vector<MklDnnShape> MklDnnShapeList;
#endif
// Check if all tensors specified by MklShapes are MKL tensors.
// Returns false on the first shape that is not in MKL layout.
inline bool AreAllMklTensors(const MklShapeList& shapes) {
  for (const auto& shape : shapes) {
    if (!shape.IsMklTensor()) return false;
  }
  return true;
}
#ifdef INTEL_MKL_ML
// Converts 'mkl_tensor' (in MKL layout, described by 'mkl_shape') into a new
// temp tensor in plain Tensorflow layout and returns it.
// T is the element type of the tensor data.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklShape& mkl_shape) {
  Tensor output_tensor;
  TensorShape output_shape;

  // Build the TF shape: iterate TF dimensions outermost to innermost and
  // look up each size through the TF-to-MKL dimension map.
  for (size_t j = 0; j < mkl_shape.GetDimension(); j++) {
    // Outermost to innermost dimension
    output_shape.AddDim(mkl_shape.GetSizes()[mkl_shape.tf_dim_idx(j)]);
  }

  // Allocate output tensor.  The Status was previously ignored, which could
  // silently convert into an unallocated tensor; fail hard instead
  // (consistent with the TF_CHECK_OK usage in the MKL-DNN variant).
  TF_CHECK_OK(context->allocate_temp(DataTypeToEnum<T>::v(), output_shape,
                                     &output_tensor));

  dnnLayout_t output_layout = static_cast<dnnLayout_t>(mkl_shape.GetTfLayout());
  void* input_buffer = const_cast<T*>(mkl_tensor.flat<T>().data());
  void* output_buffer = const_cast<T*>(output_tensor.flat<T>().data());

  // Empty tensors have no data to convert.
  if (mkl_tensor.NumElements() != 0) {
    mkl_shape.GetConvertedFlatData(output_layout, input_buffer, output_buffer);
  }
  return output_tensor;
}
#else
// MKL-DNN variant: not implemented yet; CHECK-fails unconditionally if
// invoked.  The signature mirrors the MKL-ML overload above.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklDnnShape& mkl_shape) {
  Tensor output_tensor;
  TF_CHECK_OK(
      Status(error::Code::UNIMPLEMENTED, "Unimplemented conversion function"));
  return output_tensor;
}
#endif
// Get the MKL shape from the second string tensor: de-serializes the
// meta-data tensor paired with input 'n' into 'mklshape'.
inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) {
  const Tensor& meta_tensor =
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()));
  mklshape->DeSerializeMklShape(
      meta_tensor.flat<uint8>().data(),
      meta_tensor.flat<uint8>().size() * sizeof(uint8));
}
#ifndef INTEL_MKL_ML
// MKL-DNN variant: de-serializes the meta-data tensor paired with input 'n'
// into 'mklshape'.
inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) {
  const Tensor& meta_tensor =
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()));
  mklshape->DeSerializeMklDnnShape(
      meta_tensor.flat<uint8>().data(),
      meta_tensor.flat<uint8>().size() * sizeof(uint8));
}
#endif
// Gets the actual data tensor for logical input 'n'.
inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) {
  const int data_idx = GetTensorDataIndex(n, ctext->num_inputs());
  return ctext->input(data_idx);
}
// Fetches the OpInputList registered under 'name' into 'input_tensors'.
// CHECK-fails on a null output pointer.
inline void GetMklInputList(OpKernelContext* ctext, StringPiece name,
                            OpInputList* input_tensors) {
  CHECK_NOTNULL(input_tensors);
  ctext->input_list(name, input_tensors);
}
#ifdef INTEL_MKL_ML
// De-serializes every "mkl_<name>" meta-data tensor into the corresponding
// slot of 'mkl_shapes'.
// NOTE(review): assumes 'mkl_shapes' is already sized by the caller.
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklShapeList* mkl_shapes) {
  OpInputList meta_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &meta_tensors);

  for (int i = 0; i < meta_tensors.size(); i++) {
    const auto meta_flat = meta_tensors[i].flat<uint8>();
    (*mkl_shapes)[i].DeSerializeMklShape(meta_flat.data(),
                                         meta_flat.size() * sizeof(uint8));
  }
}
#else
// MKL-DNN variant: de-serializes every "mkl_<name>" meta-data tensor into
// the corresponding slot of 'mkl_shapes'.
// NOTE(review): assumes 'mkl_shapes' is already sized by the caller.
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklDnnShapeList* mkl_shapes) {
  OpInputList meta_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &meta_tensors);

  for (int i = 0; i < meta_tensors.size(); i++) {
    const auto meta_flat = meta_tensors[i].flat<uint8>();
    (*mkl_shapes)[i].DeSerializeMklDnnShape(meta_flat.data(),
                                            meta_flat.size() * sizeof(uint8));
  }
}
#endif
#ifndef INTEL_MKL_ML
/// Get shape of input tensor pointed by 'input_idx' in TensorShape format.
/// If the input tensor is in MKL layout, then obtains TensorShape from
/// MklShape.
inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) {
  // Sanity check.
  CHECK_NOTNULL(context);
  CHECK_LT(input_idx, context->num_inputs());

  MklDnnShape mkl_shape;
  GetMklShape(context, input_idx, &mkl_shape);
  if (mkl_shape.IsMklTensor()) return mkl_shape.GetTfShape();

  // Plain TF tensor: its own shape is already the TF shape.
  return MklGetInput(context, input_idx).shape();
}
#endif
// Allocate the second (meta-data) output tensor for output 'n' and fill it
// with the serialized form of 'mkl_shape'.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklShape& mkl_shape) {
  Tensor* meta_tensor = nullptr;
  TensorShape meta_shape;
  meta_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            meta_shape, &meta_tensor));
  mkl_shape.SerializeMklShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#ifndef INTEL_MKL_ML
// MKL-DNN variant: allocate the second (meta-data) output tensor for output
// 'n' and fill it with the serialized form of 'mkl_shape'.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklDnnShape& mkl_shape) {
  Tensor* meta_tensor = nullptr;
  TensorShape meta_shape;
  meta_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            meta_shape, &meta_tensor));
  mkl_shape.SerializeMklDnnShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
// Allocate the data output tensor for output 'n' with shape 'tf_shape',
// plus its paired meta-data tensor holding the serialized 'mkl_shape'.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklShape& mkl_shape) {
  Tensor* meta_tensor = nullptr;
  TensorShape meta_shape;
  meta_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  OP_REQUIRES_OK(
      ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
                                    tf_shape, output));
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            meta_shape, &meta_tensor));
  mkl_shape.SerializeMklShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#ifndef INTEL_MKL_ML
// MKL-DNN variant: allocate the data output tensor for output 'n' with shape
// 'tf_shape', plus its paired meta-data tensor holding the serialized
// 'mkl_shape'.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklDnnShape& mkl_shape) {
  Tensor* meta_tensor = nullptr;
  TensorShape meta_shape;
  meta_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  OP_REQUIRES_OK(
      ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
                                    tf_shape, output));
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            meta_shape, &meta_tensor));
  mkl_shape.SerializeMklDnnShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
// Allocates a temp tensor and returns the data buffer for temporary storage.
// Currently
#ifndef INTEL_MKL_ML
// Allocates a temp tensor large enough to back memory described by 'pd' and
// returns its raw buffer through 'buf_out'.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           const memory::primitive_desc& pd, void** buf_out) {
  // +1 element guards against rounding down when pd's byte size is not a
  // multiple of sizeof(T).
  const auto num_elements = pd.get_size() / sizeof(T) + 1;
  TensorShape tf_shape;
  tf_shape.AddDim(num_elements);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<T>().data());
}
#endif
// Allocates a float temp tensor large enough to back the MKL layout
// 'lt_buff' and returns its raw buffer through 'buf_out'.
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           dnnLayout_t lt_buff, void** buf_out) {
  const size_t layout_bytes =
      dnnLayoutGetMemorySize_F32(static_cast<dnnLayout_t>(lt_buff));
  TensorShape tf_shape;
  // +1 element guards against rounding down on non-multiple byte sizes.
  tf_shape.AddDim(layout_bytes / sizeof(float) + 1);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<float>().data());
}
// Allocates a temp tensor of element type T with the given shape.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           TensorShape tf_shape) {
  const DataType dtype = DataTypeToEnum<T>::v();
  OP_REQUIRES_OK(context, context->allocate_temp(dtype, tf_shape, tensor_out));
}
// Fills 'strides' (4 entries) from 'sizes' for the given data format.
// MKL requires strides in NCHW order.
inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides,
                                const size_t* sizes) {
  const size_t plane = sizes[0] * sizes[1];
  if (data_format == FORMAT_NHWC) {
    strides[0] = sizes[2];
    strides[1] = sizes[0] * sizes[2];
    strides[2] = 1;
    strides[3] = plane * sizes[2];
  } else {
    strides[0] = 1;
    strides[1] = sizes[0];
    strides[2] = plane;
    strides[3] = plane * sizes[2];
  }
}
// Converts the 4-D sizes stored in 'mkl_shape' (MKL dimension order) into a
// TensorShape laid out according to 'data_format_'.
// NOTE(review): index choices assume a fixed MklDims ordering — confirm
// against the MklDims enum.
inline void MklSizesToTFSizes(OpKernelContext* context,
                              TensorFormat data_format_,
                              const MklShape& mkl_shape,
                              TensorShape* tf_shape) {
  const size_t tf_dim = mkl_shape.GetDimension();
  const size_t* tf_sizes = mkl_shape.GetSizes();

  // Only 4-D shapes are supported.
  OP_REQUIRES(context, tf_dim == 4,
              errors::InvalidArgument("MKLSizesToTFSizes: size must be 4-dim"));

  std::vector<int32> sizes;
  sizes.push_back(tf_sizes[3]);
  if (data_format_ == FORMAT_NHWC) {
    sizes.push_back(tf_sizes[1]);
    sizes.push_back(tf_sizes[0]);
    sizes.push_back(tf_sizes[2]);
  } else {
    sizes.push_back(tf_sizes[2]);
    sizes.push_back(tf_sizes[1]);
    sizes.push_back(tf_sizes[0]);
  }

  OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(sizes, tf_shape));
}
// Maps a dimension name ('N'/'C'/'H'/'W') to its MklDims index.
// LOG(FATAL)s on any other character.
inline int32 GetMklTensorDimIndex(char dimension) {
  if (dimension == 'N') return MklDims::N;
  if (dimension == 'C') return MklDims::C;
  if (dimension == 'H') return MklDims::H;
  if (dimension == 'W') return MklDims::W;
  LOG(FATAL) << "Invalid dimension: " << dimension;
  return -1;  // Avoid compiler warning about missing return value
}
// Size of the dimension named 'dimension' in 'mkl_shape'; CHECK-fails if the
// mapped index is out of range for the shape.
inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) {
  const int index = GetMklTensorDimIndex(dimension);
  CHECK(index >= 0 && index < mkl_shape.GetDimension())
      << "Invalid index from the dimension: " << index << ", " << dimension;
  return mkl_shape.dim_size(index);
}
// Shallow-copies both the data tensor and its paired meta-data tensor from
// input 'idx_in' to output 'idx_out'.
inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in,
                                 int idx_out) {
  const int num_inputs = context->num_inputs();
  const int num_outputs = context->num_outputs();
  const int data_in = GetTensorDataIndex(idx_in, num_inputs);
  const int meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  const int data_out = GetTensorDataIndex(idx_out, num_outputs);
  const int meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(data_in);
  const Tensor& meta = context->input(meta_in);
  Tensor output(data.dtype());
  Tensor meta_output(meta.dtype());

  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, data.shape()));
  CHECK(meta_output.CopyFrom(meta, meta.shape()));
  context->set_output(data_out, output);
  context->set_output(meta_out, meta_output);
}
#ifdef INTEL_MKL_ML
// Shallow-copies input 'idx_in' to output 'idx_out' reshaped to 'shape', and
// emits a dummy (non-MKL) meta-data shape tensor for the output.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  const int data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int data_out = GetTensorDataIndex(idx_out, context->num_outputs());
  const Tensor& data = context->input(data_in);

  // Output is a plain TF tensor, so its meta-data says "not MKL".
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);

  Tensor output(data.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, shape));
  context->set_output(data_out, output);
}
#else
// MKL-DNN variant: shallow-copies input 'idx_in' to output 'idx_out'
// reshaped to 'shape', and emits a dummy (non-MKL) meta-data shape tensor.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  const int data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int data_out = GetTensorDataIndex(idx_out, context->num_outputs());
  const Tensor& data = context->input(data_in);

  // Output is a plain TF tensor, so its meta-data says "not MKL".
  MklDnnShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);

  Tensor output(data.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, shape));
  context->set_output(data_out, output);
}
#endif
#ifdef INTEL_MKL_ML
// Forwards a plain-TF input tensor to the output slot, emitting a dummy
// (non-MKL) meta-data shape tensor.  Ref-typed inputs are forwarded by
// reference.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  const int data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int data_out = GetTensorDataIndex(idx_out, context->num_outputs());

  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);

  if (IsRefType(context->input_dtype(data_in))) {
    context->forward_ref_input_to_ref_output(data_in, data_out);
  } else {
    context->set_output(data_out, context->input(data_in));
  }
}
#else
// MKL-DNN variant: forwards a plain-TF input tensor to the output slot,
// emitting a dummy (non-MKL) meta-data shape tensor.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  const int data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int data_out = GetTensorDataIndex(idx_out, context->num_outputs());

  MklDnnShape dnn_shape_output;
  dnn_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, dnn_shape_output);

  if (IsRefType(context->input_dtype(data_in))) {
    context->forward_ref_input_to_ref_output(data_in, data_out);
  } else {
    context->set_output(data_out, context->input(data_in));
  }
}
#endif
// Forwards both the data tensor and its paired meta-data tensor from input
// 'idx_in' to output 'idx_out'; ref-typed inputs are forwarded by reference.
inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in,
                                    int idx_out) {
  const int num_inputs = context->num_inputs();
  const int num_outputs = context->num_outputs();
  const int data_in = GetTensorDataIndex(idx_in, num_inputs);
  const int meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  const int data_out = GetTensorDataIndex(idx_out, num_outputs);
  const int meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);

  if (IsRefType(context->input_dtype(data_in))) {
    context->forward_ref_input_to_ref_output(data_in, data_out);
    context->forward_ref_input_to_ref_output(meta_in, meta_out);
  } else {
    context->set_output(data_out, context->input(data_in));
    context->set_output(meta_out, context->input(meta_in));
  }
}
#ifndef INTEL_MKL_ML
// Forwards the data tensor from input 'idx_in' to output 'idx_out' while
// writing a freshly-serialized 'mkl_shape' as the output's meta-data tensor.
inline void ForwardMklTensorInToOutWithMklShape(OpKernelContext* context,
                                                int idx_in, int idx_out,
                                                const MklDnnShape& mkl_shape) {
  const int data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int data_out = GetTensorDataIndex(idx_out, context->num_outputs());

  AllocateOutputSetMklShape(context, idx_out, mkl_shape);

  if (IsRefType(context->input_dtype(data_in))) {
    context->forward_ref_input_to_ref_output(data_in, data_out);
  } else {
    context->set_output(data_out, context->input(data_in));
  }
}
#endif
// Forward the MKL shape ONLY (used in elementwise and other ops where
// we call the eigen implementation and MKL shape is not used).
// Fix: the two index parameters previously mixed 'uint32' and 'uint32_t'
// spellings; unified to 'uint32' for consistency with the rest of the file
// (same underlying type, so callers are unaffected).
inline void ForwardMklMetaDataInToOut(OpKernelContext* context,
                                      uint32 idx_data_in,
                                      uint32 idx_data_out) {
  uint32 idx_meta_in =
      GetTensorMetaDataIndex(idx_data_in, context->num_inputs());
  uint32 idx_meta_out =
      GetTensorMetaDataIndex(idx_data_out, context->num_outputs());

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}
// Set a dummy MKL shape for output 'idx_data_out' (called when the output is
// in plain TF format, so the meta-data tensor must say "not MKL").
inline void SetDummyMklShapeOutput(OpKernelContext* context,
                                   uint32 idx_data_out) {
  MklShape dummy_shape;
  dummy_shape.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, dummy_shape);
}
#ifdef INTEL_MKL_ML
// We don't need these functions in MKLDNN. We have defined equality operator
// on MklDnnShape class directly.
// Checks if the TF shape for both MKL tensors is the same or not
// Returns: true if both TF shapes are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
const MklShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->GetDimension() != input_shape_1->GetDimension()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->GetDimension();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
const TensorShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->GetDimension() != input_shape_1->dims()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->GetDimension();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->tf_dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
// Symmetric wrapper: delegates to the (MklShape*, TensorShape*) overload.
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const MklShape* input_shape_1) {
  return MklCompareShapes(input_shape_1, input_shape_0);
}
// Compares two TensorShape objects dimension by dimension.
// Returns true iff both shapes have the same rank and identical extents.
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const TensorShape* input_shape_1) {
  const int rank = input_shape_0->dims();
  if (rank != input_shape_1->dims()) return false;
  for (int d = 0; d < rank; ++d) {
    if (input_shape_0->dim_size(d) != input_shape_1->dim_size(d)) {
      return false;
    }
  }
  return true;
}
#endif
// These functions do not compile with MKL-DNN since mkl.h is missing.
// We may need to remove them later.
// TODO(intel_tf): Remove this routine when faster MKL layout conversion is
// out.
// Converts a 4-D float tensor from NHWC to NCHW layout using MKL's
// out-of-place matrix transpose (mkl_somatcopy): for each batch image the
// (H*W) x C row-major matrix is transposed into C x (H*W).
// NOTE(review): assumes *output is already allocated with N*H*W*C floats
// and that only float tensors are passed -- confirm at call sites.
inline void MklNHWCToNCHW(const Tensor& input, Tensor** output) {
const float* buf_in = input.flat<float>().data();
float* buf_out = (*output)->flat<float>().data();
// Dimensions are read from the NHWC-ordered input.
int64 N = input.dim_size(0);
int64 H = input.dim_size(1);
int64 W = input.dim_size(2);
int64 C = input.dim_size(3);
int64 stride_n = H * W * C;  // elements per batch image
#pragma omp parallel for num_threads(16)
for (int64 n = 0; n < N; ++n) {
// 'R' = row-major, 'T' = transpose, alpha = 1 (no scaling);
// lda = C (input row length), ldb = H*W (output row length).
mkl_somatcopy('R', 'T', H * W, C, 1, buf_in + n * stride_n, C,
buf_out + n * stride_n, H * W);
}
}
// Converts a 4-D float tensor from NCHW back to NHWC layout, the inverse of
// MklNHWCToNCHW: for each batch image the C x (H*W) row-major matrix is
// transposed into (H*W) x C.
// NOTE(review): dimensions are read from *output (the NHWC tensor), so the
// output must already be allocated with the target shape -- confirm callers.
inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) {
const float* buf_in = input.flat<float>().data();
float* buf_out = (*output)->flat<float>().data();
int64 N = (*output)->dim_size(0);
int64 H = (*output)->dim_size(1);
int64 W = (*output)->dim_size(2);
int64 C = (*output)->dim_size(3);
int64 stride_n = H * W * C;  // elements per batch image
#pragma omp parallel for num_threads(16)
for (int64 n = 0; n < N; ++n) {
// 'R' = row-major, 'T' = transpose, alpha = 1 (no scaling);
// lda = H*W (input row length), ldb = C (output row length).
mkl_somatcopy('R', 'T', C, H * W, 1, buf_in + n * stride_n, H * W,
buf_out + n * stride_n, C);
}
}
// -------------------------------------------------------------------
#ifndef INTEL_MKL_ML
/// Return MKL-DNN data type (memory::data_type) for input type T
///
/// @input None
/// @return memory::data_type corresponding to type T
// Primary template: intentionally declared without a definition so that
// using an unsupported element type fails at link time rather than
// silently picking a wrong MKL-DNN data type.
template <typename T>
static memory::data_type MklDnnType();
/// Instantiation for float type. Add similar instantiations for other
/// type if needed.
template <>
memory::data_type MklDnnType<float>() {
return memory::data_type::f32;
}
/// Translate a TensorFlow data format into the MKL-DNN equivalent.
///
/// @input: TensorFlow data format (FORMAT_NHWC or FORMAT_NCHW)
/// @return: matching memory::format; any other format CHECK-fails.
inline memory::format TFDataFormatToMklDnnDataFormat(TensorFormat format) {
  switch (format) {
    case FORMAT_NHWC:
      return memory::format::nhwc;
    case FORMAT_NCHW:
      return memory::format::nchw;
    default:
      TF_CHECK_OK(
          Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
      // Unreachable once TF_CHECK_OK aborts; present only to silence the
      // missing-return compiler warning.
      return memory::format::format_undef;
  }
}
/// Translate an MKL-DNN memory format back into a TensorFlow data format.
///
/// @input: memory::format (nhwc or nchw)
/// @return: corresponding TensorFormat; any other format CHECK-fails.
inline TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format) {
  switch (format) {
    case memory::format::nhwc:
      return FORMAT_NHWC;
    case memory::format::nchw:
      return FORMAT_NCHW;
    default:
      TF_CHECK_OK(
          Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
      // Unreachable once TF_CHECK_OK aborts; present only to silence the
      // missing-return compiler warning.
      return FORMAT_NHWC;
  }
}
/// Map a TensorShape into MKL-DNN memory::dims, preserving dimension order.
/// E.g., an NHWC-shaped tensor yields dims in NHWC order as well.
///
/// @input TensorShape object in shape
/// @return memory::dims corresponding to TensorShape
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
  const int rank = shape.dims();
  memory::dims dims;
  dims.reserve(rank);
  for (int d = 0; d < rank; ++d) {
    dims.push_back(shape.dim_size(d));
  }
  return dims;
}
/// Map a TensorShape into MKL-DNN memory::dims in NCHW order, regardless of
/// the tensor's own format. Unlike TFShapeToMklDnnDims above, dimension
/// order is NOT preserved: an NHWC shape is reordered into NCHW.
///
/// @input TensorShape object in shape, plus its TensorFormat
/// @return memory::dims in MKL-DNN required NCHW format
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
                                              TensorFormat format) {
  // Reject formats MKL-DNN cannot express (CHECK-fails on bad input).
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);
  auto dim_of = [&](char dim) {
    return static_cast<int>(shape.dim_size(GetTensorDimIndex(format, dim)));
  };
  // MKL-DNN requires dimensions in NCHW order.
  return memory::dims({dim_of('N'), dim_of('C'), dim_of('H'), dim_of('W')});
}
/// Overload of TFShapeToMklDnnDimsInNCHW that starts from memory::dims
/// (already in `format` order) instead of a TensorShape.
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
                                     TensorFormat format) {
  // Reject formats MKL-DNN cannot express (CHECK-fails on bad input).
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);
  auto dim_of = [&](char dim) {
    return in_dims[GetTensorDimIndex(format, dim)];
  };
  // MKL-DNN requires dimensions in NCHW order.
  return memory::dims({dim_of('N'), dim_of('C'), dim_of('H'), dim_of('W')});
}
/// Map an MKL-DNN memory::dims object into a TensorShape, preserving the
/// dimension order.
///
/// @input MKL-DNN memory::dims object
/// @output TensorShape corresponding to memory::dims
inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
  // Copy the extents into the int32 vector MakeShape expects.
  std::vector<int32> sizes(dims.begin(), dims.end());
  TensorShape ret;
  CHECK_EQ(TensorShapeUtils::MakeShape(sizes, &ret).ok(), true);
  return ret;
}
/// Compute row-major (TensorFlow-order) strides for a shape.
/// E.g., for dims {1, 2, 3, 4} the first dimension is outermost and the
/// last is innermost, so the strides are {4*3*2, 4*3, 4, 1} = {24, 12, 4, 1}.
///
/// @input Tensorflow shape in memory::dims type
/// @return memory::dims containing strides for the tensor.
inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) {
  CHECK_GT(dims_tf_order.size(), 0);
  memory::dims strides(dims_tf_order.size());
  int stride = 1;  // innermost dimension has unit stride
  for (int d = static_cast<int>(dims_tf_order.size()) - 1; d >= 0; --d) {
    strides[d] = stride;
    stride *= dims_tf_order[d];
  }
  return strides;
}
/// Map a TensorFlow padding type to the MKL-DNN padding kind.
/// The input is intentionally ignored: MKL-DNN only supports zero padding,
/// so every TF padding mode maps to padding_kind::zero.
inline padding_kind TFPaddingToMklDnnPadding(Padding pad) {
// MKL-DNN only supports zero padding.
return padding_kind::zero;
}
/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
/// function such as CalculateTFStrides to compute strides
/// for given dimensions.
/// @return: memory::desc object corresponding to blocked memory format
/// for given dimensions and strides.
inline memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
const memory::dims& strides,
memory::data_type dtype) {
CHECK_EQ(dim.size(), strides.size());
// We have to construct memory descriptor in a C style. This is not at all
// ideal but MKLDNN does not offer any API to construct descriptor in
// blocked format except a copy constructor that accepts
// mkldnn_memory_desc_t.
mkldnn_memory_desc_t md;
md.primitive_kind = mkldnn_memory;
md.ndims = dim.size();
md.format = mkldnn_blocked;
md.data_type = memory::convert_to_c(dtype);
for (size_t i = 0; i < dim.size(); i++) {
// No inner blocking: each dimension is a single block of size 1,
// strides[1] is the within-block stride, strides[0] the between-block
// (i.e. the caller-supplied) stride.
md.layout_desc.blocking.block_dims[i] = 1;
md.layout_desc.blocking.strides[1][i] = 1;
md.layout_desc.blocking.strides[0][i] = strides[i];
// Padded extent equals the logical extent, with no offset into padding.
md.layout_desc.blocking.padding_dims[i] = dim[i];
md.layout_desc.blocking.offset_padding_to_data[i] = 0;
md.dims[i] = dim[i];
}
md.layout_desc.blocking.offset_padding = 0;
return memory::desc(md);
}
/*
* Class to represent all the resources corresponding to a tensor in TensorFlow
* that are required to execute an operation (such as Convolution).
*
* Owns user_memory_, reorder_memory_ and op_md_ (heap-allocated, freed in the
* destructor); the engine pointer is borrowed, not owned.
* NOTE(review): copy construction/assignment are not disabled, so copying an
* instance would double-free these pointers -- confirm instances are never
* copied. Also note the Set* methods below allocate without freeing a prior
* allocation, so each setter is expected to be called at most once per
* object -- confirm against call sites.
*/
template <typename T>
class MklDnnData {
private:
/// MKL-DNN memory primitive for input user memory
memory* user_memory_;
/// MKL-DNN memory primitive in case input or output reorder is needed.
memory* reorder_memory_;
/// Operations memory descriptor
memory::desc* op_md_;
/// CPU engine on which operation will be executed
const engine* cpu_engine_;
public:
/// Takes a non-owning pointer to the CPU engine; memory primitives created
/// through SetUsrMem() are placed on this engine.
explicit MklDnnData(const engine* e)
: user_memory_(nullptr),
reorder_memory_(nullptr),
op_md_(nullptr),
cpu_engine_(e) {}
~MklDnnData() {
cpu_engine_ = nullptr; // We don't own this.
delete (user_memory_);
delete (reorder_memory_);
delete (op_md_);
}
/// Returns the raw (mutable) data buffer backing a TensorFlow tensor of
/// element type T. The const_cast is required because MKL-DNN memory
/// primitives take a non-const void*.
inline void* GetTensorBuffer(const Tensor* tensor) const {
CHECK_NOTNULL(tensor);
return const_cast<void*>(
static_cast<const void*>(tensor->flat<T>().data()));
}
/// Set user memory primitive using specified dimensions, memory format and
/// data_buffer. Function automatically uses element data type by using
/// input type T used for creating call object.
///
/// In a nutshell, function allows user to describe the input tensor to
/// an operation. E.g., filter of Conv2D is of shape {1, 2, 3, 4}, and
/// memory format HWIO, and the buffer that contains actual values is
/// pointed by data_buffer.
inline void SetUsrMem(const memory::dims& dim, memory::format fm,
void* data_buffer = nullptr) {
auto md = memory::desc(dim, MklDnnType<T>(), fm);
SetUsrMem(md, data_buffer);
}
/// Same as above but takes the data buffer from a TensorFlow tensor.
inline void SetUsrMem(const memory::dims& dim, memory::format fm,
const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(dim, fm, GetTensorBuffer(tensor));
}
/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
/// function such as CalculateTFStrides to compute strides
/// for given dimensions.
/// @return: memory::desc object corresponding to blocked memory format
/// for given dimensions and strides.
static inline memory::desc CreateBlockedMemDesc(const memory::dims& dim,
const memory::dims& strides) {
return CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>());
}
/// A version of SetUsrMem call that allows user to create memory in blocked
/// format. So in addition to accepting dimensions, it also accepts strides.
/// This allows user to create memory for tensor in a format that is not
/// supported by MKLDNN. E.g., MKLDNN does not support tensor format for 6
/// dimensional tensor as a native format. But by using blocked format, a user
/// can create memory for 6D tensor.
inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
void* data_buffer = nullptr) {
CHECK_EQ(dim.size(), strides.size());
auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides);
SetUsrMem(blocked_md, data_buffer);
}
/// Blocked-format variant that takes the buffer from a TensorFlow tensor.
inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(dim, strides, GetTensorBuffer(tensor));
}
/// A version of function to set user memory primitive that accepts memory
/// descriptor directly, instead of accepting dimensions and format. This
/// function is more generic than the one above, but the function above is
/// sufficient in most cases.
inline void SetUsrMem(const memory::desc& md, void* data_buffer = nullptr) {
auto pd = memory::primitive_desc(md, *cpu_engine_);
SetUsrMem(pd, data_buffer);
}
/// A version of SetUsrMem with memory descriptor and tensor
inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(md, GetTensorBuffer(tensor));
}
/// A version of function to set user memory primitive that accepts primitive
/// descriptor directly, instead of accepting dimensions and format. This
/// function is more generic than the one above, but the function above is
/// sufficient in most cases.
/// NOTE(review): a previously created user_memory_ is not freed here, so a
/// second call leaks the first allocation -- confirm single-call usage.
inline void SetUsrMem(const memory::primitive_desc& pd,
void* data_buffer = nullptr) {
CHECK_NOTNULL(cpu_engine_);
// TODO(nhasabni): can we remove dynamic memory allocation?
if (data_buffer) {
user_memory_ = new memory(pd, data_buffer);
} else {
user_memory_ = new memory(pd);
}
}
/// A version of SetUsrMem with primitive descriptor and tensor
inline void SetUsrMem(const memory::primitive_desc& pd,
const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(pd, GetTensorBuffer(tensor));
}
/// Get function for user memory primitive.
inline const memory* GetUsrMem() const { return user_memory_; }
/// Get function for primitive descriptor of user memory primitive.
inline const memory::primitive_desc GetUsrMemPrimDesc() const {
CHECK_NOTNULL(user_memory_);
return user_memory_->get_primitive_desc();
}
/// Get function for descriptor of user memory.
inline memory::desc GetUsrMemDesc() {
// This is ugly. Why MKL-DNN does not provide desc() method of const type??
// desc() is non-const in MKL-DNN, so cast away const on the local copy.
const memory::primitive_desc pd = GetUsrMemPrimDesc();
return const_cast<memory::primitive_desc*>(&pd)->desc();
}
/// Get function for data buffer of user memory primitive.
inline void* GetUsrMemDataHandle() const {
CHECK_NOTNULL(user_memory_);
return user_memory_->get_data_handle();
}
/// Set function for data buffer of user memory primitive.
inline void* SetUsrMemDataHandle(void* data_buffer) {
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(data_buffer);
return user_memory_->set_data_handle(data_buffer);
}
/// Set function for data buffer of user memory primitive.
/// Overload that takes the buffer from a TensorFlow tensor.
inline void SetUsrMemDataHandle(const Tensor* tensor) {
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(tensor);
user_memory_->set_data_handle(GetTensorBuffer(tensor));
}
/// Get the memory primitive for input and output of an op. If inputs
/// to an op require reorders, then this function returns memory primitive
/// for reorder. Otherwise, it will return memory primitive for user memory.
///
/// E.g., Conv2D(I, F) is a primitive with I and F being inputs. Then to
/// execute Conv2D, we need memory primitive for I and F. But if reorder is
/// required for I and F (say I_r is reorder primitive for I; F_r is reorder
/// primitive for F), then we need I_r and F_r to perform Conv2D.
inline const memory& GetOpMem() const {
return reorder_memory_ ? *reorder_memory_ : *user_memory_;
}
/// Set memory descriptor of an operation in terms of dimensions and memory
/// format. E.g., For Conv2D, the dimensions would be same as user dimensions
/// but memory::format would be mkldnn::any because we want MKL-DNN to choose
/// best layout/format for given input dimensions.
/// NOTE(review): a previously set op_md_ is not freed here -- confirm this
/// is called at most once per object.
inline void SetOpMemDesc(const memory::dims& dim, memory::format fm) {
// TODO(nhasabni): can we remove dynamic memory allocation?
op_md_ = new memory::desc(dim, MklDnnType<T>(), fm);
}
/// Get function for memory descriptor for an operation
inline const memory::desc& GetOpMemDesc() const { return *op_md_; }
/// Predicate that checks if we need to reorder user's memory into memory
/// pointed by op_pd.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
/// operation
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool IsReorderNeeded(const memory::primitive_desc& op_pd) const {
CHECK_NOTNULL(user_memory_);
return op_pd != user_memory_->get_primitive_desc();
}
/// Predicate that checks if we need to reorder user's memory into memory
/// based on the provided format.
///
/// @input: target_format - memory format of the given input of an
/// operation
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool IsReorderNeeded(const memory::format& target_format) const {
CHECK_NOTNULL(user_memory_);
return target_format !=
user_memory_->get_primitive_desc().desc().data.format;
}
/// Function to create a reorder from memory pointed by from to memory pointed
/// by to. Returns created primitive.
inline primitive CreateReorder(const memory* from, const memory* to) const {
CHECK_NOTNULL(from);
CHECK_NOTNULL(to);
return reorder(*from, *to);
}
/// Function to handle input reordering
///
/// Check if we need to reorder this input of an operation.
/// Return true and allocate reorder memory primitive if reorder is needed.
/// Otherwise, return false and do not allocate reorder memory primitive.
///
/// To check if reorder is needed, this function compares memory primitive
/// descriptor of an operation (op_pd) for the given input with the
/// user-specified memory primitive descriptor.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
/// operation
/// @input: net - net to which to add reorder primitive in case it is needed.
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
std::vector<primitive>* net) {
CHECK_NOTNULL(net);
CHECK_NOTNULL(user_memory_);
if (IsReorderNeeded(op_pd)) {
// TODO(nhasabni): can we remove dynamic memory allocation?
// NOTE(review): an earlier reorder_memory_ is not freed before this
// assignment -- confirm this is called at most once per object.
reorder_memory_ = new memory(op_pd);
net->push_back(CreateReorder(user_memory_, reorder_memory_));
return true;
}
return false;
}
/// Overloaded version of above function that accepts memory buffer
/// where output of reorder needs to be stored.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
/// operation
/// @reorder_data_handle - memory buffer where output of reorder needs to be
/// stored. Primitive does not check if buffer is
/// enough size to write.
/// @input: net - net to which to add reorder primitive in case it is needed.
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
void* reorder_data_handle,
std::vector<primitive>* net) {
CHECK_NOTNULL(net);
CHECK_NOTNULL(reorder_data_handle);
CHECK_NOTNULL(user_memory_);
if (IsReorderNeeded(op_pd)) {
// TODO(nhasabni): can we remove dynamic memory allocation?
reorder_memory_ = new memory(op_pd, reorder_data_handle);
net->push_back(CreateReorder(user_memory_, reorder_memory_));
return true;
}
return false;
}
/// Another overloaded version of CheckReorderToOpMem that accepts Tensor
/// where output of reorder needs to be stored.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
/// operation
/// @reorder_tensor - Tensor whose buffer is to be used to store output of
/// reorder. Primitive does not check if buffer is
/// enough size to write.
/// @input: net - net to which to add reorder primitive in case it is needed.
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
Tensor* reorder_tensor,
std::vector<primitive>* net) {
CHECK_NOTNULL(net);
CHECK_NOTNULL(reorder_tensor);
return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor), net);
}
/// Function to handle output reorder
///
/// This function performs very similar functionality as input reordering
/// function above. The only difference is that this function does not add
/// reorder primitive to the net. The reason for this is: the reorder
/// primitive for output needs to be added to the list only after operation
/// has executed. But we need to prepare a temporary buffer in case output
/// reorder is needed. And this temporary buffer will hold the output of
/// an operation before it is fed to reorder primitive.
///
/// @input memory primitive descriptor for the given output of an operation
/// @return: true in case reorder of output is needed; false, otherwise.
inline bool PrepareReorderToUserMemIfReq(
const memory::primitive_desc& op_pd) {
CHECK_NOTNULL(user_memory_);
if (IsReorderNeeded(op_pd)) {
// TODO(nhasabni): can we remove dynamic memory allocation?
reorder_memory_ = new memory(op_pd);
return true;
}
return false;
}
/// Function to actually insert reorder primitive in the net
///
/// This function completes remaining part of output reordering. It inserts
/// a reordering primitive from the temporary buffer that holds the output
/// to the user-specified output buffer.
///
/// @input: net - net to which to add reorder primitive
inline void InsertReorderToUserMem(std::vector<primitive>* net) {
CHECK_NOTNULL(net);
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(reorder_memory_);
net->push_back(CreateReorder(reorder_memory_, user_memory_));
}
};
#endif // INTEL_MKL_ML
} // namespace tensorflow
#endif // INTEL_MKL
#endif // TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
|
GB_unop__tanh_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__tanh_fp32_fp32)
// op(A') function: GB (_unop_tran__tanh_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = tanhf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = tanhf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = tanhf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TANH || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply cij = tanhf (aij) elementwise over anz entries.
// This file is auto-generated (see the header comment); keep edits minimal.
// Returns GrB_NO_VALUE when the operator is compile-time disabled via
// GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB (_unop_apply__tanh_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/full case: every entry is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = tanhf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present in the bitmap
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = tanhf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = tanhf (A'): transpose A, casting and applying the unary operator.
// The actual transpose kernel lives in GB_unop_transpose.c, which is
// textually included here and driven by the GB_* macros defined above.
// Returns GrB_NO_VALUE when the operator is compile-time disabled.
GrB_Info GB (_unop_tran__tanh_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mea_pb_traco.c | #include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <omp.h>
#include <math.h>
#define min(a,b) (((a)<(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b))
#define max(a,b) (((a)>(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
double ** Q;
double ** Qbp;
double ** Pbp;
double ** Pu;
double ** M;
int Ebp = 0; // Energy weight of base pair -2, -1, 0, 1, 2
int RT = 1; // 'Normalized' temperature 1,2,3,4,5
float ERT;
int l = 0; //minimum loop length 0-5
int delta = 1; // Base pair weighting 1-5
char * RNA; //only ACGU
int N;
int DIM;
#include "../mem.h"
/* paired: return 1 if bases RNA[i] and RNA[j] form a canonical Watson-Crick
 * pair (A-U, G-C) or a G-U wobble pair, 0 otherwise.
 * Reads the global sequence RNA (expected alphabet: A, C, G, U). */
int paired(int i, int j) {
    char a = RNA[i];
    char b = RNA[j];
    switch (a) {
        case 'A': return b == 'U';
        case 'U': return b == 'A' || b == 'G';
        case 'G': return b == 'C' || b == 'U';
        case 'C': return b == 'G';
        default:  return 0;
    }
}
/* Benchmark driver: computes RNA base-pair probabilities Pbp with one of
 * several loop-nest variants selected by argv[3] ("kind"):
 *   1 = original (naive) loop nest, 2 = Pluto-tiled, 3 = TRACO-tiled,
 *   4 = reserved (empty). argv[1] = thread count, argv[2] = sequence length.
 * Prints the elapsed wall-clock time and exits; the Pu/Puu/M phases after
 * exit(0) are dead code kept for reference. */
int main(int argc, char *argv[]){
int num_proc=1;
int i,j,k,ll,p,q;
int c0, c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c15;
int t1, t2, t3, t4, t5, t6,t7;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
ERT = exp((float)-Ebp/(float)RT);
// NOTE(review): srand(time(NULL)) requires <time.h>, which is not among
// the visible includes -- confirm it is pulled in via ../mem.h.
srand(time(NULL));
if(argc > 1)
num_proc = atoi(argv[1]);
int kind=1;
N = 8;
DIM = 12;
if(argc > 2)
N = atoi(argv[2]);
DIM = N+10;
if(argc > 3)
kind = atoi(argv[3]);
omp_set_num_threads(num_proc);
//printf(" -exp(Ebp/RT) = %5.3f\n", ERT);
// NOTE(review): sizeof(char*) over-allocates (sizeof(char) intended);
// harmless but wasteful -- confirm before changing.
RNA = (char*) malloc(DIM * sizeof(char*)); //read from FASTA file
rand_seq(RNA, N);
//printf("Sequence: ");
//for(i=0; i<N; i++)
// printf("%c", RNA[i]);
//printf("\n\n");
// Allocate and initialize the DP arrays (helpers come from ../mem.h).
Q = memd();
Qbp = memd();
Pbp = memd();
Pu = memd();
M = memd();
rna_array_init(Q, 1, 1);
rna_array_init(Qbp, 0, 0);
rna_array_init(Pbp, 0, 0);
rna_array_init(Pu, 0, 0);
rna_array_init(M, 0, 0);
double start = omp_get_wtime();
// compute the partition functions Q and Qbp
// kind==1: reference (untiled) computation of Pbp.
if(kind==1){
#pragma scop
for(i=0; i<N; i++){
for(j=i+1; j<N; j++){
Pbp[i][j] = (Q[0][i]*Q[j][N-1]*Qbp[i][j])/Q[0][N-1]; // Pbp[i][j] = (Q[1][i]*Q[j+1][N]*Qbp[i][j])/Q[0][N-1];
for(p=0; p<i; p++){
for(q=j+1; q<N; q++){
Pbp[i][j] += (Pbp[p][q] * ERT * Q[p+1][i] * Qbp[i][j] * Q[j+1][q-1]) / (Qbp[p][q] ==0 ? 1 : Qbp[p][q]) ;
}
}
}
}
#pragma endscop
}
// kind==2: Pluto-generated tiled variant (tile size 16).
if(kind==2) // pluto
{
printf("pluto\n");
// Phase 1: initialize Pbp[i][j] for all i<j (tiled, vectorized).
lbp=0;
ubp=floord(N-2,16);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=t2;t3<=floord(N-1,16);t3++) {
for (t4=16*t2;t4<=min(min(N-2,16*t2+15),16*t3+14);t4++) {
lbv=max(16*t3,t4+1);
ubv=min(N-1,16*t3+15);
#pragma ivdep
#pragma vector always
for (t5=lbv;t5<=ubv;t5++) {
Pbp[t4][t5] = (Q[0][t4]*Q[t5][N-1]*Qbp[t4][t5])/Q[0][N-1];;
}
}
}
}
/*
for (t2=0;t2<=floord(N-4,8);t2++) {
lbp=max(0,ceild(16*t2-N+3,16));
ubp=floord(t2,2);
#pragma omp parallel for private(lbv,ubv,t4,t5,t6,t7)
for (t3=lbp;t3<=ubp;t3++) {
for (t4=max(16*t2-16*t3,16*t3+1);t4<=min(N-3,16*t2-16*t3+15);t4++) {
for (t5=16*t3;t5<=min(16*t3+15,t4-1);t5++) {
for (t6=t4+1;t6<=N-2;t6++) {
for (t7=t6+1;t7<=N-1;t7++) {
Pbp[t4][t6] += (Pbp[t5][t7] * ERT * Q[t5+1][t4] * Qbp[t4][t6] * Q[t6+1][t7-1]) / (Qbp[t5][t7] ==0 ? 1 : Qbp[t5][t7]);
}
}
}
}
}
}
*/
// Phase 2: accumulate the pair-enclosure contributions (wavefront t1).
if (N >= 4) {
for (t1=1;t1<=floord(17*N-52,16);t1++) {
lbp=max(0,t1-N+3);
ubp=floord(t1-1,17);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6)
for (t2=lbp;t2<=ubp;t2++) {
for (t4=16*t2;t4<=min(16*t2+15,t1-t2-1);t4++) {
for (t5=t1-t2+1;t5<=N-2;t5++) {
for (t6=t5+1;t6<=N-1;t6++) {
Pbp[(t1-t2)][t5] += (Pbp[t4][t6] * ERT * Q[t4+1][(t1-t2)] * Qbp[(t1-t2)][t5] * Q[t5+1][t6-1]) / (Qbp[t4][t6] ==0 ? 1 : Qbp[t4][t6]);
}
}
}
}
}
}
}
// kind==3: TRACO-generated tiled variant (tile size 16).
if(kind==3) // traco
{
printf("traco\n");
// Phase 1: same tiled initialization of Pbp as the pluto variant.
lbp=0;
ubp=floord(N-2,16);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=t2;t3<=floord(N-1,16);t3++) {
for (t4=16*t2;t4<=min(min(N-2,16*t2+15),16*t3+14);t4++) {
lbv=max(16*t3,t4+1);
ubv=min(N-1,16*t3+15);
#pragma ivdep
#pragma vector always
for (t5=lbv;t5<=ubv;t5++) {
Pbp[t4][t5] = (Q[0][t4]*Q[t5][N-1]*Qbp[t4][t5])/Q[0][N-1];;
}
}
}
}
// Phase 2: TRACO schedule; rows c1 are processed serially, tiles within
// a row in parallel with dynamic scheduling.
for( c1 = 1; c1 < N - 2; c1 += 1)
#pragma omp parallel for schedule(dynamic, 1)
for( c3 = 0; c3 <= (N - c1 - 3) / 16; c3 += 1)
for( c5 = 0; c5 <= (c1 - 1) / 16; c5 += 1)
for( c7 = 0; c7 <= -c3 + (N - c1 - 3) / 16; c7 += 1)
for( c11 = c1 + 16 * c3 + 1; c11 <= min(c1 + 16 * c3 + 16, N - 16 * c7 - 2); c11 += 1) {
if (N >= 16 * c7 + c11 + 18) {
// full interior tile
for( c15 = 16 * c7 + c11 + 1; c15 <= 16 * c7 + c11 + 16; c15 += 1)
Pbp[c1][c11] += (Pbp[16*c5][c15] * ERT * Q[16*c5+1][c1] * Qbp[c1][c11] * Q[c11+1][c15-1]) / (Qbp[16*c5][c15] ==0 ? 1 : Qbp[16*c5][c15]) ;
} else {
// boundary tile: handle the ragged remainder explicitly
for( c13 = 16 * c5; c13 <= min(c1 - 1, 16 * c5 + 15); c13 += 1) {
if (c13 >= 16 * c5 + 1)
for( c15 = c11 + 1; c15 <= 16 * c7 + c11; c15 += 1)
Pbp[c1][c11] += (Pbp[c13][c15] * ERT * Q[c13+1][c1] * Qbp[c1][c11] * Q[c11+1][c15-1]) / (Qbp[c13][c15] ==0 ? 1 : Qbp[c13][c15]) ;
for( c15 = 16 * c7 + c11 + 1; c15 < N; c15 += 1)
Pbp[c1][c11] += (Pbp[c13][c15] * ERT * Q[c13+1][c1] * Qbp[c1][c11] * Q[c11+1][c15-1]) / (Qbp[c13][c15] ==0 ? 1 : Qbp[c13][c15]) ;
}
}
}
}
if(kind==4) // traco tstile
{
}
double stop = omp_get_wtime();
printf("%.4f\n",stop - start);
//printf("Q\n");
//rna_array_print(Q);
//printf("Qbp\n");
//rna_array_print(Qbp);
// Everything below this exit(0) is intentionally dead code, preserved as
// the reference formulation of the Pu / Puu / M phases.
exit(0);
printf("Pbp\n");
rna_array_print(Pbp);
#pragma scop
for(i=N-1; i>=0; i--){
for(j=i+1; j<N; j++){
Pu[i][j] = (Q[0][i]*Q[j][N-1]*1)/Q[0][N-1];
for(p=0; p<i; p++){
for(q=j+1; q<N; q++){
Pu[i][j] += (Pbp[p][q] * ERT * Q[p+1][i] * 1 * Q[j+1][q-1]) / (Qbp[p][q] ==0 ? 1 : Qbp[p][q]) ;
}
}
}
}
#pragma endscop
printf("Pu\n");
rna_array_print(Pu);
double * Puu = (double*)malloc(DIM * sizeof(double));
#pragma scop
for(i=0; i<=N; i++){
Puu[i] = 1;
for(j=i+1; j<N; j++){
Puu[i] += -1 * Pbp[i][j+1];
}
for(k=0; k<i; k++){
Puu[i] += -1 * Pbp[k][i+1];
}
}
#pragma endscop
printf("Puu\n");
for(i=0; i<N-1; i++)
printf("%3.3f ", Puu[i]);
printf("\n");
#pragma scop
for(i=N-1; i>=0; i--){
for(j=i+1; j<N; j++){
for(k=0; k<j-i-l; k++){
M[i][j] = MAX(M[i][j], M[i][k+i-1] + M[k+i+1][j-1] + delta*Pbp[k+i][j])*paired(k+i,j-1);
}
M[i][j] = MAX(M[i][j], M[i][j-1] + Puu[j-1]);
}
}
#pragma endscop
printf("M\n");
rna_array_print(M);
return 0;
}
|
filter.c | #ifndef _FILTER_
#define _FILTER_
#include "Parameter_files/INIT_PARAMS.H"
/*
Function FILTER filters the k-space box, <box>, using filter type
<filter_type> on a characteristic comoving scale <R> (in Mpc), where:
0 = top-hat real space filter
1 = top-hat k-space filter
2 = gaussian
Relevant box parameters are taken from INIT_PARAMS.H
The function returns the filtered k field, <box>.
*/
/* Filter the k-space box in place on comoving scale R (Mpc).
 * filter_type: 0 = real-space top-hat, 1 = k-space top-hat, 2 = gaussian;
 * any other value leaves the box unfiltered (warning printed once).
 * DIM, MIDDLE, DELTA_K, E and C_INDEX come from INIT_PARAMS.H.
 * NOTE(review): the complex `*=` implies fftwf_complex is the C99
 * float complex type here -- confirm <complex.h> is included first. */
void filter(fftwf_complex *box, int filter_type, float R){
int n_x, n_z, n_y;
float k_x, k_y, k_z, k_mag, kR;
// loop through k-box
#pragma omp parallel shared(box, filter_type, R) private(k_x, k_y, k_z, k_mag, kR, n_x, n_z, n_y)
{
#pragma omp for
for (n_x=0; n_x<DIM; n_x++){
// map FFT index to signed wavenumber (negative frequencies above MIDDLE)
if (n_x>MIDDLE) {k_x =(n_x-DIM) * DELTA_K;}
else {k_x = n_x * DELTA_K;}
for (n_y=0; n_y<DIM; n_y++){
if (n_y>MIDDLE) {k_y =(n_y-DIM) * DELTA_K;}
else {k_y = n_y * DELTA_K;}
// z runs only to MIDDLE: real-to-complex FFT stores the half box
for (n_z=0; n_z<=MIDDLE; n_z++){
k_z = n_z * DELTA_K;
k_mag = sqrt(k_x*k_x + k_y*k_y + k_z*k_z);
kR = k_mag*R; // dimensionless k*R used by all filter types below
if (filter_type == 0){ // real space top-hat
// spherical top-hat window W(kR) = 3 (sin kR - kR cos kR)/kR^3;
// skip tiny kR where W -> 1 and the formula is numerically unstable
if (kR > 1e-4){
box[C_INDEX(n_x, n_y, n_z)] *= 3.0 * (sin(kR)/pow(kR, 3) - cos(kR)/pow(kR, 2));
}
}
else if (filter_type == 1){ // k-space top hat
kR *= 0.413566994; // equates integrated volume to the real space top-hat (9pi/2)^(-1/3)
if (kR > 1){
box[C_INDEX(n_x, n_y, n_z)] = 0;
}
}
else if (filter_type == 2){ // gaussian
kR *= 0.643; // equates integrated volume to the real space top-hat
box[C_INDEX(n_x, n_y, n_z)] *= pow(E, -kR*kR/2.0);
}
else{
// warn exactly once (at the zero mode) about unknown filter types
if ( (n_x==0) && (n_y==0) && (n_z==0) )
fprintf(stderr, "filter.c: Warning, filter type %i is undefined\nBox is unfiltered\n", filter_type);
}
}
}
} // end looping through k box
}
return;
}
#endif
|
fox_floats_timer_caching_omp_fileIO_benchmark.c | /* fox_floats_timer_caching_omp_fileIO_benchmark.c -- uses Fox's algorithm to multiply two square matrices
*
* Implementation of parallel matrix multiplication:
* LaTeX: $C_{i,j} = \sum_{k} A_{i,k}B_{k,j}$
*
* Input:
* Input Matrix file name: A.dat, B.dat
*
* Output:
* Output Matrix file name: C.dat
* Output Sub-matrices file name: SubMatrices.dat
*
* Notes:
* 1. Assumes the number of processes is a perfect square
* 2. The array member of the matrices is statically allocated
*
* See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI
*/
/* Compiler command:
* mpiicc -O3 -qopenmp -qopt-report-phase=vec -qopt-report=3 fox_floats_timer_caching_omp_fileIO_benchmark.c
* -o fox_floats_timer_caching_omp_fileIO_benchmark
*
* Run command:
* mpirun -n -4 ./fox_floats_timer_caching_omp
*/
/* Head files */
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <mpi.h>
#include <omp.h>
// define problem scale, matrix row/col size
#define PROBLEM_SCALE 4096
// define whether or not to print matrices on the command line
#define PRINT_A 0
#define PRINT_B 0
#define PRINT_C 0
#define PRINT_LOCAL_A 0
#define PRINT_LOCAL_B 0
#define PRINT_LOCAL_C 0
// define float precision, 4 byte single-precision float or 8 byte double-precision float
#define FLOAT double
#define FLOAT_MPI MPI_DOUBLE
// Define number of OpenMP threads used in the computation
#define NUM_THREADS 8
// Define threads affinity "scatter" or "compact"
#define AFFINITY "KMP_AFFINITY = compact"
/* Type define structure of process grid */
typedef struct {
int p; /* Total number of processes */
MPI_Comm comm; /* Communicator for entire grid */
MPI_Comm row_comm; /* Communicator for my row */
MPI_Comm col_comm; /* Communicator for my col */
int q; /* Order of grid */
int my_row; /* My row number */
int my_col; /* My column number */
int my_rank; /* My rank in the grid comm */
} GRID_INFO_T;
/* Type define structure of local matrix */
#define MAX 2097152 // Maximum number of elements in the array that store the local matrix (2^21)
typedef struct {
int n_bar;
#define Order(A) ((A)->n_bar) // defination with parameters
FLOAT entries[MAX];
#define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j))) // defination with parameters, Array dereference
} LOCAL_MATRIX_T;
/* Function Declarations */
LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar);
void Free_local_matrix(LOCAL_MATRIX_T** local_A);
void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid, int n); // Read matrix A from a file
void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B, // for continuous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k)
GRID_INFO_T* grid, int n); // Read matrix B from a file
void Print_matrix_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid, int n); // Print matrix A in the command line
void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid, int n); // Print matrix B in the command line
void Print_matrix_C(char* title, LOCAL_MATRIX_T* local_C,
GRID_INFO_T* grid, int n); // Print matrix C in the command line
void Set_to_zero(LOCAL_MATRIX_T* local_A);
void Local_matrix_multiply(LOCAL_MATRIX_T* local_A,
LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);
void Build_matrix_type(LOCAL_MATRIX_T* local_A);
MPI_Datatype local_matrix_mpi_t;
LOCAL_MATRIX_T* temp_mat; // global LOCAL_MATRIX_T* type pointer
void Print_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid);
void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid);
void Print_local_matrices_C(char* title, LOCAL_MATRIX_T* local_B,
GRID_INFO_T* grid);
void Write_matrix_C(char* title, LOCAL_MATRIX_T* local_C,
GRID_INFO_T* grid, int n); // Write matrix multiplication to a file
void Write_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid); // Write local matrix A to a file
void Write_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid); // Write local matrix B to a file
void Write_local_matrices_C(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid); // Write local matrix C to a file
/*********************************************************/
main(int argc, char* argv[]) {
FILE *fp;
int p;
int my_rank;
GRID_INFO_T grid;
LOCAL_MATRIX_T* local_A;
LOCAL_MATRIX_T* local_B;
LOCAL_MATRIX_T* local_C;
int n;
int n_bar;
double timer_start;
double timer_end;
int content;
int i;
int j;
void Setup_grid(GRID_INFO_T* grid);
void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A,
LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);
// Matrix Generator
fp = fopen("A.dat", "w"); // Generate and print matrix A into a file
for (i = 0; i < PROBLEM_SCALE; i++) {
for (j = 0; j < PROBLEM_SCALE; j++)
if(i == j){
fprintf(fp,"%d ", 1);
}
else {
fprintf(fp,"%d ", 0);
}
fprintf(fp,"\n");
}
fclose(fp);
fp = fopen("B.dat", "w"); // Generate and print matrix B into a file
for (i = 0; i < PROBLEM_SCALE; i++){
for (j = 0; j < PROBLEM_SCALE; j++)
fprintf(fp,"%d ", (i*PROBLEM_SCALE)+j);
fprintf(fp, "\n");
}
fclose(fp);
// SPMD Mode start from here (Processess fork from here)
MPI_Init(&argc, &argv); // MPI initializing
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator
// Initial OpenMP Environment
omp_set_num_threads(NUM_THREADS);
kmp_set_defaults(AFFINITY);
Setup_grid(&grid); // Set up Processess grid
if (my_rank == 0) {
fp = fopen("A.dat","r");
n = 0;
while((content = fgetc(fp)) != EOF)
{
//printf("fgetc = %d\n", content);
if(content != 0x20 && content != 0x0A) n++;
}
fclose(fp);
n = (int) sqrt((double) n);
printf("We read the order of the matrices from A.dat is\n %d\n", n);
// while(fgetc(fp) != EOF) n++;
// printf("What's the order of the matrices?\n");
// scanf("%d", &n); // Overall Matrix's Order
}
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI broadcast the overall matrix's order
n_bar = n/grid.q; // \bar n is the local matrix's order
local_A = Local_matrix_allocate(n_bar); // Allocate local matrix A
Order(local_A) = n_bar; // Local matrix A's order
Read_matrix_A("Read A from A.dat", local_A, &grid, n); // Read local matrices A from process 0 by using stdin, and send them to each process (Procedure)
if (PRINT_A == 1)
Print_matrix_A("We read A =", local_A, &grid, n);// Print local matrices A from process 0 by using stdout, and send them to each process (Procedure)
local_B = Local_matrix_allocate(n_bar); // Allocate local matrix
Order(local_B) = n_bar; // Local matrix B's order
Read_matrix_B("Read B from B.dat", local_B, &grid, n); // Read local matrix B as it's local transpose from process 0 by using stdin, and send them to each process (Procedure)
if (PRINT_B == 1)
Print_matrix_B("We read B =", local_B, &grid, n);// Print local matrix B as it's local transpose from process 0 by using stdout, and send them to each process (Procedure)
Build_matrix_type(local_A); // Buid local_A's MPI matrix data type
temp_mat = Local_matrix_allocate(n_bar); // Allocate temporary matrix of order n $\time$ n
local_C = Local_matrix_allocate(n_bar); // Allocate matrix local_C
Order(local_C) = n_bar; // Set matrix local_C's order
MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier
timer_start = MPI_Wtime(); // Get the MPI wall time
Fox(n, &grid, local_A, local_B, local_C); // FOX parallel matrix multiplication Algorithm implement function
timer_end = MPI_Wtime(); // Get the MPI wall time
MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier
Write_matrix_C("Write C into the C.dat", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result)
if (PRINT_C == 1)
Print_matrix_C("The product is", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result)
Write_local_matrices_A("Write split of local matrix A into local_A.dat",
local_A, &grid); // Write local matrix A into file
if (PRINT_LOCAL_A == 1)
Print_local_matrices_A("Split of local matrix A",
local_A, &grid); // Print matrix A split in processess
Write_local_matrices_B("Write split of local matrix B into local_B.dat",
local_B, &grid); // Write local matrix B into file, special for row-major storage
if (PRINT_LOCAL_B == 1)
Print_local_matrices_B("Split of local matrix B",
local_B, &grid); // Print matrix B split in processess, special for row-major storage
Write_local_matrices_C("Write split of local matrix C into local_C.dat",
local_C, &grid); // Print matrix C split in processess
if (PRINT_LOCAL_C == 1)
Print_local_matrices_C("Split of local matrix C",
local_C, &grid); // Print matrix C split in processess
Free_local_matrix(&local_A); // Free local matrix local_A
Free_local_matrix(&local_B); // Free local matrix local_B
Free_local_matrix(&local_C); // Free local matrix local_C
if(my_rank == 0)
printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n", timer_end-timer_start);
MPI_Finalize(); // MPI finalize, processes join and resource recycle
} /* main */
/*********************************************************/
void Setup_grid(
GRID_INFO_T* grid /* out */) {
int old_rank;
int dimensions[2];
int wrap_around[2];
int coordinates[2];
int free_coords[2];
/* Set up Global Grid Information */
MPI_Comm_size(MPI_COMM_WORLD, &(grid->p));
MPI_Comm_rank(MPI_COMM_WORLD, &old_rank);
/* We assume p is a perfect square */ // but what if it's not a perfect square
grid->q = (int) sqrt((double) grid->p);
dimensions[0] = dimensions[1] = grid->q;
/* We want a circular shift in second dimension. */
/* Don't care about first */
wrap_around[0] = wrap_around[1] = 1;
MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions,
wrap_around, 1, &(grid->comm));
MPI_Comm_rank(grid->comm, &(grid->my_rank));
MPI_Cart_coords(grid->comm, grid->my_rank, 2,
coordinates);
grid->my_row = coordinates[0];
grid->my_col = coordinates[1];
/* Set up row communicators */
free_coords[0] = 0;
free_coords[1] = 1;
MPI_Cart_sub(grid->comm, free_coords,
&(grid->row_comm));
/* Set up column communicators */
free_coords[0] = 1;
free_coords[1] = 0;
MPI_Cart_sub(grid->comm, free_coords,
&(grid->col_comm));
} /* Setup_grid */
/*********************************************************/
void Fox(
int n /* in */,
GRID_INFO_T* grid /* in */,
LOCAL_MATRIX_T* local_A /* in */,
LOCAL_MATRIX_T* local_B /* in */,
LOCAL_MATRIX_T* local_C /* out */) {
LOCAL_MATRIX_T* temp_A; /* Storage for the sub- */
/* matrix of A used during */
/* the current stage */
int stage;
int bcast_root;
int n_bar; /* n/sqrt(p) */
int source;
int dest;
MPI_Status status;
n_bar = n/grid->q;
Set_to_zero(local_C);
/* Calculate addresses for row circular shift of B */
source = (grid->my_row + 1) % grid->q;
dest = (grid->my_row + grid->q - 1) % grid->q;
/* Set aside storage for the broadcast block of A */
temp_A = Local_matrix_allocate(n_bar);
for (stage = 0; stage < grid->q; stage++) {
bcast_root = (grid->my_row + stage) % grid->q;
if (bcast_root == grid->my_col) { // Process P_{ii} broadcast A_{ii} in process gird's row commnunicator
MPI_Bcast(local_A, 1, local_matrix_mpi_t,
bcast_root, grid->row_comm);
Local_matrix_multiply(local_A, local_B,
local_C);
} else { // temp_A is a buffer for process P_{ij} to store A_{ij}
MPI_Bcast(temp_A, 1, local_matrix_mpi_t,
bcast_root, grid->row_comm);
Local_matrix_multiply(temp_A, local_B,
local_C);
}
MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t, // MPI send and receive with single buffer
dest, 0, source, 0, grid->col_comm, &status); // Circular shift of process grid B's row, after local multiplication operation
} /* for */
} /* Fox */
/*********************************************************/
LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) {
LOCAL_MATRIX_T* temp;
temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T));
return temp;
} /* Local_matrix_allocate */
/*********************************************************/
void Free_local_matrix(
LOCAL_MATRIX_T** local_A_ptr /* in/out */) {
free(*local_A_ptr);
} /* Free_local_matrix */
/*********************************************************/
/* Read and distribute matrix for matrix A:
* foreach global row of the matrix,
* foreach grid column
* read a block of n_bar floats on process 0
* and send them to the appropriate process.
*/
void Read_matrix_A(
char* prompt /* in */,
LOCAL_MATRIX_T* local_A /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
FILE *fp;
int mat_row, mat_col;
int grid_row, grid_col;
int dest;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess
fp = fopen("A.dat","r");
temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT));
printf("%s\n", prompt);
fflush(stdout);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_A);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &dest);
if (dest == 0) {
for (mat_col = 0; mat_col < Order(local_A); mat_col++)
fscanf(fp, "%lf",
(local_A->entries)+mat_row*Order(local_A)+mat_col);
/* scanf("%lf",
(local_A->entries)+mat_row*Order(local_A)+mat_col);
*/
} else {
for(mat_col = 0; mat_col < Order(local_A); mat_col++)
fscanf(fp,"%lf", temp + mat_col);
// scanf("%lf", temp + mat_col);
MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0,
grid->comm);
}
}
}
free(temp);
fclose(fp);
} else { // Other processess receive matrix from process 0
for (mat_row = 0; mat_row < Order(local_A); mat_row++)
MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A),
FLOAT_MPI, 0, 0, grid->comm, &status);
}
} /* Read_matrix */
/*********************************************************/
/* Read and distribute matrix for local matrix B's transpose:
* foreach global row of the matrix,
* foreach grid column
* read a block of n_bar floats on process 0
* and send them to the appropriate process.
*/
void Read_matrix_B(
char* prompt /* in */,
LOCAL_MATRIX_T* local_B /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
FILE *fp;
int mat_row, mat_col;
int grid_row, grid_col;
int dest;
int coords[2];
FLOAT *temp;
MPI_Status status;
if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess
fp = fopen("B.dat","r");
temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
printf("%s\n", prompt);
fflush(stdout);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_B);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &dest);
if (dest == 0) { // process 0 (local)
for (mat_col = 0; mat_col < Order(local_B); mat_col++)
fscanf(fp, "%lf",
(local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage
/* scanf("%lf",
(local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage
*/
/* scanf("%lf",
(local_A->entries)+mat_row*Order(local_A)+mat_col); */
} else {
for(mat_col = 0; mat_col < Order(local_B); mat_col++)
fscanf(fp, "%lf", temp + mat_col);
// scanf("%lf", temp + mat_col);
MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0,
grid->comm);
}
}
}
free(temp);
fclose(fp);
} else { // Other processess receive matrix from process 0
temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); // switch rows and colums in local_B, for column major storage
for (mat_col = 0; mat_col < Order(local_B); mat_col++) {
MPI_Recv(temp, Order(local_B),
FLOAT_MPI, 0, 0, grid->comm, &status); // switch rows and colums in local_B, for column major storage
for(mat_row = 0; mat_row < Order(local_B); mat_row++)
Entry(local_B, mat_row, mat_col) = *(temp + mat_row); // switch rows and colums in local_B, for column major storage
/* MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A),
FLOAT_MPI, 0, 0, grid->comm, &status); */
}
free(temp);
}
} /* Read_matrix_B */
/*********************************************************/
/* Recive and Print Matrix A:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
void Print_matrix_A(
char* title /* in */,
LOCAL_MATRIX_T* local_A /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
int mat_row, mat_col;
int grid_row, grid_col;
int source;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) {
temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT));
printf("%s\n", title);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_A);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &source);
if (source == 0) {
for(mat_col = 0; mat_col < Order(local_A); mat_col++)
printf("%20.15E ", Entry(local_A, mat_row, mat_col));
} else {
MPI_Recv(temp, Order(local_A), FLOAT_MPI, source, 0,
grid->comm, &status);
for(mat_col = 0; mat_col < Order(local_A); mat_col++)
printf("%20.15E ", temp[mat_col]);
}
}
printf("\n");
}
free(temp);
} else {
for (mat_row = 0; mat_row < Order(local_A); mat_row++)
MPI_Send(&Entry(local_A, mat_row, 0), Order(local_A),
FLOAT_MPI, 0, 0, grid->comm);
}
} /* Print_matrix_A */
/*********************************************************/
/* Recive and Print Matrix for local matrix B's transpose:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
void Print_matrix_B(
char* title /* in */,
LOCAL_MATRIX_T* local_B /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
int mat_row, mat_col;
int grid_row, grid_col;
int source;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) {
temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
printf("%s\n", title);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_B);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &source);
if (source == 0) {
for(mat_col = 0; mat_col < Order(local_B); mat_col++)
printf("%20.15E ", Entry(local_B, mat_col, mat_row)); // switch rows and colums in local_B, for column major storage
// printf("%20.15E ", Entry(local_A, mat_row, mat_col));
} else {
MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0,
grid->comm, &status);
for(mat_col = 0; mat_col < Order(local_B); mat_col++)
printf("%20.15E ", temp[mat_col]);
}
}
printf("\n");
}
free(temp);
} else {
temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
for (mat_col = 0; mat_col < Order(local_B); mat_col++) {
for(mat_row = 0; mat_row < Order(local_B); mat_row++)
*(temp+mat_row) = Entry(local_B, mat_row, mat_col); // switch rows and colums in local_B, for column major storage
MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm);
}
free(temp);
}
} /* Print_matrix_B */
/*********************************************************/
/* Recive and Print Matrix A:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
void Print_matrix_C(
char* title /* in */,
LOCAL_MATRIX_T* local_C /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
int mat_row, mat_col;
int grid_row, grid_col;
int source;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) {
temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT));
printf("%s\n", title);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_C);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &source);
if (source == 0) {
for(mat_col = 0; mat_col < Order(local_C); mat_col++)
printf("%20.15E ", Entry(local_C, mat_row, mat_col));
} else {
MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0,
grid->comm, &status);
for(mat_col = 0; mat_col < Order(local_C); mat_col++)
printf("%20.15E ", temp[mat_col]);
}
}
printf("\n");
}
free(temp);
} else {
for (mat_row = 0; mat_row < Order(local_C); mat_row++)
MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C),
FLOAT_MPI, 0, 0, grid->comm);
}
} /* Print_matrix_C */
/*********************************************************/
/* Recive and Write Matrix C into a file:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
void Write_matrix_C(
char* title /* in */,
LOCAL_MATRIX_T* local_C /* out */,
GRID_INFO_T* grid /* in */,
int n /* in */) {
FILE *fp;
int mat_row, mat_col;
int grid_row, grid_col;
int source;
int coords[2];
FLOAT* temp;
MPI_Status status;
if (grid->my_rank == 0) {
fp = fopen("C.dat", "w+");
temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT));
printf("%s\n", title);
for (mat_row = 0; mat_row < n; mat_row++) {
grid_row = mat_row/Order(local_C);
coords[0] = grid_row;
for (grid_col = 0; grid_col < grid->q; grid_col++) {
coords[1] = grid_col;
MPI_Cart_rank(grid->comm, coords, &source);
if (source == 0) {
for(mat_col = 0; mat_col < Order(local_C); mat_col++)
fprintf(fp, "%20.15E ", Entry(local_C, mat_row, mat_col));
// printf("%20.15E ", Entry(local_A, mat_row, mat_col));
} else {
MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0,
grid->comm, &status);
for(mat_col = 0; mat_col < Order(local_C); mat_col++)
fprintf(fp, "%20.15E ", temp[mat_col]);
// printf("%20.15E ", temp[mat_col]);
}
}
fprintf(fp,"\n");
}
free(temp);
fclose(fp);
} else {
for (mat_row = 0; mat_row < Order(local_C); mat_row++)
MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C),
FLOAT_MPI, 0, 0, grid->comm);
}
} /* Write_matrix_C */
/*********************************************************/
/*
* Set local matrix's element to zero
*/
void Set_to_zero(
LOCAL_MATRIX_T* local_A /* out */) {
int i, j;
for (i = 0; i < Order(local_A); i++)
for (j = 0; j < Order(local_A); j++)
Entry(local_A,i,j) = 0.0E0;
} /* Set_to_zero */
/*********************************************************/
void Build_matrix_type(
LOCAL_MATRIX_T* local_A /* in */) {
MPI_Datatype temp_mpi_t;
int block_lengths[2];
MPI_Aint displacements[2];
MPI_Datatype typelist[2];
MPI_Aint start_address;
MPI_Aint address;
MPI_Type_contiguous(Order(local_A)*Order(local_A),
FLOAT_MPI, &temp_mpi_t); // Creates a contiguous datatype
/*
Synopsis
int MPI_Type_contiguous(int count,
MPI_Datatype oldtype,
MPI_Datatype *newtype)
Input Parameters
count
replication count (nonnegative integer)
oldtype
old datatype (handle)
*/
block_lengths[0] = block_lengths[1] = 1;
typelist[0] = MPI_INT;
typelist[1] = temp_mpi_t;
MPI_Address(local_A, &start_address); // Gets the address of a location in caller's memory
MPI_Address(&(local_A->n_bar), &address);
/*
Synopsis
int MPI_Address(const void *location, MPI_Aint *address)
Input Parameters
location
location in caller memory (choice)
Output Parameters
address
address of location (address integer)
*/
displacements[0] = address - start_address;
MPI_Address(local_A->entries, &address);
displacements[1] = address - start_address;
MPI_Type_struct(2, block_lengths, displacements,
typelist, &local_matrix_mpi_t); // Creates a struct datatype
/*
Synopsis
int MPI_Type_struct(int count,
const int *array_of_blocklengths,
const MPI_Aint *array_of_displacements,
const MPI_Datatype *array_of_types,
MPI_Datatype *newtype)
Input Parameters
count
number of blocks (integer) -- also number of entries in arrays array_of_types , array_of_displacements and array_of_blocklengths
array_of_blocklengths
number of elements in each block (array)
array_of_displacements
byte displacement of each block (array)
array_of_types
type of elements in each block (array of handles to datatype objects)
Output Parameters
newtype
new datatype (handle)
*/
MPI_Type_commit(&local_matrix_mpi_t); // Commits the datatype
/*
Synopsis
int MPI_Type_commit(MPI_Datatype *datatype)
Input Parameters
datatype
datatype (handle)
*/
} /* Build_matrix_type */
/*********************************************************/
/* local matrix multiplication function
* withing OpenMP Thread Acceleration
*/
void Local_matrix_multiply(
LOCAL_MATRIX_T* local_A /* in */,
LOCAL_MATRIX_T* local_B /* in */,
LOCAL_MATRIX_T* local_C /* out */) {
int i, j, k;
// int my_rank;
// MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator
#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) // Threads acceleration upgrade, parallel task split
for (i = 0; i < Order(local_A); i++) {
// printf("Current in the Fox Kernel:\n my process id is %d, my thread id is %d\n",my_rank,omp_get_thread_num());
for (j = 0; j < Order(local_A); j++)
for (k = 0; k < Order(local_B); k++)
Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage
+ Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k)
/* Entry(local_C,i,j) = Entry(local_C,i,j)
+ Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper
*/
}
} /* Local_matrix_multiply */
/*********************************************************/
/* Recive and Print Local Matrix A:
* Process 0 print local matrix local_A
* Other Processess send local matrix local_A to process 0
* And process 0 receive local matrix local_A from other processess
*/
void Print_local_matrices_A(
char* title /* in */,
LOCAL_MATRIX_T* local_A /* in */,
GRID_INFO_T* grid /* in */) {
int coords[2];
int i, j;
int source;
MPI_Status status;
// print by process No.0 in process mesh
if (grid->my_rank == 0) {
printf("%s\n", title);
printf("Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_A); i++) {
for (j = 0; j < Order(local_A); j++)
printf("%20.15E ", Entry(local_A,i,j));
printf("\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
printf("Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
printf("%20.15E ", Entry(temp_mat,i,j));
printf("\n");
}
}
fflush(stdout);
} else {
MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Print_local_matrices_A */
/*********************************************************/
/* Recive and Print Local Matrix for local matrix B's transpose:
* Process 0 print local matrix local_A
* Other Processess send local matrix local_A to process 0
* And process 0 receive local matrix local_A from other processess
*/
void Print_local_matrices_B(
char* title /* in */,
LOCAL_MATRIX_T* local_B /* in */,
GRID_INFO_T* grid /* in */) {
int coords[2];
int i, j;
int source;
MPI_Status status;
// print by process No.0 in process mesh
if (grid->my_rank == 0) {
printf("%s\n", title);
printf("Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_B); i++) {
for (j = 0; j < Order(local_B); j++)
printf("%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage
printf("\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
printf("Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
printf("%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage
printf("\n");
}
}
fflush(stdout);
} else {
MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Print_local_matrices_B */
/*********************************************************/
/* Recive and Print Local Matrix A:
* Process 0 print local matrix local_A
* Other Processess send local matrix local_A to process 0
* And process 0 receive local matrix local_A from other processess
*/
void Print_local_matrices_C(
char* title /* in */,
LOCAL_MATRIX_T* local_C /* in */,
GRID_INFO_T* grid /* in */) {
int coords[2];
int i, j;
int source;
MPI_Status status;
// print by process No.0 in process mesh
if (grid->my_rank == 0) {
printf("%s\n", title);
printf("Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_C); i++) {
for (j = 0; j < Order(local_C); j++)
printf("%20.15E ", Entry(local_C,i,j));
printf("\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
printf("Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
printf("%20.15E ", Entry(temp_mat,i,j));
printf("\n");
}
}
fflush(stdout);
} else {
MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Print_local_matrices_C */
/*********************************************************/
/* Recive and Write Local Matrix A:
* Process 0 print local matrix local_A
* Other Processess send local matrix local_A to process 0
* And process 0 receive local matrix local_A from other processess
*/
void Write_local_matrices_A(
char* title /* in */,
LOCAL_MATRIX_T* local_A /* in */,
GRID_INFO_T* grid /* in */) {
FILE *fp;
int coords[2];
int i, j;
int source;
MPI_Status status;
// print by process No.0 in process mesh
if (grid->my_rank == 0) {
fp = fopen("local_A.dat","w+");
printf("%s\n", title);
fprintf(fp,"Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_A); i++) {
for (j = 0; j < Order(local_A); j++)
fprintf(fp,"%20.15E ", Entry(local_A,i,j));
fprintf(fp, "\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
fprintf(fp, "%20.15E ", Entry(temp_mat,i,j));
fprintf(fp, "\n");
}
}
fflush(stdout);
fclose(fp);
} else {
MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Write_local_matrices_A */
/*********************************************************/
/* Recive and Write Local Matrix for local matrix B's transpose:
* Process 0 print local matrix local_A
* Other Processess send local matrix local_A to process 0
* And process 0 receive local matrix local_A from other processess
*/
void Write_local_matrices_B(
char* title /* in */,
LOCAL_MATRIX_T* local_B /* in */,
GRID_INFO_T* grid /* in */) {
FILE *fp;
int coords[2];
int i, j;
int source;
MPI_Status status;
// print by process No.0 in process mesh
if (grid->my_rank == 0) {
fp = fopen("local_B.dat","w+");
printf("%s\n", title);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
grid->my_rank, grid->my_row, grid->my_col);
for (i = 0; i < Order(local_B); i++) {
for (j = 0; j < Order(local_B); j++)
fprintf(fp, "%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage
fprintf(fp, "\n");
}
for (source = 1; source < grid->p; source++) {
MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
grid->comm, &status);
MPI_Cart_coords(grid->comm, source, 2, coords);
fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
source, coords[0], coords[1]);
for (i = 0; i < Order(temp_mat); i++) {
for (j = 0; j < Order(temp_mat); j++)
fprintf(fp, "%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage
fprintf(fp, "\n");
}
}
fflush(stdout);
fclose(fp);
} else {
MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm);
}
} /* Write_local_matrices_B */
/*********************************************************/
/* Recive and Write Local Matrix C:
* Process 0 print local matrix local_C
* Other Processess send local matrix local_C to process 0
* And process 0 receive local matrix local_C from other processess
*/
/* Gather and write every process's local matrix C to "local_C.dat":
 * process 0 writes its own block, then receives each other process's block
 * into the shared buffer temp_mat and writes it; every other process sends
 * local_C to process 0.  Unlike B, C is written row-major (no transpose). */
void Write_local_matrices_C(
        char*            title    /* in */,
        LOCAL_MATRIX_T*  local_C  /* in */,
        GRID_INFO_T*     grid     /* in */) {
    FILE *fp;
    int coords[2];
    int i, j;
    int source;
    MPI_Status status;
    // print by process No.0 in process mesh
    if (grid->my_rank == 0) {
        fp = fopen("local_C.dat","w+");
        if (fp == NULL) {
            /* FIX: fopen was unchecked; a NULL fp was passed straight to
             * fprintf.  Abort the whole job so the senders don't hang. */
            fprintf(stderr, "Write_local_matrices_C: cannot open local_C.dat\n");
            MPI_Abort(grid->comm, 1);
        }
        printf("%s\n", title);
        fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
            grid->my_rank, grid->my_row, grid->my_col);
        for (i = 0; i < Order(local_C); i++) {
            for (j = 0; j < Order(local_C); j++)
                fprintf(fp, "%20.15E ", Entry(local_C,i,j));
            fprintf(fp, "\n");
        }
        for (source = 1; source < grid->p; source++) {
            MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
                grid->comm, &status);
            MPI_Cart_coords(grid->comm, source, 2, coords);
            fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
                source, coords[0], coords[1]);
            for (i = 0; i < Order(temp_mat); i++) {
                for (j = 0; j < Order(temp_mat); j++)
                    fprintf(fp, "%20.15E ", Entry(temp_mat,i,j));
                fprintf(fp, "\n");
            }
        }
        fflush(stdout);
        fclose(fp);
    } else {
        MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm);
    }
}  /* Write_local_matrices_C */
|
exchange_boundary_overlap.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// perform a (intra-level) ghost zone exchange on vector id
// NOTE exchange_boundary() only exchanges the boundary.
// It will not enforce any boundary conditions
// BC's are either the responsibility of a separate function or should be fused into the stencil
// The argument shape indicates which of faces, edges, and corners on each box must be exchanged
// If the specified shape exceeds the range of defined shapes, the code will default to STENCIL_SHAPE_BOX (i.e. exchange faces, edges, and corners)
// Perform an (intra-level) ghost zone exchange on vector id, overlapping
// communication with computation: thread 0 drives MPI (Irecv/Isend/Waitall)
// while threads 1..n-1 pack send buffers and do the on-node block copies.
// NOTE exchange_boundary() only exchanges the boundary; it does not enforce
// boundary conditions (that is a separate function or is fused into the stencil).
// shape selects which block lists of exchange_ghosts[] are used; out-of-range
// shapes fall back to STENCIL_SHAPE_BOX (faces, edges, and corners).
void exchange_boundary(level_type * level, int id, int shape){
  double _timeCommunicationStart = getTime();
  if(shape>=STENCIL_MAX_SHAPES)shape=STENCIL_SHAPE_BOX; // shape must be < STENCIL_MAX_SHAPES in order to safely index into exchange_ghosts[]
  int my_tag = (level->tag<<4) | shape; // tag combines the level's tag with the shape so distinct exchanges don't mix messages
  // short circuit if no work at all (pack, local, and unpack block lists all empty)...
  // NOTE(review): this early return also skips the ghostZone_total timer update below -- confirm that is intended.
  if( (level->exchange_ghosts[shape].num_blocks[0] + level->exchange_ghosts[shape].num_blocks[1] + level->exchange_ghosts[shape].num_blocks[2]) == 0)return;
  #ifdef USE_MPI
  int nMessages = level->exchange_ghosts[shape].num_recvs + level->exchange_ghosts[shape].num_sends;
  // requests[] holds the receive requests first, then the send requests
  MPI_Request *recv_requests = level->exchange_ghosts[shape].requests;
  MPI_Request *send_requests = level->exchange_ghosts[shape].requests + level->exchange_ghosts[shape].num_recvs;
  #endif
  #ifdef _OPENMP
  #warning exchange_boundary_overlap.c must be run with at least 2 threads per process
  #pragma omp parallel
  {
    double _timeStart; // per-thread timer scratch
    int buffer=0;
    int n;
    int threadID = omp_get_thread_num();
    //int numThreads = omp_get_num_threads();
    int numThreads = level->num_threads;
    #ifdef USE_MPI
    if(threadID==0){
      // loop through packed list of MPI receives and prepost Irecv's...
      if(level->exchange_ghosts[shape].num_recvs>0){
        _timeStart = getTime();
        for(n=0;n<level->exchange_ghosts[shape].num_recvs;n++){
          MPI_Irecv(level->exchange_ghosts[shape].recv_buffers[n],
                    level->exchange_ghosts[shape].recv_sizes[n],
                    MPI_DOUBLE,
                    level->exchange_ghosts[shape].recv_ranks[n],
                    my_tag,
                    MPI_COMM_WORLD,
                    &recv_requests[n]
          );
        }
        level->timers.ghostZone_recv += (getTime()-_timeStart);
      }
    } // master thread
    #endif
    // n-1 threads race thru to pack MPI send buffers... overlap packing with irecv
    #ifdef USE_MPI
    if(threadID>0){
      if(level->exchange_ghosts[shape].num_blocks[0]){
        if(threadID==1)_timeStart = getTime();
        for(buffer=threadID-1;buffer<level->exchange_ghosts[shape].num_blocks[0];buffer+=(numThreads-1)){ // like schedule(static,1) with n-1 threads
          CopyBlock(level,id,&level->exchange_ghosts[shape].blocks[0][buffer]);
        }
        if(threadID==1)level->timers.ghostZone_pack += (getTime()-_timeStart);
      }
    }
    #endif
    #pragma omp barrier // master must wait for n-1 threads to complete MPI buffer packing
    #ifdef USE_MPI
    if(threadID==0){
      // master does MPI while other threads race thru
      // loop through MPI send buffers and post Isend's...
      if(level->exchange_ghosts[shape].num_sends>0){
        _timeStart = getTime();
        for(n=0;n<level->exchange_ghosts[shape].num_sends;n++){
          MPI_Isend(level->exchange_ghosts[shape].send_buffers[n],
                    level->exchange_ghosts[shape].send_sizes[n],
                    MPI_DOUBLE,
                    level->exchange_ghosts[shape].send_ranks[n],
                    my_tag,
                    MPI_COMM_WORLD,
                    &send_requests[n]
          );
        }
        level->timers.ghostZone_send += (getTime()-_timeStart);
      }
      // master waits for MPI to finish...
      if(nMessages){
        _timeStart = getTime();
        MPI_Waitall(nMessages,level->exchange_ghosts[shape].requests,level->exchange_ghosts[shape].status);
        level->timers.ghostZone_wait += (getTime()-_timeStart);
      }
    } // master
    #endif
    // exchange locally using n-1 threads... overlap local exchange with isend/waitall
    if(threadID>0){
      if(level->exchange_ghosts[shape].num_blocks[1]){
        if(threadID==1)_timeStart = getTime();
        for(buffer=threadID-1;buffer<level->exchange_ghosts[shape].num_blocks[1];buffer+=(numThreads-1)){ // like schedule(static,1) with n-1 threads
          CopyBlock(level,id,&level->exchange_ghosts[shape].blocks[1][buffer]);
        }
        if(threadID==1)level->timers.ghostZone_local += (getTime()-_timeStart);
      }
    }
    #pragma omp barrier // all threads must wait for MPI to complete before unpacking
    // unpack MPI receive buffers (all n threads participate)
    #ifdef USE_MPI
    if(level->exchange_ghosts[shape].num_blocks[2]){
      if(threadID==0)_timeStart = getTime();
      //can't use PRAGMA_THREAD_ACROSS_BLOCKS as it would create a nested parallel region
      //#pragma omp for schedule(static,1)
      //for(buffer=0;buffer<level->exchange_ghosts[shape].num_blocks[2];buffer++){
      for(buffer=threadID;buffer<level->exchange_ghosts[shape].num_blocks[2];buffer+=numThreads){ // like schedule(static,1) with n threads
        CopyBlock(level,id,&level->exchange_ghosts[shape].blocks[2][buffer]);
      }
      if(threadID==0)level->timers.ghostZone_unpack += (getTime()-_timeStart);
    }
    #endif
  } // omp parallel
  #else
  #error exchange_boundary_overlap.c must be compiled with OpenMP and run with at least 2 threads per process
  #endif
  level->timers.ghostZone_total += (double)(getTime()-_timeCommunicationStart);
}
|
GB_unop__ceil_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ceil_fp64_fp64)
// op(A') function: GB (_unop_tran__ceil_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = ceil (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ceil (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = ceil (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CEIL || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = ceil (Ax [p]) for all entries; Cx and Ax may be aliased.
// The bitmap branch skips positions whose Ab bit is clear.
GrB_Info GB (_unop_apply__ceil_fp64_fp64)
(
    double *Cx,               // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = ceil (Ax [p]) ;
            }
        }
    }
    else
    {
        // full/sparse case: apply to every position
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = ceil (Ax [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply ceil to each double entry.
// All of the work is done by the shared transpose template, driven by the
// GB_* macros defined near the top of this file.
GrB_Info GB (_unop_tran__ceil_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type pairing was compiled out via the GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
clauses-2.c | /* { dg-skip-if "PR 68733" { hppa*-*-hpux* && { ! lp64 } } } */
struct S { int r; int *s; int t[10]; };
void bar (int *);
/* Exercise GCC's OpenMP 'target' clause-conflict diagnostics: each pragma
   below combines map/firstprivate clauses naming the same variable (or
   overlapping parts of struct t), and the trailing dg-error comment states
   the diagnostic the compiler must emit.  The dg-error comments are DejaGnu
   directives consumed by the test harness -- do not reword them. */
void
foo (int *p, int q, struct S t, int i, int j, int k, int l)
{
  #pragma omp target map (q), firstprivate (q) /* { dg-error "appears both in data and map clauses" } */
  bar (&q);
  #pragma omp target map (p[0]) firstprivate (p) /* { dg-error "appears more than once in data clauses" } */
  bar (p);
  #pragma omp target firstprivate (p), map (p[0]) /* { dg-error "appears more than once in data clauses" } */
  bar (p);
  #pragma omp target map (p[0]) map (p) /* { dg-error "appears both in data and map clauses" } */
  bar (p);
  #pragma omp target map (p) , map (p[0]) /* { dg-error "appears both in data and map clauses" } */
  bar (p);
  #pragma omp target map (q) map (q) /* { dg-error "appears more than once in map clauses" } */
  bar (&q);
  #pragma omp target map (p[0]) map (p[0]) /* { dg-error "appears more than once in data clauses" } */
  bar (p);
  #pragma omp target map (t) map (t.r) /* { dg-error "appears more than once in map clauses" } */
  bar (&t.r);
  #pragma omp target map (t.r) map (t) /* { dg-error "appears more than once in map clauses" } */
  bar (&t.r);
  #pragma omp target map (t.r) map (t.r) /* { dg-error "appears more than once in map clauses" } */
  bar (&t.r);
  #pragma omp target firstprivate (t), map (t.r) /* { dg-error "appears both in data and map clauses" } */
  bar (&t.r);
  #pragma omp target map (t.r) firstprivate (t) /* { dg-error "appears both in data and map clauses" } */
  bar (&t.r);
  #pragma omp target map (t.s[0]) map (t) /* { dg-error "appears more than once in map clauses" } */
  bar (t.s);
  #pragma omp target map (t) map(t.s[0]) /* { dg-error "appears more than once in map clauses" } */
  bar (t.s);
  #pragma omp target firstprivate (t) map (t.s[0]) /* { dg-error "appears both in data and map clauses" } */
  bar (t.s);
  #pragma omp target map (t.s[0]) firstprivate (t) /* { dg-error "appears both in data and map clauses" } */
  bar (t.s);
  #pragma omp target map (t.s[0]) map (t.s[2]) /* { dg-error "appears more than once in map clauses" } */
  bar (t.s);
  #pragma omp target map (t.t[0:2]) map (t.t[4:6]) /* { dg-error "appears more than once in map clauses" } */
  bar (t.t);
  #pragma omp target map (t.t[i:j]) map (t.t[k:l]) /* { dg-error "appears more than once in map clauses" } */
  bar (t.t);
  #pragma omp target map (t.s[0]) map (t.r)
  bar (t.s);
  #pragma omp target map (t.r) ,map (t.s[0])
  bar (t.s);
  #pragma omp target map (t.r) map (t) map (t.s[0]) firstprivate (t) /* { dg-error "appears both in data and map clauses" } */
  bar (t.s);
  #pragma omp target map (t) map (t.r) firstprivate (t) map (t.s[0]) /* { dg-error "appears both in data and map clauses" } */
  bar (t.s); /* { dg-error "appears more than once in map clauses" "" { target *-*-* } .-1 } */
}
|
Heuristic.h | #ifndef HEURISTIC_H
#define HEURISTIC_H
#include "Matrix.h"
// Index offsets used to visit the previous/next neighbor along one dimension.
const vector<int> offset{-1, 1};
// Try to align the vertical neighbors (rows i-1 and i+1) of confirmed cell
// (i, j): a neighbor is promoted when it is a candidate (value 1) and its
// row is not yet in F_aligned.  Returns true if any link was added.
// NOTE(review): the guard (1 < i) && (i < m - 1) skips rows 0, 1 and m-1
// entirely -- confirm the border exclusion is intended.  It also ensures the
// unsigned wrap of x = i + (-1) when i == 0 is never dereferenced, so the
// evaluation order of the condition is load-bearing.
inline bool AddVerticalNeighbors(const unsigned &i, const unsigned &j, const unsigned &m, Matrix *matrix,
                                 set<unsigned> *F_aligned, Alignment *sure)
{
    bool added = false;
    for (auto &p : offset)
    {
        const unsigned x = i + p;  // neighbor row (may wrap; guarded below)
        if ((1 < i) && (i < m - 1) && F_aligned->find(x) == F_aligned->end() && (*matrix)(x, j) == 1)
        {
            sure->push_back(make_pair(j, x));  // links are stored as (col, row)
            (*matrix)(x, j) = 2;               // mark neighbor as confirmed
            F_aligned->insert(x);
            added = true;
        }
    }
    return added;
}
// Try to align the horizontal neighbors (columns j-1 and j+1) of confirmed
// cell (i, j): a neighbor is promoted when it is a candidate (value 1) and
// its column is not yet in E_aligned.  Returns true if any link was added.
// NOTE(review): as with the vertical pass, the (1 < j) && (j < n - 1) guard
// excludes columns 0, 1 and n-1 and keeps the unsigned wrap of y from being
// dereferenced -- preserve the condition order.
inline bool AddHorizontalNeighbors(const unsigned &i, const unsigned &j, const unsigned &n, Matrix *matrix,
                                   set<unsigned> *E_aligned, Alignment *sure)
{
    bool added = false;
    for (auto &p : offset)
    {
        const unsigned y = j + p;  // neighbor column (may wrap; guarded below)
        if ((1 < j) && (j < n - 1) && E_aligned->find(y) == E_aligned->end() && (*matrix)(i, y) == 1)
        {
            sure->push_back(make_pair(y, i));  // links are stored as (col, row)
            (*matrix)(i, y) = 2;               // mark neighbor as confirmed
            E_aligned->insert(y);
            added = true;
        }
    }
    return added;
}
// Try to align the four diagonal neighbors of confirmed cell (i, j).
// A diagonal neighbor (x, y) is promoted when it is a candidate (value 1)
// and at least one of the *current* cell's row i / column j is unaligned.
// NOTE(review): unlike the vertical/horizontal passes, the aligned-set test
// here inspects (i, j) rather than the neighbor (x, y), and both x and y are
// inserted into the aligned sets -- confirm this asymmetry is intended.
inline bool AddDiagonalNeighbors(const unsigned &i, const unsigned &j, const unsigned &m, const unsigned &n, Matrix *matrix,
                                 set<unsigned> *F_aligned, set<unsigned> *E_aligned, Alignment *sure)
{
    bool added = false;
    for (auto &p : offset)
    {
        for (auto &q : offset)
        {
            const unsigned x = i + p;  // neighbor row (may wrap; guarded below)
            const unsigned y = j + q;  // neighbor column (may wrap; guarded below)
            if ((1 < j) && (j < n - 1) && (1 < i) && (i < m - 1))
            {
                if (F_aligned->find(i) == F_aligned->end() || E_aligned->find(j) == E_aligned->end())
                {
                    if ((*matrix)(x, y) == 1)
                    {
                        sure->push_back(make_pair(y, x));  // links are stored as (col, row)
                        (*matrix)(x, y) = 2;               // mark neighbor as confirmed
                        F_aligned->insert(x);
                        E_aligned->insert(y);
                        added = true;
                    }
                }
            }
        }
    }
    return added;
}
// Final sweep: promote every remaining candidate cell (value 1) whose row
// and column are both still unaligned, recording the link as (col, row).
// The matrix cell itself is left at value 1 (only the sets and the sure
// list are updated), exactly as before.
inline void AddFinalAndNeighbors(const unsigned &m, const unsigned &n, Matrix *matrix,
                                 set<unsigned> *F_aligned, set<unsigned> *E_aligned, Alignment *sure)
{
    for (unsigned row = 0; row < m; row++)
    {
        for (unsigned col = 0; col < n; col++)
        {
            if (F_aligned->count(row) != 0 || E_aligned->count(col) != 0)
                continue;  // row or column already aligned
            if ((*matrix)(row, col) != 1)
                continue;  // not a candidate cell
            sure->push_back(make_pair(col, row));
            F_aligned->insert(row);
            E_aligned->insert(col);
        }
    }
}
/* Grow the set of sure alignment links: repeatedly sweep the matrix and,
 * for every confirmed cell (value 2), try to promote neighboring candidate
 * cells (value 1) to confirmed, until a full sweep adds nothing; finally
 * add any remaining candidates whose row and column are both unaligned.
 *
 * FIX: the horizontal pass previously received F_aligned (row set) where
 * AddHorizontalNeighbors expects E_aligned (column set), so horizontal
 * promotions corrupted the row-alignment bookkeeping.  */
void Heuristic(const unsigned &m, const unsigned &n, set<unsigned> *F_aligned, set<unsigned> *E_aligned,
               Alignment *sure, Matrix *matrix)
{
    bool added = false;
    while (true)
    {
        added = false;
        for (unsigned i = 0; i < m; i++)
        {
            for (unsigned j = 0; j < n; j++)
            {
                if ((*matrix)(i, j) == 2)
                {
                    // '||' short-circuits: at most one neighbor pass fires per
                    // cell per sweep; later sweeps pick up what was skipped.
                    if (AddVerticalNeighbors(i, j, m, matrix, F_aligned, sure) ||
                        AddHorizontalNeighbors(i, j, n, matrix, E_aligned, sure) ||
                        AddDiagonalNeighbors(i, j, m, n, matrix, F_aligned, E_aligned, sure))
                        added = true;
                }
            }
        }
        if (!added)
            break;
    }
    AddFinalAndNeighbors(m, n, matrix, F_aligned, E_aligned, sure);
}
// Symmetrize forward and backward alignment lists: links present in both
// directions are taken as "sure" immediately, then Heuristic() grows the
// sure set from neighboring one-directional links.  One matrix per list
// entry; entries are processed independently in parallel.
void SymmetrizeAlignment(const AlignmentList &forward_list, const AlignmentList &backward_list,
                         AlignmentList *symmetric_list)
{
    CHECK(forward_list.size() == backward_list.size(), "#ERROR, two alignment lists with different size!");
    symmetric_list->resize(forward_list.size());
    #pragma omp parallel for schedule(dynamic)
    for (size_t i = 0; i < symmetric_list->size(); i++)
    {
        const Alignment &forward_links = forward_list[i];
        const Alignment &backward_links = backward_list[i];
        // align_matrix is m x n: first index bounded by m, second by n
        const unsigned n = static_cast<unsigned>(forward_links.size());
        const unsigned m = static_cast<unsigned>(backward_links.size());
        Matrix align_matrix(m, n);
        Alignment sure;
        set<unsigned> F_aligned, E_aligned;
        // mark forward links as candidates (value 1)
        for (auto &it : forward_links)
        {
            const unsigned int y = it.first;
            const unsigned int x = it.second;
            // NOTE(review): only x is range-checked here; y is not compared
            // against n -- confirm forward_links guarantees y < n.
            if (x < m)
                align_matrix(x, y) = 1;
        }
        // links confirmed by both directions become "sure" (value 2)
        for (auto &it : backward_links)
        {
            const unsigned int x = it.first;
            const unsigned int y = it.second;
            if (y < n)
            {
                if (align_matrix(x, y) > 0)
                {
                    sure.push_back(make_pair(y, x));  // stored as (col, row)
                    F_aligned.insert(x);
                    E_aligned.insert(y);
                }
                align_matrix(x, y) += 1;
            }
        }
        //align_matrix.PrintMatrix();
        Heuristic(m, n, &F_aligned, &E_aligned, &sure, &align_matrix);
        (*symmetric_list)[i] = sure;
    }
}
#endif // HEURISTIC_H |
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values.
 * Returns 1 if the difference is negative, otherwise 0.
 * NOTE: y is normalized in place (classic glibc idiom), so the caller's
 * second operand may be modified. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow from y's seconds so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Carry the other way if the microsecond gap exceeds one second. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec is certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for the tiled order-1 3D 7-point variable-coefficient
 * stencil.  Usage: prog Nx Ny Nz [Nt].  Runs TESTS repetitions and reports
 * the per-test and minimum wall time. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Problem dimensions (+2 ghost layers) and timestep count.
   * FIX: these were previously read uninitialized (undefined behavior)
   * whenever fewer than 3 (resp. 4) command-line arguments were supplied;
   * give them safe defaults that the arguments override. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays: two time planes of the grid ...
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // ... and the 7 coefficient arrays
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 16;
  tile_size[3] = 512;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables
  // FIX: start the fill loops at 0 (not 1) so the boundary/ghost planes are
  // initialized too -- the stencil reads index 0 via its -1 offsets, which
  // previously read indeterminate values.
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  #pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
    #pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.
       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public
       License as published by the Free Software Foundation; either
       version 2.1 of the License, or (at your option) any later version.
       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
       Lesser General Public License for more details.
       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>. */
    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation.  It must
       not itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header.  GCC knows the name of this
       header in order to preinclude it. */
    /* glibc's intent is to support the IEC 559 math functionality, real
       and complex.  If the GCC (4.9 and later) predefined macros
       specifying compiler intent are available, use them to determine
       whether the overall intent is to support these features; otherwise,
       presume an older compiler has intent to support these features and
       define these macros by default. */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0. */
    /* We do not support C11 <threads.h>. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code (auto-generated time/space tiling; do not hand-edit) */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,12);t1++) {
        lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
        ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(3*t1-3,4)),ceild(24*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(12*t1+Ny+21,16)),floord(24*t2+Ny+20,16)),floord(24*t1-24*t2+Nz+Ny+19,16));t3++) {
            for (t4=max(max(max(0,ceild(3*t1-127,128)),ceild(24*t2-Nz-508,512)),ceild(16*t3-Ny-508,512));t4<=min(min(min(min(floord(Nt+Nx-4,512),floord(12*t1+Nx+21,512)),floord(24*t2+Nx+20,512)),floord(16*t3+Nx+12,512)),floord(24*t1-24*t2+Nz+Nx+19,512));t4++) {
              for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),16*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),16*t3+14),512*t4+510),24*t1-24*t2+Nz+21);t5++) {
                for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
                    lbv=max(512*t4,t5+1);
                    ubv=min(512*t4+511,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);            /* FIX: top-level pointer array was leaked */
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);         /* FIX: top-level pointer array was leaked */
  free(tile_size);    /* FIX: tile_size was leaked */
  return 0;
}
|
GB_binop__times_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__times_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__times_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__times_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_uint32)
// A*D function (colscale): GB (_AxD__times_uint32)
// D*A function (rowscale): GB (_DxB__times_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__times_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__times_uint32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_uint32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_uint32)
// C=scalar+B GB (_bind1st__times_uint32)
// C=scalar+B' GB (_bind1st_tran__times_uint32)
// C=A+scalar GB (_bind2nd__times_uint32)
// C=A'+scalar GB (_bind2nd_tran__times_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x * y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_UINT32 || GxB_NO_TIMES_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the TIMES binary operator is
// applied elementwise by the shared template via the GB_* macros above.
// NOTE(review): unlike the sibling kernels this one has no GB_DISABLE
// guard -- confirm callers perform that check.
void GB (_Cdense_ewise3_accum__times_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation).
GrB_Info GB (_Cdense_ewise3_noaccum__times_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // kernel compiled out via the GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// B_ek_slicing task decomposition computed by the caller.
GrB_Info GB (_Cdense_accumB__times_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // kernel compiled out via the GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
// FIX: the generated code returned GrB_SUCCESS inside the braced block and
// again after it; the second return was unreachable dead code.  The single
// trailing return now matches the structure of _Cdense_accumB above.
GrB_Info GB (_Cdense_accumb__times_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    // kernel compiled out via the GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column of A by the matching entry of the diagonal
// matrix D, writing uint32_t results into C->x via the colscale template.
GrB_Info GB (_AxD__times_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    // kernel compiled out via the GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row of B by the matching entry of the diagonal
// matrix D, writing uint32_t results into C->x via the rowscale template.
GrB_Info GB (_DxB__times_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    // kernel compiled out via the GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, with optional structural or
// complemented mask, using the task list prepared by the caller.
GrB_Info GB (_AaddB__times_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // kernel compiled out via the GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    // slicing workspaces for M, A, and B; released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse or hypersparse, using the task list prepared by the caller.
GrB_Info GB (_AemultB_08__times_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // kernel compiled out via the GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_FLIPPED selects fmult(y,x) vs fmult(x,y) inside the
// template; TIMES is commutative here (GB_BINOP_FLIP is 0), so only the
// unflipped branch is compiled.
GrB_Info GB (_AemultB_02__times_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    // kernel compiled out via the GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__times_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    // quick return if this operator/type combination is compiled out
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // kernel body: eWiseMult method-04 template (M sparse/hyper, A,B bitmap/full)
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__times_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    // quick return if this operator/type combination is compiled out
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // kernel body: eWiseMult template for a bitmap-format result C
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__times_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    // Cx [p] = x * Bx [p] for every entry present in B's bitmap.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the generic (GB_void) arrays
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    // the scalar bound to the first operand of z = x * y
    uint32_t x = (*((uint32_t *) x_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < bnz ; p++)
    {
        // apply the operator only where the bitmap marks an entry
        if (GBB (Bb, p))
        {
            uint32_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x * bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__times_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    // Cx [p] = Ax [p] * y for every entry present in A's bitmap.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the generic (GB_void) arrays
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    // the scalar bound to the second operand of z = x * y
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // apply the operator only where the bitmap marks an entry
        if (GBB (Ab, p))
        {
            uint32_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij * y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// per-entry action used by GB_unop_transpose.c: cij = x * aij
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x * aij) ; \
}
GrB_Info GB (_bind1st_tran__times_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the scalar bound to the first operand
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows in this generated file
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// per-entry action used by GB_unop_transpose.c: cij = aij * y
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij * y) ; \
}
GrB_Info GB (_bind2nd_tran__times_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // quick return if this operator/type combination is compiled out
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the scalar bound to the second operand
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__lnot_int32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int32_fp32
// op(A') function: GB_tran__lnot_int32_fp32
// C type: int32_t
// A type: float
// cast: int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
float
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
int32_t z ; GB_CAST_SIGNED(z,aij,32) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT32 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_int32_fp32
(
    int32_t *Cx,        // Cx and Ax may be aliased
    float *Ax,
    int64_t anz,
    int nthreads
)
{
    // quick return if this operator/type combination is compiled out
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = !(cast-to-int32 (Ax [p]) != 0), via the macros above
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_int32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // quick return if this operator/type combination is compiled out
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // numerical phase only; the symbolic phase is done elsewhere
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__gt_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__gt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__gt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_uint32)
// A*D function (colscale): GB (_AxD__gt_uint32)
// D*A function (rowscale): GB (_DxB__gt_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_uint32)
// C=scalar+B GB (_bind1st__gt_uint32)
// C=scalar+B' GB (_bind1st_tran__gt_uint32)
// C=A+scalar GB (_bind2nd__gt_uint32)
// C=A'+scalar GB (_bind2nd_tran__gt_uint32)
// C type: bool
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_UINT32 || GxB_NO_GT_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled for GT: this dense C+=A+B kernel is only generated when
// the op can serve as an accumulator.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__gt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // quick return if this operator/type combination is compiled out
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C = A+B where C, A, and B are all dense; no accumulator
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__gt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // intentionally a no-op: C += B is not generated for the GT comparator
    // (the template below is compiled out), so this stub just reports success
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__gt_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // intentionally a no-op: C += b (scalar accum) is not generated for the
    // GT comparator, so the template below is compiled out
    #if 0
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__gt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // quick return if this operator/type combination is compiled out
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C is bool since GT produces a boolean result
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__gt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    // quick return if this operator/type combination is compiled out
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C is bool since GT produces a boolean result
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__gt_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // quick return if this operator/type combination is compiled out
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B by entry; released by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__gt_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // quick return if this operator/type combination is compiled out
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // kernel body: the eWiseMult method-01 template with the GT/uint32 macros
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__gt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // quick return if this operator/type combination is compiled out
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (GT's flip was resolved upstream by swapping to LT, so GB_BINOP_FLIP is 0.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__gt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    // quick return if this operator/type combination is compiled out
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // kernel body: eWiseMult method-03 template (M sparse/hyper, A,B bitmap/full)
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__gt_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    // quick return if this operator/type combination is compiled out
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // kernel body: eWiseMult template for a bitmap-format result C
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__gt_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    // Cx [p] = (x > Bx [p]) for every entry present in B's bitmap.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the generic (GB_void) arrays; C is bool for GT
    bool *Cx = (bool *) Cx_output ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    // the scalar bound to the first operand of z = (x > y)
    uint32_t x = (*((uint32_t *) x_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < bnz ; p++)
    {
        // apply the operator only where the bitmap marks an entry
        if (GBB (Bb, p))
        {
            uint32_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x > bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__gt_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    // Cx [p] = (Ax [p] > y) for every entry present in A's bitmap.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the generic (GB_void) arrays; C is bool for GT
    bool *Cx = (bool *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    // the scalar bound to the second operand of z = (x > y)
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // apply the operator only where the bitmap marks an entry
        if (GBB (Ab, p))
        {
            uint32_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij > y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// per-entry action used by GB_unop_transpose.c: cij = (x > aij)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__gt_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the scalar bound to the first operand
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows in this generated file
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// per-entry action used by GB_unop_transpose.c: cij = (aij > y)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__gt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // quick return if this operator/type combination is compiled out
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the scalar bound to the second operand
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
conv4x4s4_neon.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
// 4x4 convolution with stride 4: each output pixel is the dot product of one
// non-overlapping 4x4 input patch with a 4x4 kernel, summed over all input
// channels, plus an optional per-output-channel bias.  Output channels are
// processed in parallel; NEON paths use hand-written asm for the inner loop.
static void conv4x4s4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    // after consuming one output row (4*outw input columns) advance past the
    // leftover columns of that row plus three more rows (vertical stride 4)
    const int tailstep = w - 4*outw + w*3;
    const float* kernel = _kernel;
    const float* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);
        // seed the output channel with its bias (0 when no bias term is given)
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);
        // accumulate contributions from every input channel
        for (int q=0; q<inch; q++)
        {
            float* outptr = out;
            const float* img0 = bottom_blob.channel(q);
            // 16 weights (4x4) per (output channel, input channel) pair
            const float* kernel0 = kernel + p*inch*16 + q*16;
            // four consecutive input rows covered by the 4x4 window
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;
#if __ARM_NEON
            // one kernel row per NEON register
            float32x4_t _k0123 = vld1q_f32(kernel0);
            float32x4_t _k4567 = vld1q_f32(kernel0+4);
            float32x4_t _k891011 = vld1q_f32(kernel0+8);
            float32x4_t _k12131415 = vld1q_f32(kernel0+12);
#else
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 4;
            const float* k2 = kernel0 + 8;
            const float* k3 = kernel0 + 12;
#endif // __ARM_NEON
            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                // vector path handles 4 output pixels per iteration
                int nn = outw >> 2;
                int remain = outw - (nn << 2);
#else
                int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                    // computes 4 output pixels per loop trip: for each pixel,
                    // multiply the four input rows by the four kernel rows,
                    // then horizontally reduce (faddp) and accumulate into outptr
                    asm volatile(
                        "prfm pldl1keep, [%1, #128] \n"
                        "0: \n"
                        "prfm pldl1keep, [%2, #512] \n"
                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v7.4s}, [%1] \n" // v7 = outptr
                        "ld1 {v8.4s}, [%2], #16 \n"// v8 = r0
                        "ld1 {v9.4s}, [%3], #16 \n"// v9 = r1
                        "prfm pldl1keep, [%4, #512] \n"
                        "prfm pldl1keep, [%5, #512] \n"
                        "fmul v12.4s, v8.4s, %12.4s \n"
                        "fmul v13.4s, v9.4s, %13.4s \n"
                        "ld1 {v10.4s}, [%4], #16 \n"// v10 = r2
                        "ld1 {v11.4s}, [%5], #16 \n"// v11 = r3
                        "fmla v12.4s, v10.4s, %14.4s \n"
                        "fmla v13.4s, v11.4s, %15.4s \n"
                        "fadd v5.4s, v12.4s, v13.4s \n"
                        "ld1 {v8.4s}, [%2], #16 \n"// v8 = r0
                        "ld1 {v9.4s}, [%3], #16 \n"// v9 = r1
                        "fmul v12.4s, v8.4s, %12.4s \n"
                        "fmul v13.4s, v9.4s, %13.4s \n"
                        "ld1 {v10.4s}, [%4], #16 \n"// v10 = r2
                        "ld1 {v11.4s}, [%5], #16 \n"// v11 = r3
                        "fmla v12.4s, v10.4s, %14.4s \n"
                        "fmla v13.4s, v11.4s, %15.4s \n"
                        "fadd v6.4s, v12.4s, v13.4s \n"
                        "ld1 {v8.4s}, [%2], #16 \n"// v8 = r0
                        "ld1 {v9.4s}, [%3], #16 \n"// v9 = r1
                        "fmul v12.4s, v8.4s, %12.4s \n"
                        "fmul v13.4s, v9.4s, %13.4s \n"
                        "ld1 {v10.4s}, [%4], #16 \n"// v10 = r2
                        "ld1 {v11.4s}, [%5], #16 \n"// v11 = r3
                        "fmla v12.4s, v10.4s, %14.4s \n"
                        "fmla v13.4s, v11.4s, %15.4s \n"
                        "fadd v14.4s, v12.4s, v13.4s \n"
                        "faddp v5.4s, v5.4s, v6.4s \n" // Move to here to enhance ILP
                        "ld1 {v8.4s}, [%2], #16 \n"// v8 = r0
                        "ld1 {v9.4s}, [%3], #16 \n"// v9 = r1
                        "fmul v12.4s, v8.4s, %12.4s \n"
                        "fmul v13.4s, v9.4s, %13.4s \n"
                        "ld1 {v10.4s}, [%4], #16 \n"// v10 = r2
                        "ld1 {v11.4s}, [%5], #16 \n"// v11 = r3
                        "fmla v12.4s, v10.4s, %14.4s \n"
                        "fmla v13.4s, v11.4s, %15.4s \n"
                        "fadd v15.4s, v12.4s, v13.4s \n"
                        // "faddp v5.4s , v5.4s, v6.4s \n" // Move this line upward.
                        "faddp v14.4s, v14.4s, v15.4s \n"
                        "faddp v5.4s , v5.4s, v14.4s \n"
                        "fadd v7.4s, v7.4s, v5.4s \n"
                        "st1 {v7.4s}, [%1], #16 \n"
                        "prfm pldl1keep, [%1, #128] \n"
                        "subs %w0, %w0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn), // %0
                        "=r"(outptr), // %1
                        "=r"(r0), // %2
                        "=r"(r1), // %3
                        "=r"(r2), // %4
                        "=r"(r3) // %5
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "w"(_k0123), // %12
                        "w"(_k4567), // %13
                        "w"(_k891011), // %14
                        "w"(_k12131415) // %15
                        : "cc", "memory", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                    );
                }
#else
                if (nn > 0)
                {
                    // ARMv7 equivalent of the aarch64 loop above, using
                    // q-registers and vpadd for the horizontal reductions
                    asm volatile(
                        "pld [%1, #128] \n"
                        "0: \n"
                        "pld [%2, #512] \n"
                        "pld [%3, #512] \n"
                        "vld1.f32 {d14-d15}, [%1] \n"// q7 = outptr
                        "vld1.f32 {d16-d17}, [%2]! \n"// q8 = r0
                        "vld1.f32 {d18-d19}, [%3]! \n"// q9 = r1
                        "pld [%4, #512] \n"
                        "pld [%5, #512] \n"
                        "vmul.f32 q12, q8, %q12 \n"
                        "vmul.f32 q13, q9, %q13 \n"
                        "vld1.f32 {d20-d21}, [%4]! \n"// q10 = r2
                        "vld1.f32 {d22-d23}, [%5]! \n"// q11 = r3
                        "vmla.f32 q12, q10, %q14 \n"
                        "vmla.f32 q13, q11, %q15 \n"
                        "vadd.f32 q5, q12, q13 \n"
                        "vld1.f32 {d16-d17}, [%2]! \n"// q8 = r0
                        "vld1.f32 {d18-d19}, [%3]! \n"// q9 = r1
                        "vmul.f32 q12, q8, %q12 \n"
                        "vmul.f32 q13, q9, %q13 \n"
                        "vld1.f32 {d20-d21}, [%4]! \n"// q10 = r2
                        "vld1.f32 {d22-d23}, [%5]! \n"// q11 = r3
                        "vmla.f32 q12, q10, %q14 \n"
                        "vmla.f32 q13, q11, %q15 \n"
                        "vadd.f32 q6, q12, q13 \n"
                        "vld1.f32 {d16-d17}, [%2]! \n"// q8 = r0
                        "vld1.f32 {d18-d19}, [%3]! \n"// q9 = r1
                        "vmul.f32 q12, q8, %q12 \n"
                        "vmul.f32 q13, q9, %q13 \n"
                        "vld1.f32 {d20-d21}, [%4]! \n"// q10 = r2
                        "vld1.f32 {d22-d23}, [%5]! \n"// q11 = r3
                        "vmla.f32 q12, q10, %q14 \n"
                        "vmla.f32 q13, q11, %q15 \n"
                        "vadd.f32 q14, q12, q13 \n"
                        "vld1.f32 {d16-d17}, [%2]! \n"// q8 = r0
                        "vld1.f32 {d18-d19}, [%3]! \n"// q9 = r1
                        "vmul.f32 q12, q8, %q12 \n"
                        "vmul.f32 q13, q9, %q13 \n"
                        "vld1.f32 {d20-d21}, [%4]! \n"// q10 = r2
                        "vld1.f32 {d22-d23}, [%5]! \n"// q11 = r3
                        "vmla.f32 q12, q10, %q14 \n"
                        "vmla.f32 q13, q11, %q15 \n"
                        "vadd.f32 q15, q12, q13 \n"
                        "vadd.f32 d10, d10, d11 \n"
                        "vadd.f32 d28, d28, d29 \n"
                        "vadd.f32 d11, d12, d13 \n"
                        "vadd.f32 d29, d30, d31 \n"
                        "vpadd.f32 d10, d10, d11 \n"
                        "vpadd.f32 d11, d28, d29 \n"
                        "vadd.f32 q7, q7, q5 \n"
                        "vst1.f32 {d14-d15}, [%1]! \n"
                        "pld [%1, #128] \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn), // %0
                        "=r"(outptr), // %1
                        "=r"(r0), // %2
                        "=r"(r1), // %3
                        "=r"(r2), // %4
                        "=r"(r3) // %5
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "w"(_k0123), // %12
                        "w"(_k4567), // %13
                        "w"(_k891011), // %14
                        "w"(_k12131415) // %15
                        : "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // scalar tail: one output pixel at a time
                for (; remain>0; remain--)
                {
#if __ARM_NEON
#if __aarch64__
                    float sum = 0.f;
                    // one 4x4 patch dot 4x4 kernel, reduced to a scalar
                    asm volatile(
                        "ld1 {v8.4s}, [%0], #16 \n"// v8 = r0
                        "ld1 {v9.4s}, [%1], #16 \n"// v9 = r1
                        "fmul v12.4s, v8.4s, %9.4s \n"
                        "fmul v13.4s, v9.4s, %10.4s \n"
                        "ld1 {v10.4s}, [%2], #16 \n"// v10 = r2
                        "ld1 {v11.4s}, [%3], #16 \n"// v11 = r3
                        "fmla v12.4s, v10.4s, %11.4s \n"
                        "fmla v13.4s, v11.4s, %12.4s \n"
                        "fadd v5.4s, v12.4s, v13.4s \n"
                        "faddp v5.4s, v5.4s, v5.4s \n"
                        "faddp s5, v5.2s \n"
                        "fmov %w4, s5 \n"
                        : "=r"(r0), // %0
                        "=r"(r1), // %1
                        "=r"(r2), // %2
                        "=r"(r3), // %3
                        "=r"(sum) // %4
                        : "0"(r0),
                        "1"(r1),
                        "2"(r2),
                        "3"(r3),
                        "w"(_k0123), // %9
                        "w"(_k4567), // %10
                        "w"(_k891011), // %11
                        "w"(_k12131415) // %12
                        : "cc", "memory", "v5", "v6", "v8", "v9", "v10", "v11", "v12", "v13"
                    );
                    *outptr += sum;
#else
                    float sum = 0.f;
                    // ARMv7 scalar-tail variant of the reduction above
                    asm volatile(
                        "vld1.f32 {d16-d17}, [%0]! \n"// q8 = r0
                        "vld1.f32 {d18-d19}, [%1]! \n"// q9 = r1
                        "vmul.f32 q12, q8, %q9 \n"
                        "vmul.f32 q13, q9, %q10 \n"
                        "vld1.f32 {d20-d21}, [%2]! \n"// q10 = r2
                        "vld1.f32 {d22-d23}, [%3]! \n"// q11 = r3
                        "vmla.f32 q12, q10, %q11 \n"
                        "vmla.f32 q13, q11, %q12 \n"
                        "vadd.f32 q5, q12, q13 \n"
                        "vadd.f32 d10, d10, d11 \n"
                        "vpadd.f32 d10, d10, d10 \n"
                        "vmov.f32 %4, d10[0] \n"
                        : "=r"(r0), // %0
                        "=r"(r1), // %1
                        "=r"(r2), // %2
                        "=r"(r3), // %3
                        "=r"(sum) // %4
                        : "0"(r0),
                        "1"(r1),
                        "2"(r2),
                        "3"(r3),
                        "w"(_k0123), // %9
                        "w"(_k4567), // %10
                        "w"(_k891011), // %11
                        "w"(_k12131415) // %12
                        : "cc", "memory", "q5", "q6", "q8", "q9", "q10", "q11", "q12", "q13"
                    );
                    *outptr += sum;
#endif // __aarch64__
#else
                    // plain C fallback: explicit 16-term dot product
                    float sum = 0;
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    *outptr += sum;
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
#endif // __ARM_NEON
                    outptr++;
                }
                // jump the row pointers to the start of the next 4-row band
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
            }
        }
    }
}
}
|
core_ctrtri.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztrtri.c, normal z -> c, Fri Sep 28 17:38:24 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_trtri
*
* Computes the inverse of an upper or lower
* triangular matrix A.
*
*******************************************************************************
*
* @param[in] uplo
* = PlasmaUpper: Upper triangle of A is stored;
* = PlasmaLower: Lower triangle of A is stored.
*
* @param[in] diag
* = PlasmaNonUnit: A is non-unit triangular;
* = PlasmaUnit: A is unit triangular.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] A
* On entry, the triangular matrix A. If uplo = 'U', the
* leading n-by-n upper triangular part of the array A
* contains the upper triangular matrix, and the strictly
* lower triangular part of A is not referenced. If uplo =
* 'L', the leading n-by-n lower triangular part of the array
* A contains the lower triangular matrix, and the strictly
* upper triangular part of A is not referenced. If diag =
* 'U', the diagonal elements of A are also not referenced and
* are assumed to be 1. On exit, the (triangular) inverse of
* the original matrix.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
* @retval PlasmaSuccess on successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
* @retval > 0 if i, A(i,i) is exactly zero. The triangular
* matrix is singular and its inverse can not be computed.
*
******************************************************************************/
__attribute__((weak))
int plasma_core_ctrtri(plasma_enum_t uplo, plasma_enum_t diag,
                       int n,
                       plasma_complex32_t *A, int lda)
{
    // Delegate directly to LAPACKE's column-major triangular inversion
    // and propagate its info code (0 = success, <0 bad arg, >0 singular).
    int info = LAPACKE_ctrtri_work(LAPACK_COL_MAJOR,
                                   lapack_const(uplo), lapack_const(diag),
                                   n, A, lda);
    return info;
}
/******************************************************************************/
// Asynchronous (OpenMP task) wrapper around plasma_core_ctrtri.
// The task reads and writes A; the depend clause serializes it against other
// tasks touching the same tile.  On LAPACK failure the sequence is marked
// failed with iinfo+info so the caller can locate the offending tile.
void plasma_core_omp_ctrtri(plasma_enum_t uplo, plasma_enum_t diag,
                            int n,
                            plasma_complex32_t *A, int lda,
                            int iinfo,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(inout:A[0:lda*n])
    {
        // skip the work if an earlier task in this sequence already failed
        if (sequence->status == PlasmaSuccess) {
            int info = plasma_core_ctrtri(uplo, diag,
                                          n, A, lda);
            if (info != 0)
                plasma_request_fail(sequence, request, iinfo+info);
        }
    }
}
|
gost_fmt_plug.c | /*
* GOST 3411 cracker patch for JtR. Hacked together during
* May of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>,
* Sergey V. <sftp.mtuci at gmail com>, and JimF
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* Sergey V. <sftp.mtuci at gmail com>, and JimF
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Input Format => user:gost-hash;
* user:$gost$gost-hash;
* user:$gost-cp$gost-cryptopro-hash;
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_gost;
#elif FMT_REGISTERS_H
john_register_one(&fmt_gost);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "gost.h"
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 64
#endif
#include "memdbg.h"
#define FORMAT_LABEL "gost"
#define FORMAT_NAME "GOST R 34.11-94"
#define FORMAT_TAG "$gost$"
#define TAG_LENGTH 6
#define FORMAT_TAG_CP "$gost-cp$"
#define TAG_CP_LENGTH 9
#if !defined(USE_GCC_ASM_IA32) && defined(USE_GCC_ASM_X64)
#define ALGORITHM_NAME "64/64"
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 64
#define CIPHERTEXT_LENGTH 64
#define BINARY_SIZE 32
#define SALT_SIZE 1
#define SALT_ALIGN 1
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test vectors: bare hex and "$gost$"-tagged plain GOST R 34.11-94
 * hashes, plus "$gost-cp$" CryptoPro-S-box variants of the same inputs. */
static struct fmt_tests gost_tests[] = {
	{"ce85b99cc46752fffee35cab9a7b0278abb4c2d2055cff685af4912c49490f8d", ""},
	{"d42c539e367c66e9c88a801f6649349c21871b4344c6a573f849fdce62f314dd", "a"},
	{FORMAT_TAG "ce85b99cc46752fffee35cab9a7b0278abb4c2d2055cff685af4912c49490f8d", ""},
	{FORMAT_TAG "d42c539e367c66e9c88a801f6649349c21871b4344c6a573f849fdce62f314dd", "a"},
	{FORMAT_TAG "ad4434ecb18f2c99b60cbe59ec3d2469582b65273f48de72db2fde16a4889a4d", "message digest"},
	{FORMAT_TAG "0886f91e7fcaff65eb2635a1a4c9f203003e0ce5ea74b72fc6462cc72649694e",
	 "This is very very long pass phrase for test gost hash function."},
	{FORMAT_TAG_CP "981e5f3ca30c841487830f84fb433e13ac1101569b9c13584ac483234cd656c0", ""},
	{FORMAT_TAG_CP "e74c52dd282183bf37af0079c9f78055715a103f17e3133ceff1aacf2f403011", "a"},
	{FORMAT_TAG_CP "bc6041dd2aa401ebfa6e9886734174febdb4729aa972d60f549ac39b29721ba0", "message digest"},
	{FORMAT_TAG_CP "5394adfacb65a9ac5781c3080b244c955a9bf03befd51582c3850b8935f80762",
	 "This is very very long pass phrase for test gost hash function."},
	{NULL}
};

/* Candidate plaintexts and their computed 256-bit digests, allocated in
 * init() once the final max_keys_per_crypt is known. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[8];
static int is_cryptopro; /* non 0 for CryptoPro hashes */
/* One-time format setup: scale the key-batch sizes for OpenMP, build the
 * GOST lookup tables, and allocate per-candidate key/digest storage
 * (mem_*_tiny allocations are pool-owned and never freed individually). */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	/* min scales by thread count only; max additionally by OMP_SCALE */
	self->params.min_keys_per_crypt *= threads;
	self->params.max_keys_per_crypt *= threads * OMP_SCALE;
#endif
	gost_init_table();
	saved_key = mem_calloc_tiny(self->params.max_keys_per_crypt *
	                            sizeof(*saved_key), MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(self->params.max_keys_per_crypt *
	                            sizeof(*crypt_out), MEM_ALIGN_WORD);
}
/* Accept a hash if, after an optional "$gost$" or "$gost-cp$" tag, it is
 * exactly CIPHERTEXT_LENGTH hex digits followed by the terminator. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *hash = ciphertext;
	int nhex = 0;

	/* skip the optional tag */
	if (!strncmp(hash, FORMAT_TAG, TAG_LENGTH))
		hash += TAG_LENGTH;
	else if (!strncmp(hash, FORMAT_TAG_CP, TAG_CP_LENGTH))
		hash += TAG_CP_LENGTH;

	/* count leading hex digits (atoi16 maps non-hex to 0x7F) */
	while (atoi16[ARCH_INDEX(hash[nhex])] != 0x7F)
		nhex++;

	return hash[nhex] == '\0' && nhex == CIPHERTEXT_LENGTH;
}
/* Canonicalize a hash: tagged inputs pass through unchanged; bare hex
 * (already length-checked by valid()) gets the default "$gost$" prefix.
 * Returns a static buffer in the untagged case, per JtR convention. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1];

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) ||
	    !strncmp(ciphertext, FORMAT_TAG_CP, TAG_CP_LENGTH))
		return ciphertext;

	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	/* +1 copies the NUL terminator too */
	memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1);
	return out;
}
/* The "salt" is a one-byte flag: 0 = plain GOST ("$gost$"), 1 = CryptoPro
 * S-box ("$gost-cp$"). Input is canonical (tagged) because split() ran
 * first. Returns a pointer to a static byte, per the 1-byte SALT_SIZE. */
static void *get_salt(char *ciphertext)
{
	static char flavor;

	flavor = strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) ? 1 : 0;
	return &flavor;
}
/* Select the S-box variant used by the next crypt_all() batch. */
static void set_salt(void *salt)
{
	is_cryptopro = ((char *)salt)[0];
}
/* Decode the 64 hex digits after the tag into 32 raw bytes. Returns a
 * static, lazily-allocated buffer (overwritten on every call). */
static void *get_binary(char *ciphertext)
{
	static unsigned char *out;
	const char *hex;
	int i;

	if (!out)
		out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

	/* Skip whichever tag split() left on the hash. */
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		hex = ciphertext + TAG_LENGTH;
	else
		hex = ciphertext + TAG_CP_LENGTH;

	for (i = 0; i < BINARY_SIZE; i++, hex += 2)
		out[i] = (atoi16[ARCH_INDEX(hex[0])] << 4) |
		          atoi16[ARCH_INDEX(hex[1])];

	return out;
}
/* Partial-hash extractors for the cracker's hash tables: each returns the
 * low 4..27 bits of the first 32-bit word of the computed digest. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
/* Compute GOST digests for all queued candidates; the S-box variant chosen
 * by set_salt() applies to the whole batch.
 *
 * Fix: the `for` statement used to sit inside the #ifdef _OPENMP block, so
 * non-OpenMP builds executed the body exactly once with index 0 — correct
 * only by accident (MAX_KEYS_PER_CRYPT is 1 without OpenMP), and it even
 * hashed index 0 when count == 0. Only the pragma is conditional now; the
 * loop itself is unconditional and correct for any count. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		gost_ctx ctx;

		if (is_cryptopro)
			john_gost_cryptopro_init(&ctx);
		else
			john_gost_init(&ctx);
		john_gost_update(&ctx, (const unsigned char*)saved_key[index],
		    strlen(saved_key[index]));
		john_gost_final(&ctx, (unsigned char *)crypt_out[index]);
	}
	return count;
}
/* Quick reject across the batch: compare only the first 32 bits of each
 * computed digest against the target binary. */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (((ARCH_WORD_32 *)binary)[0] == crypt_out[i][0])
			return 1;
	}
	return 0;
}
/* Full 256-bit comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
	return memcmp(crypt_out[index], binary, BINARY_SIZE) == 0;
}
/* Nothing beyond cmp_one() is needed: the full binary digest is stored
 * and already compared, so this is always a confirmed match. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store a candidate plaintext, truncated to PLAINTEXT_LENGTH bytes and
 * always NUL-terminated. */
static void set_key(char *key, int index)
{
	size_t len = strlen(key);

	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
	saved_key[index][len] = 0;
}
/* Return the stored candidate (already NUL-terminated by set_key). */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor registered with the JtR core. The two nested
 * initializers are positional (fmt_params, then fmt_methods) and must stay
 * in the exact order required by struct fmt_main. */
struct fmt_main fmt_gost = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		gost_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_binop__land_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_int16)
// A.*B function (eWiseMult):       GB (_AemultB_01__land_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__land_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__land_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_int16)
// A*D function (colscale): GB (_AxD__land_int16)
// D*A function (rowscale): GB (_DxB__land_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__land_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__land_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_int16)
// C=scalar+B GB (_bind1st__land_int16)
// C=scalar+B' GB (_bind1st_tran__land_int16)
// C=A+scalar GB (_bind2nd__land_int16)
// C=A'+scalar GB (_bind2nd_tran__land_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) && (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT16 || GxB_NO_LAND_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense, no accumulator. The loop lives
// in the included template, specialized by the GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense C, using the task
// slicing of B prepared by the caller.
GrB_Info GB (_Cdense_accumB__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of a dense C.
GrB_Info GB (_Cdense_accumb__land_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the inner block already returned. Harmless
    // generator artifact; this file is auto-generated, so do not hand-edit.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (or masked variants): entries present in either A or B.
// Workspace slicings are declared here and released by GB_FREE_WORK, which
// the template may populate as needed.
GrB_Info GB (_AaddB__land_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (or masked variants): entries present in both A and B.
GrB_Info GB (_AemultB_01__land_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for LAND (commutative), so only the non-flipped
// branch of the conditional below is compiled in this file.
GrB_Info GB (_AemultB_02__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full.
GrB_Info GB (_AemultB_03__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as bitmap.
GrB_Info GB (_AemultB_bitmap__land_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the operator with the scalar bound as the first
// argument. Bb is B's bitmap (GBB skips absent entries; NULL means full).
GrB_Info GB (_bind1st__land_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int16_t bij = Bx [p] ;
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the operator with the scalar bound as the second
// argument. Ab is A's bitmap (GBB skips absent entries; NULL means full).
GrB_Info GB (_bind2nd__land_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = Ax [p] ;
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = Ax [pA] ;                     \
    Cx [pC] = ((x != 0) && (aij != 0)) ;        \
}

// C = op (x, A'): transpose A and apply the operator with scalar x bound
// as the first argument.
GrB_Info GB (_bind1st_tran__land_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of the file (same value here, since the
    // A and B types of this operator are identical)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = Ax [pA] ;                     \
    Cx [pC] = ((aij != 0) && (y != 0)) ;        \
}

// C = op (A', y): transpose A and apply the operator with scalar y bound
// as the second argument.
GrB_Info GB (_bind2nd_tran__land_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
points.c | #include "image.h"
#include <stdlib.h>
#include <memory.h>
#include <assert.h>
#include <limits.h>
#include <kazmath/vec2.h>
// Transforms even the sequence 0,1,2,3,... into reasonably good random numbers.
// Fix: declared "static inline" instead of bare "inline". A bare C99 inline
// definition provides no external definition for the symbol, so a call the
// compiler chooses not to inline (e.g. at -O0) fails to link. NOTE(review):
// assumes no other translation unit references these symbols — confirm.
static inline unsigned int randhash(unsigned int seed)
{
    unsigned int i = (seed ^ 12345391u) * 2654435769u;
    i ^= (i << 6) ^ (i >> 26);
    i *= 2654435769u;
    i += (i << 5) ^ (i >> 12);
    return i;
}

// Maps randhash(seed) onto the closed interval [a, b].
static inline float randhashf(unsigned int seed, float a, float b)
{
    return (b - a) * randhash(seed) / (float) UINT_MAX + a;
}
// Copies npoints samples of nbands floats each into a new 1-row point list.
// The caller owns the result and releases it with heman_points_destroy.
// Fix: allocation results are now checked (returns NULL on failure instead
// of dereferencing a null pointer), and the byte count uses size_t rather
// than int to avoid overflow on large inputs.
heman_image* heman_points_create(HEMAN_FLOAT* xy, int npoints, int nbands)
{
    heman_points* img = malloc(sizeof(heman_image));
    if (!img) {
        return NULL;
    }
    img->width = npoints;
    img->height = 1;
    img->nbands = nbands;
    size_t nbytes = sizeof(HEMAN_FLOAT) * (size_t) npoints * nbands;
    img->data = malloc(nbytes);
    if (!img->data) {
        free(img);
        return NULL;
    }
    memcpy(img->data, xy, nbytes);
    return img;
}
// Releases a point list created by heman_points_create or one of the
// heman_points_from_* constructors. Fix: now a no-op for NULL, so callers
// can destroy unconditionally (mirrors free(NULL) semantics).
void heman_points_destroy(heman_points* img)
{
    if (!img) {
        return;
    }
    free(img->data);
    free(img);
}
// Generates one jittered sample per cell of a cols x rows grid covering the
// width x height domain; returns a 2-band (x,y) point list.
// NOTE(review): rand() is called from inside an OpenMP parallel loop. The C
// standard does not require rand() to be thread-safe, so this races on the
// hidden RNG state and is non-deterministic across runs/thread counts; the
// TODO below already proposes a seeded-hash replacement. Left as-is because
// changing the RNG changes the output points.
heman_points* heman_points_from_grid(HEMAN_FLOAT width, HEMAN_FLOAT height,
    HEMAN_FLOAT cellsize, HEMAN_FLOAT jitter)
{
    int cols = width / cellsize;
    int rows = height / cellsize;
    int ncells = cols * rows;
    heman_points* result = heman_image_create(ncells, 1, 2);
    // rscale maps rand() in [0, RAND_MAX] to [0, 2*jitter].
    HEMAN_FLOAT rscale = 2.0 * jitter / (HEMAN_FLOAT) RAND_MAX;

// TODO it would be good to avoid ANSI rand() and add some determinism
// in a thread-safe way. Maybe we should add a seed argument and use
// Bridson's randhash?
#pragma omp parallel for
    for (int j = 0; j < rows; j++) {
        HEMAN_FLOAT* dst = result->data + j * cols * 2;
        // Start at the center of each cell, then jitter by [-jitter, jitter].
        HEMAN_FLOAT y = cellsize * 0.5 + cellsize * j;
        HEMAN_FLOAT x = cellsize * 0.5;
        for (int i = 0; i < cols; i++) {
            HEMAN_FLOAT rx = rand() * rscale - jitter;
            HEMAN_FLOAT ry = rand() * rscale - jitter;
            *dst++ = x + rx;
            *dst++ = y + ry;
            x += cellsize;
        }
    }
    return result;
}
// Rejection-samples a random point in the annulus between radius and
// 2*radius around `center`, advancing *seedptr past the hash seeds used.
// Fix: the source contained the mojibake "¢er" where "&center" belongs
// (HTML-entity corruption of "&cent;"), which does not compile; restored.
kmVec2 sample_annulus(float radius, kmVec2 center, unsigned int* seedptr)
{
    unsigned int seed = *seedptr;
    float rscale = 1.0f / UINT_MAX;
    kmVec2 r;
    float r2;

    // Draw points in [-2,2]^2 until one lands in the unit-scaled annulus
    // 1 < |r|^2 <= 4, i.e. radius < |r*radius| <= 2*radius.
    do {
        r.x = 4 * rscale * randhash(seed++) - 2;
        r.y = 4 * rscale * randhash(seed++) - 2;
        r2 = kmVec2LengthSq(&r);
    } while (r2 <= 1 || r2 > 4);

    *seedptr = seed;
    kmVec2Scale(&r, &r, radius);
    kmVec2Add(&r, &r, &center);
    return r;
}
// GRIDF maps a point in continuous space to its acceleration-grid cell;
// GRIDI maps an integer (column, row) pair. Each cell holds the index of at
// most one accepted sample, or -1 when empty.
#define GRIDF(vec) \
    grid[(int) (vec.x * invcell) + ncols * (int) (vec.y * invcell)]

#define GRIDI(vec) grid[(int) vec.y * ncols + (int) vec.x]

// Poisson-disk sampling via Bridson's algorithm: fills the width x height
// rectangle with points no closer than `radius` apart. Returns a 2-band
// point list whose width field is the number of samples produced.
heman_points* heman_points_from_poisson(
    HEMAN_FLOAT width, HEMAN_FLOAT height, HEMAN_FLOAT radius)
{
    int maxattempts = 30;
    float rscale = 1.0f / UINT_MAX;
    unsigned int seed = 0;
    kmVec2 rvec;
    rvec.x = rvec.y = radius;
    float r2 = radius * radius;

    // Acceleration grid. Cell size radius/sqrt(2) guarantees at most one
    // accepted sample per cell.
    float cellsize = radius / sqrtf(2);
    float invcell = 1.0f / cellsize;
    int ncols = ceil(width * invcell);
    int nrows = ceil(height * invcell);
    int maxcol = ncols - 1;
    int maxrow = nrows - 1;
    int ncells = ncols * nrows;
    int* grid = malloc(ncells * sizeof(int));
    for (int i = 0; i < ncells; i++) {
        grid[i] = -1;
    }

    // Active list and resulting sample list.
    int* actives = malloc(ncells * sizeof(int));
    int nactives = 0;
    heman_points* result = heman_image_create(ncells, 1, 2);
    kmVec2* samples = (kmVec2*) result->data;
    int nsamples = 0;

    // First sample: uniformly random in the domain.
    kmVec2 pt;
    pt.x = width * randhash(seed++) * rscale;
    pt.y = height * randhash(seed++) * rscale;
    GRIDF(pt) = actives[nactives++] = nsamples;
    samples[nsamples++] = pt;

    while (nsamples < ncells) {
        // Pick a random active sample and try to spawn a neighbor nearby.
        int aindex = MIN(randhashf(seed++, 0, nactives), nactives - 1);
        int sindex = actives[aindex];
        int found = 0;
        kmVec2 j, minj, maxj, delta;
        int attempt;
        for (attempt = 0; attempt < maxattempts && !found; attempt++) {
            pt = sample_annulus(radius, samples[sindex], &seed);

            // Check that this sample is within bounds.
            if (pt.x < 0 || pt.x >= width || pt.y < 0 || pt.y >= height) {
                continue;
            }

            // Test proximity to nearby samples: scan the grid cells within
            // one radius of the candidate in each direction.
            minj = maxj = pt;
            kmVec2Add(&maxj, &maxj, &rvec);
            kmVec2Subtract(&minj, &minj, &rvec);
            kmVec2Scale(&minj, &minj, invcell);
            kmVec2Scale(&maxj, &maxj, invcell);
            minj.x = CLAMP((int) minj.x, 0, maxcol);
            maxj.x = CLAMP((int) maxj.x, 0, maxcol);
            minj.y = CLAMP((int) minj.y, 0, maxrow);
            maxj.y = CLAMP((int) maxj.y, 0, maxrow);
            int reject = 0;
            for (j.y = minj.y; j.y <= maxj.y && !reject; j.y++) {
                for (j.x = minj.x; j.x <= maxj.x && !reject; j.x++) {
                    int entry = GRIDI(j);
                    if (entry > -1 && entry != sindex) {
                        kmVec2Subtract(&delta, &samples[entry], &pt);
                        if (kmVec2LengthSq(&delta) < r2) {
                            reject = 1;
                        }
                    }
                }
            }
            if (reject) {
                continue;
            }
            found = 1;
        }
        if (found) {
            GRIDF(pt) = actives[nactives++] = nsamples;
            samples[nsamples++] = pt;
        } else {
            // No valid neighbor after maxattempts: retire this active sample
            // by swapping in the last active entry.
            if (--nactives < 0) {
                break;
            }
            actives[aindex] = actives[nactives];
        }
    }

    // The following line probably isn't necessary.  Paranoia.
    result->width = nsamples;

    free(grid);
    free(actives);
    return result;
}

#undef GRIDF
#undef GRIDI
// Cell-index helpers for the variable-density sampler. Unlike the fixed
// Poisson grid, each cell may hold up to `gcapacity` samples: grid[] stores
// sample indices in fixed-size buckets and ngrid[] counts entries per cell.
#define NGRID_INDEX(fpt) \
    ((int) (fpt.x * invcell) + ncols * (int) (fpt.y * invcell))

#define GRID_INDEX(fpt) (gcapacity * NGRID_INDEX(fpt))

#define GRID_INSERT(fpt, sindex)                  \
    gindex = NGRID_INDEX(fpt);                    \
    grid[gcapacity * gindex + ngrid[gindex]] = sindex; \
    ngrid[gindex]++

#define NGRID_BEGIN(ipt) ((int) ipt.y * ncols + (int) ipt.x)
#define GRID_BEGIN(ipt) (NGRID_BEGIN(ipt) * gcapacity)
#define GRID_END(ipt) (GRID_BEGIN(ipt) + ngrid[NGRID_BEGIN(ipt)])

// Density-modulated Poisson-disk sampling: the local minimum distance
// varies between maxradius (density 0) and minradius (density 1), sampled
// from the single-band `density` image. The domain is width 1 with height
// set by the density image's aspect ratio.
heman_points* heman_points_from_density(
    heman_image* density, HEMAN_FLOAT minradius, HEMAN_FLOAT maxradius)
{
    assert(density->nbands == 1);
    float width = 1;
    float height = density->height / density->width;
    int maxattempts = 30;
    float rscale = 1.0f / UINT_MAX;
    unsigned int seed = 0;
    kmVec2 rvec;
    rvec.x = rvec.y = maxradius;
    int gindex;

    // Acceleration grid sized for the *largest* radius; denser regions pack
    // several samples into one cell, hence the per-cell bucket capacity.
    float cellsize = maxradius / sqrtf(2);
    float invcell = 1.0f / cellsize;
    int ncols = ceil(width * invcell);
    int nrows = ceil(height * invcell);
    int maxcol = ncols - 1;
    int maxrow = nrows - 1;
    int ncells = ncols * nrows;
    // NOTE(review): if cellsize * density->width < 1, ntexels (and hence
    // gcapacity and maxsamples) is 0 — presumably callers pass radii large
    // enough relative to the image; confirm.
    int ntexels = cellsize * density->width;
    int gcapacity = ntexels * ntexels;
    int* grid = malloc(ncells * sizeof(int) * gcapacity);
    int* ngrid = malloc(ncells * sizeof(int));
    for (int i = 0; i < ncells; i++) {
        ngrid[i] = 0;
    }

    // Active list and resulting sample list.
    int* actives = malloc(ncells * sizeof(int));
    int nactives = 0;
    int maxsamples = ncells * gcapacity;
    heman_points* result = heman_image_create(maxsamples, 1, 2);
    kmVec2* samples = (kmVec2*) result->data;
    int nsamples = 0;

    // First sample: uniformly random in the domain.
    kmVec2 pt;
    pt.x = width * randhash(seed++) * rscale;
    pt.y = height * randhash(seed++) * rscale;
    actives[nactives++] = nsamples;
    GRID_INSERT(pt, nsamples);
    samples[nsamples++] = pt;

    while (nsamples < maxsamples) {
        // Pick a random active sample and try to spawn a neighbor nearby.
        int aindex = MIN(randhashf(seed++, 0, nactives), nactives - 1);
        int sindex = actives[aindex];
        int found = 0;
        kmVec2 j, minj, maxj, delta;
        int attempt;
        for (attempt = 0; attempt < maxattempts && !found; attempt++) {
            pt = sample_annulus(maxradius, samples[sindex], &seed);

            // Check that this sample is within bounds.
            if (pt.x < 0 || pt.x >= width || pt.y < 0 || pt.y >= height) {
                continue;
            }

            // Test proximity to nearby samples.
            minj = maxj = pt;
            kmVec2Add(&maxj, &maxj, &rvec);
            kmVec2Subtract(&minj, &minj, &rvec);
            kmVec2Scale(&minj, &minj, invcell);
            kmVec2Scale(&maxj, &maxj, invcell);
            minj.x = CLAMP((int) minj.x, 0, maxcol);
            maxj.x = CLAMP((int) maxj.x, 0, maxcol);
            minj.y = CLAMP((int) minj.y, 0, maxrow);
            maxj.y = CLAMP((int) maxj.y, 0, maxrow);
            int reject = 0;
            // Derive the local minimum distance from the density image.
            HEMAN_FLOAT densityval;
            heman_image_sample(density, pt.x, pt.y, &densityval);
            // The following square root seems to lead to more satisfying
            // results, although we should perhaps let the client decide...
            densityval = sqrt(densityval);
            float mindist = maxradius - densityval * (maxradius - minradius);
            float r2 = mindist * mindist;
            for (j.y = minj.y; j.y <= maxj.y && !reject; j.y++) {
                for (j.x = minj.x; j.x <= maxj.x && !reject; j.x++) {
                    for (int g = GRID_BEGIN(j); g < GRID_END(j); ++g) {
                        int entry = grid[g];
                        if (entry != sindex) {
                            kmVec2Subtract(&delta, &samples[entry], &pt);
                            if (kmVec2LengthSq(&delta) < r2) {
                                reject = 1;
                            }
                        }
                    }
                }
            }
            if (reject) {
                continue;
            }
            found = 1;
        }
        // Drop the candidate if its grid bucket is already full.
        if (found && ngrid[NGRID_INDEX(pt)] >= gcapacity) {
            found = 0;
        }
        if (found) {
            actives[nactives++] = nsamples;
            GRID_INSERT(pt, nsamples);
            samples[nsamples++] = pt;
        } else {
            // Retire this active sample by swapping in the last entry.
            if (--nactives < 0) {
                break;
            }
            actives[aindex] = actives[nactives];
        }
    }

    // We don't usually fill the pre-allocated buffer, since it was
    // allocated for the worst case, so adjust the size:
    result->width = nsamples;

    free(grid);
    free(ngrid);
    free(actives);
    return result;
}
|
GB_unaryop__identity_uint16_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint16_fp64
// op(A') function: GB_tran__identity_uint16_fp64
// C type: uint16_t
// A type: double
// cast: uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16)
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint16_t z ; GB_CAST_UNSIGNED(z,x,16) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): elementwise identity with a double -> uint16_t cast
// (GB_CAST_UNSIGNED saturates/truncates per the macro defined above).
GrB_Info GB_unop__identity_uint16_fp64
(
    uint16_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while typecasting double -> uint16_t;
// the sliced transpose loop lives in the included template.
GrB_Info GB_tran__identity_uint16_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
/*
  Single-byte tokens for two-character compound operators.  AcquireFxInfo()
  rewrites each compound operator (e.g. "<<", "==") in the expression to the
  corresponding token so the parser can treat every operator as a single
  character.  Values start at 0xf5, above the printable ASCII range used by
  ordinary expression text.
*/
#define LeftShiftOperator 0xf5U
#define RightShiftOperator 0xf6U
#define LessThanEqualOperator 0xf7U
#define GreaterThanEqualOperator 0xf8U
#define EqualOperator 0xf9U
#define NotEqualOperator 0xfaU
#define LogicalAndOperator 0xfbU
#define LogicalOrOperator 0xfcU
#define ExponentialNotation 0xfdU
/*
  Evaluator state for an fx expression, created by AcquireFxInfo() and
  released by DestroyFxInfo().
*/
struct _FxInfo
{
  const Image
    *images;        /* image sequence the expression is evaluated against */

  char
    *expression;    /* preprocessed copy of the expression text */

  FILE
    *file;          /* output stream for debug prints (set to stderr) */

  SplayTreeInfo
    *colors,        /* color lookup cache — presumably keyed by color name;
                       confirm against the symbol-resolution code */
    *symbols;       /* cache of computed symbol values (e.g. statistics) */

  CacheView
    **view;         /* one virtual cache view per image in the sequence */

  RandomInfo
    *random_info;   /* random number generator state */

  ExceptionInfo
    *exception;     /* private exception bucket for the evaluator */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *images,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o expression: the expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
  ExceptionInfo *exception)
{
  /*
    Allocate an FxInfo evaluator for the given image sequence and expression.
    The expression is copied, whitespace-compacted, adjusted for unary
    negation, and each two-character compound operator is rewritten to a
    single-byte token so the parser sees one character per operator.
  */
  static const struct
  {
    const char
      *compound;      /* two-character operator as written by the user */

    int
      simple;         /* single-byte replacement token */
  } fx_operators[] =
  {
    { "<<", LeftShiftOperator },
    { ">>", RightShiftOperator },
    { "<=", LessThanEqualOperator },
    { ">=", GreaterThanEqualOperator },
    { "==", EqualOperator },
    { "!=", NotEqualOperator },
    { "&&", LogicalAndOperator },
    { "||", LogicalOrOperator },
    { "**", ExponentialNotation }
  };

  char
    token[2];

  const Image
    *image;

  FxInfo
    *fx_info;

  register ssize_t
    n;

  size_t
    j;

  fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
  (void) memset(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=images;
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    One virtual cache view per image in the sequence.
  */
  n=0;
  image=GetFirstImageInList(fx_info->images);
  while (image != (Image *) NULL)
  {
    fx_info->view[n]=AcquireVirtualCacheView(image,exception);
    n++;
    image=image->next;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  (void) SubstituteString(&fx_info->expression," ",""); /* compact string */
  /*
    Force right-to-left associativity for unary negation: rewrite "-x" as
    "-1.0*x", then undo the rewrite where it would corrupt "^-" exponents
    and "E-"/"e-" scientific notation.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  /*
    Convert each compound operator to its single-byte token.
  */
  token[1]='\0';
  for (j=0; j < (sizeof(fx_operators)/sizeof(fx_operators[0])); j++)
  {
    token[0]=(char) fx_operators[j].simple;
    (void) SubstituteString(&fx_info->expression,fx_operators[j].compound,
      token);
  }
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% const double attenuate,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o attenuate: attenuate the random distribution.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  const double attenuate,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"

  CacheView
    *image_view,
    *noise_view;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;  /* one RNG per OpenMP thread */

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated path; fall through to the CPU
    implementation when it declines (returns NULL).
  */
  noise_image=AccelerateAddNoiseImage(image,noise_type,attenuate,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    NOTE(review): the key == ~0UL test appears to gate multithreading on the
    state of the random secret key, presumably to keep output reproducible
    when a fixed seed is in effect — confirm against GetRandomSecretKey().
  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,noise_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (noise_traits == UndefinedPixelTrait))
          continue;  /* channel not present in one of the images */
        if ((noise_traits & CopyPixelTrait) != 0)
          {
            /* copy-only channels (e.g. unselected ones) pass through */
            SetPixelChannel(noise_image,channel,p[i],q);
            continue;
          }
        SetPixelChannel(noise_image,channel,ClampToQuantum(
          GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)),
          q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(noise_image);
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AddNoiseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;  /* caller requested cancellation */
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag "BlueShift/Image"

  CacheView
    *image_view,
    *shift_view;

  Image
    *shift_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  shift_image=CloneImage(image,0,0,MagickTrue,exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse)
    {
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,shift_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    PixelInfo
      pixel;

    Quantum
      quantum;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Blend each channel first with the darkest RGB component, then with
        the brightest, each time weighted by factor — muting the colors
        toward a moonlit look.
      */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) < quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) < quantum)
        quantum=GetPixelBlue(image,p);
      /* quantum now holds min(R,G,B) */
      pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum);
      pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum);
      pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum);
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) > quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) > quantum)
        quantum=GetPixelBlue(image,p);
      /* quantum now holds max(R,G,B) */
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetPixelRed(shift_image,ClampToQuantum(pixel.red),q);
      SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q);
      SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(shift_image);
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlueShiftImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;  /* caller requested cancellation */
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Simulate a charcoal drawing: edge-detect a clone of the image, blur the
    edges, then normalize, negate, and reduce to grayscale.  Each
    intermediate image is destroyed as soon as the next stage has consumed
    it; a NULL from any stage propagates to the caller.
  */
  Image
    *blur_image,
    *edge_image,
    *working_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  working_image=CloneImage(image,0,0,MagickTrue,exception);
  if (working_image == (Image *) NULL)
    return((Image *) NULL);
  edge_image=EdgeImage(working_image,radius,exception);
  working_image=DestroyImage(working_image);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  blur_image=BlurImage(edge_image,radius,sigma,exception);
  edge_image=DestroyImage(edge_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(blur_image,exception);
  (void) NegateImage(blur_image,MagickFalse,exception);
  (void) GrayscaleImage(blur_image,image->intensity,exception);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *blend,
% const PixelInfo *colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A character string indicating the level of blending as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *blend,
  const PixelInfo *colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"
/* linear blend: blend_percentage percent of colorize, remainder of pixel */
#define Colorize(pixel,blend_percentage,colorize) \
  (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0)

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  PixelInfo
    blend_percentage;   /* per-channel blend percentages parsed from blend */

  ssize_t
    y;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colorize_image=CloneImage(image,0,0,MagickTrue,exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse)
    {
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  /*
    A gray image (or gray fill color) is promoted to sRGB so the blend can
    introduce color; an opaque image gains an alpha channel when the fill
    color carries one.
  */
  if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) ||
      (IsPixelInfoGray(colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace,exception);
  if ((colorize_image->alpha_trait == UndefinedPixelTrait) &&
      (colorize->alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception);
  if (blend == (const char *) NULL)
    return(colorize_image);  /* no blend spec: return the plain clone */
  /*
    Parse the blend geometry "r[/g[/b[/a]]]"; rho seeds every channel and
    later values override individual ones.  For CMYK, psi is black and chi
    is alpha.
  */
  GetPixelInfo(colorize_image,&blend_percentage);
  flags=ParseGeometry(blend,&geometry_info);
  blend_percentage.red=geometry_info.rho;
  blend_percentage.green=geometry_info.rho;
  blend_percentage.blue=geometry_info.rho;
  blend_percentage.black=geometry_info.rho;
  blend_percentage.alpha=(MagickRealType) TransparentAlpha;
  if ((flags & SigmaValue) != 0)
    blend_percentage.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    blend_percentage.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    blend_percentage.alpha=geometry_info.psi;
  if (blend_percentage.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        blend_percentage.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        blend_percentage.alpha=geometry_info.chi;
    }
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(colorize_image,colorize_image,colorize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) colorize_image->rows; y++)
  {
    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) colorize_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++)
      {
        PixelTrait traits = GetPixelChannelTraits(colorize_image,
          (PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;  /* channel not selected for update */
        SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum(
          Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i),
          GetPixelInfoChannel(colorize,(PixelChannel) i))),q);
      }
      q+=GetPixelChannels(colorize_image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorizeImageTag,progress,
          colorize_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;  /* caller requested cancellation */
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* FUTURE: modify to make use of a MagickMatrix Mutliply function
That should be provided in "matrix.c"
(ASIDE: actually distorts should do this too but currently doesn't)
*/
/* FUTURE: modify to make use of a MagickMatrix Mutliply function
   That should be provided in "matrix.c"
   (ASIDE: actually distorts should do this too but currently doesn't)
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  /*
    Working 6x6 matrix, seeded with the identity.  Rows 0-4 produce the
    output R, G, B, K, A channels (see the switch below); columns 0-4 weight
    the input R, G, B, K, A values and column 5 is a constant offset scaled
    by QuantumRange.
  */
  double
    ColorMatrix[6][6] =
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Map given color_matrix, into a 6x6 matrix RGBKA and a constant
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  i=0;
  /* copy the user's kernel into the identity; entries beyond 6x6 are read
     but discarded so i stays in step with the kernel's values array */
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
    {
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      /*
        Log the effective 6x6 matrix, one row per line.
      */
      char
        format[MagickPathExtent],
        *message;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Apply the ColorMatrix to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;

      size_t
        height;

      GetPixelInfoPixel(image,p,&pixel);
      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      for (v=0; v < (ssize_t) height; v++)
      {
        double
          sum;

        /*
          Weighted sum of the input channels; black and alpha terms only
          contribute when the image actually has those channels.
        */
        sum=ColorMatrix[v][0]*GetPixelRed(image,p)+ColorMatrix[v][1]*
          GetPixelGreen(image,p)+ColorMatrix[v][2]*GetPixelBlue(image,p);
        if (image->colorspace == CMYKColorspace)
          sum+=ColorMatrix[v][3]*GetPixelBlack(image,p);
        if (image->alpha_trait != UndefinedPixelTrait)
          sum+=ColorMatrix[v][4]*GetPixelAlpha(image,p);
        sum+=QuantumRange*ColorMatrix[v][5];  /* constant offset column */
        switch (v)
        {
          case 0: pixel.red=sum; break;
          case 1: pixel.green=sum; break;
          case 2: pixel.blue=sum; break;
          case 3: pixel.black=sum; break;
          case 4: pixel.alpha=sum; break;
          default: break;
        }
      }
      SetPixelViaPixelInfo(color_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(color_image);
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;  /* caller requested cancellation */
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
% ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  /*
    Release every resource attached to an FxInfo structure — exception,
    expression text, splay trees, per-image cache views, and the RNG —
    then the structure itself.  Always returns NULL for the caller to
    assign back to its pointer.
  */
  register ssize_t
    n;

  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  /*
    Tear down one cache view per image, last image first.
  */
  n=(ssize_t) GetImageListLength(fx_info->images);
  while (--n >= 0)
    fx_info->view[n]=DestroyCacheView(fx_info->view[n]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateExpression method is:
%
% double FxEvaluateChannelExpression(FxInfo *fx_info,
% const PixelChannel channel,const ssize_t x,const ssize_t y,
% double *alpha,Exceptioninfo *exception)
% double FxEvaluateExpression(FxInfo *fx_info,
% double *alpha,Exceptioninfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double FxChannelStatistics(FxInfo *fx_info,Image *image,
  PixelChannel channel,const char *symbol,ExceptionInfo *exception)
{
  /*
    Evaluate an image statistic named by symbol — depth, kurtosis, maxima,
    mean, minima, skewness, or standard_deviation — optionally restricted to
    one channel via a ".channel" suffix.  Results are cached as strings in
    fx_info->symbols so each statistic is computed at most once per image.
    Returns the statistic scaled by QuantumScale.
  */
  ChannelType
    channel_mask;

  char
    key[MagickPathExtent],
    statistic[MagickPathExtent];

  const char
    *value;

  register const char
    *p;

  /*
    FIX: statistic was previously left uninitialized; if symbol matched none
    of the statistics below, uninitialized stack memory was cached and
    parsed (undefined behavior).  Zeroing it makes the fallback a clean
    empty string, which StringToDouble() converts to 0.0.
  */
  (void) memset(statistic,0,sizeof(statistic));
  channel_mask=UndefinedChannel;
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  if (*p == '.')
    {
      ssize_t
        option;

      /*
        A ".channel" suffix narrows the statistic to that channel; remember
        the previous mask so it can be restored before returning.
      */
      option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1);
      if (option >= 0)
        {
          channel=(PixelChannel) option;
          channel_mask=SetPixelChannelMask(image,(ChannelType)
            (1UL << channel));
        }
    }
  (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,key);
  if (value != (const char *) NULL)
    {
      /*
        Cache hit: restore the channel mask and return the cached value.
      */
      if (channel_mask != UndefinedChannel)
        (void) SetPixelChannelMask(image,channel_mask);
      return(QuantumScale*StringToDouble(value,(char **) NULL));
    }
  (void) DeleteNodeFromSplayTree(fx_info->symbols,key);
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;

      depth=GetImageDepth(image,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",(double)
        depth);
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",kurtosis);
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",maxima);
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",mean);
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",minima);
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",skewness);
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",
        standard_deviation);
    }
  if (channel_mask != UndefinedChannel)
    (void) SetPixelChannelMask(image,channel_mask);
  (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key),
    ConstantString(statistic));
  return(QuantumScale*StringToDouble(statistic,(char **) NULL));
}
static double
FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t,
const ssize_t,const char *,const size_t,double *,ExceptionInfo *);
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
  /*
    Greatest common divisor of alpha and beta via the iterative Euclidean
    algorithm (equivalent to the recursive form: gcd(a,0)=a,
    gcd(a,b)=gcd(b,a mod b)).
  */
  while (beta != 0)
  {
    MagickOffsetType
      residue;

    residue=alpha % beta;
    alpha=beta;
    beta=residue;
  }
  return(alpha);
}
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  /*
    Scan forward to the ')' that closes the current parenthesized
    subexpression, tracking nesting depth.  Returns a pointer to that ')'
    (or to the terminating NUL, after raising UnbalancedParenthesis, when
    the expression ends before the parenthesis closes).
  */
  const char
    *scan;

  register ssize_t
    depth;

  depth=0;
  scan=expression;
  while (*scan != '\0')
  {
    if ((depth == 1) && (*scan == ')'))
      break;  /* found the matching close parenthesis */
    if (*scan == '(')
      depth++;
    else
      if (*scan == ')')
        depth--;
    scan++;
  }
  if (*scan == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(scan);
}
/*
  FxGetSymbol() resolves one FX-expression symbol at pixel (x,y) and returns
  its numeric value (typically scaled to [0,1] by QuantumScale).  Symbols
  include per-pixel channel accessors ("r", "g", "b", "a", CMYK "c"/"m"/
  "y"/"k", "o"), image sequence selectors ("u", "v", "s", optionally with a
  bracketed index expression), pixel addressing ("p{x,y}" absolute,
  "p[dx,dy]" relative), image attributes ("w", "h", "page.*", "resolution.*",
  statistics via FxChannelStatistics), color names, and user-defined
  variables stored in the fx_info->symbols splay-tree.  On a parse failure
  an exception is raised and 0.0 is returned.
*/
static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel,
  const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
  ExceptionInfo *exception)
{
  char
    *q,
    symbol[MagickPathExtent];

  const char
    *p,
    *value;

  Image
    *image;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  double
    alpha,
    beta;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    level;

  /*
    Defaults: sample the current image in the list at the caller's (x,y).
  */
  p=expression;
  i=GetImageIndexInList(fx_info->images);
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  /*
    A leading 's'/'u'/'v'/'p' acts as a prefix only when it is not the first
    letter of a longer identifier (checked via the character after it).
  */
  if (isalpha((int) ((unsigned char) *(p+1))) == 0)
    {
      char
        *subexpression;

      subexpression=AcquireString(expression);
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          /*
            Image selector: 's' is the current image, 'u' the first image,
            'v' the second image of the sequence.
          */
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              /*
                Bracketed image index, e.g. u[2]: copy the (possibly nested)
                bracket contents and evaluate them as a subexpression.
              */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              i=(ssize_t) alpha;
              if (*p != '\0')
                p++;
            }
          if (*p == '.')
            p++;
        }
      if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
        {
          p++;
          if (*p == '{')
            {
              /*
                p{ex,ey}: absolute pixel coordinates; the subexpression
                yields alpha (x) and beta (y).
              */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              point.x=alpha;
              point.y=beta;
              if (*p != '\0')
                p++;
            }
          else
            if (*p == '[')
              {
                /*
                  p[dx,dy]: offsets relative to the current (x,y).
                */
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  depth,&beta,exception);
                point.x+=alpha;
                point.y+=beta;
                if (*p != '\0')
                  p++;
              }
          if (*p == '.')
            p++;
        }
      subexpression=DestroyString(subexpression);
    }
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  i=GetImageIndexInList(image);
  /*
    Sample the selected image at the (possibly fractional) point with the
    image's interpolation method; the result populates 'pixel'.
  */
  GetPixelInfo(image,&pixel);
  status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  (void) status;
  /*
    A longer symbol that is not one of the reserved HSL/intensity names may
    be a color name (e.g. "red.r"): strip any trailing ".channel" suffix and
    try the per-expression color cache, falling back to a full color lookup.
  */
  if ((strlen(p) > 2) && (LocaleCompare(p,"intensity") != 0) &&
      (LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) &&
      (LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MagickPathExtent];

      (void) CopyMagickString(name,p,MagickPathExtent);
      for (q=name+(strlen(name)-1); q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      if ((strlen(name) > 2) &&
          (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL))
        {
          PixelInfo
            *color;

          color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
          if (color != (PixelInfo *) NULL)
            {
              pixel=(*color);
              p+=strlen(name);  /* consume the recognized color name */
            }
          else
            {
              MagickBooleanType
                status;  /* shadows the outer 'status' deliberately */

              status=QueryColorCompliance(name,AllCompliance,&pixel,
                fx_info->exception);
              if (status != MagickFalse)
                {
                  (void) AddValueToSplayTree(fx_info->colors,ConstantString(
                    name),ClonePixelInfo(&pixel));
                  p+=strlen(name);
                }
            }
        }
    }
  (void) CopyMagickString(symbol,p,MagickPathExtent);
  StripString(symbol);
  /*
    Empty symbol: return the interpolated value of the requested channel.
  */
  if (*symbol == '\0')
    {
      switch (channel)
      {
        case RedPixelChannel: return(QuantumScale*pixel.red);
        case GreenPixelChannel: return(QuantumScale*pixel.green);
        case BluePixelChannel: return(QuantumScale*pixel.blue);
        case BlackPixelChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
        case AlphaPixelChannel:
        {
          /* no alpha trait: treat the pixel as fully opaque */
          if (pixel.alpha_trait == UndefinedPixelTrait)
            return(1.0);
          alpha=(double) (QuantumScale*pixel.alpha);
          return(alpha);
        }
        case CompositePixelChannel:
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
        case IndexPixelChannel:
          return(0.0);
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /*
    Named symbols, dispatched on the first letter.  Statistics symbols
    (mean, minima, maxima, kurtosis, skewness, standard_deviation, depth)
    delegate to FxChannelStatistics.
  */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((QuantumScale*pixel.alpha));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (LocaleNCompare(symbol,"channel",7) == 0)
        {
          /*
            channel(r,g,b,k,a) / channel(c,m,y,k,a): pick the geometry field
            that corresponds to the channel being evaluated; note CMYK and
            RGB map the black/alpha channels to different geometry fields.
          */
          GeometryInfo
            channel_info;

          MagickStatusType
            flags;

          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanPixelChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaPixelChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowPixelChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackPixelChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case AlphaPixelChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedPixelChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenPixelChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BluePixelChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case BlackPixelChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            case AlphaPixelChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            default:
              return(0.0);
          }
        }
      /* CMYK cyan is stored in the red component of PixelInfo */
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'E':
    case 'e':
    {
      if (LocaleCompare(symbol,"extent") == 0)
        {
          if (image->extent != 0)
            return((double) image->extent);
          return((double) GetBlobSize(image));
        }
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((double) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      /* "image.xxx" statistics use the bare name past the "image." prefix */
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"intensity") == 0)
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
      if (LocaleCompare(symbol,"i") == 0)
        return((double) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((double) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luma") == 0)
        {
          double
            luma;

          /* Rec. 709-style weighted sum of the RGB components */
          luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luma);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminence;  /* (sic) historical spelling of the local name */

          luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luminence);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      /* CMYK magenta is stored in the green component of PixelInfo */
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((double) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      /* 'o' (opacity) maps onto the alpha component */
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.alpha);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((double) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((double) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((double) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((double) image->page.y);
      if (LocaleCompare(symbol,"printsize.x") == 0)
        return(PerceptibleReciprocal(image->resolution.x)*image->columns);
      if (LocaleCompare(symbol,"printsize.y") == 0)
        return(PerceptibleReciprocal(image->resolution.y)*image->rows);
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(symbol,"quality") == 0)
        return((double) image->quality);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((double) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((double) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      /* CMYK yellow is stored in the blue component of PixelInfo */
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        return((double) GetImageDepth(image,fx_info->exception));
      break;
    }
    default:
      break;
  }
  /*
    Not a built-in symbol: fall back to the user-defined variable table.
  */
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (value != (const char *) NULL)
    return(StringToDouble(value,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UnableToParseExpression","`%s'",symbol);
  return(0.0);
}
/*
  FxOperatorPrecedence() scans an FX expression at brace/bracket nesting
  level 0 and returns a pointer to the operator where the expression should
  be split for evaluation: the loosest-binding operator found, honoring
  associativity (right-to-left operators split at the first occurrence,
  left-to-right at the last).  Returns NULL when no splittable operator is
  found.  'c' tracks the previous significant character (-1 before any).
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  /*
    Precedence levels ordered from tightest binding (low enum value) to
    loosest (high enum value); a larger value is a better split point.
  */
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;

  register const char
    *subexpression;

  register int
    c;

  size_t
    level;

  c=(-1);
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while ((c != '\0') && (*expression != '\0'))
  {
    precedence=UndefinedPrecedence;
    /* skip whitespace; '@' is treated as transparent here as well */
    if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      Skip over identifier fragments that contain characters which would
      otherwise be misread as operands/operators: function names such as
      acosh/asinh/atanh/atan2, scientific notation E+/E-, Bessel names
      j0/j1, and '#' hex literals.
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (LocaleNCompare(expression,"acosh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (LocaleNCompare(expression,"asinh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (LocaleNCompare(expression,"atanh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
        if (LocaleNCompare(expression,"atan2",5) == 0)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((isdigit(c) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
      }
      /*
        NOTE(review): no break above -- 'E'/'e' falls through into the
        'J'/'j' case.  'e' cannot match "j0"/"j1", so the fall-through is
        harmless in effect, but confirm it is intentional.
      */
      case 'J':
      case 'j':
      {
        if ((LocaleNCompare(expression,"j0",2) == 0) ||
            (LocaleNCompare(expression,"j1",2) == 0))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        /* hex color literal: consume all hex digits */
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    /* track {} / [] nesting using the PREVIOUS character */
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    /* only operators at nesting level 0 are candidate split points */
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        default:
        {
          /*
            Implicit multiplication: a digit or ')' followed by a letter,
            '(' or (after a non-digit) a digit -- except the noise
            characters 'x'/'y'.  (A 'default' label placed before other
            cases is legal C; position in the switch is irrelevant.)
          */
          if (((c != 0) && ((isdigit(c) != 0) ||
               (strchr(")",c) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
                (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
               ((isdigit(c) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /*
            '+'/'-' is binary only when the previous character is an
            operand; after an operator/'(' it is a unary sign.
          */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha(c) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    /* skip parenthesized subexpressions wholesale */
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}
static double FxEvaluateSubexpression(FxInfo *fx_info,
const PixelChannel channel,const ssize_t x,const ssize_t y,
const char *expression,const size_t depth,double *beta,
ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
#define FxMaxSubexpressionDepth 200
#define FxReturn(value) \
{ \
subexpression=DestroyString(subexpression); \
return(value); \
}
char
*q,
*subexpression;
double
alpha,
gamma;
register const char
*p;
*beta=0.0;
subexpression=AcquireString(expression);
*subexpression='\0';
if (depth > FxMaxSubexpressionDepth)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",expression);
FxReturn(0.0);
}
if (exception->severity >= ErrorException)
FxReturn(0.0);
while (isspace((int) ((unsigned char) *expression)) != 0)
expression++;
if (*expression == '\0')
FxReturn(0.0);
p=FxOperatorPrecedence(expression,exception);
if (p != (const char *) NULL)
{
(void) CopyMagickString(subexpression,expression,(size_t)
(p-expression+1));
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
switch ((unsigned char) *p)
{
case '~':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) (~(size_t) *beta);
FxReturn(*beta);
}
case '!':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta == 0.0 ? 1.0 : 0.0);
}
case '^':
{
*beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,
depth+1,beta,exception));
FxReturn(*beta);
}
case '*':
case ExponentialNotation:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha*(*beta));
}
case '/':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if (*beta == 0.0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"DivideByZero","`%s'",expression);
FxReturn(0.0);
}
FxReturn(alpha/(*beta));
}
case '%':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=fabs(floor((*beta)+0.5));
if (*beta == 0.0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"DivideByZero","`%s'",expression);
FxReturn(0.0);
}
FxReturn(fmod(alpha,*beta));
}
case '+':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha+(*beta));
}
case '-':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha-(*beta));
}
case LeftShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
FxReturn(*beta);
}
case RightShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '<':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha < *beta ? 1.0 : 0.0);
}
case LessThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha <= *beta ? 1.0 : 0.0);
}
case '>':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha > *beta ? 1.0 : 0.0);
}
case GreaterThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha >= *beta ? 1.0 : 0.0);
}
case EqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
}
case NotEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
}
case '&':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '|':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
FxReturn(*beta);
}
case LogicalAndOperator:
{
p++;
if (alpha <= 0.0)
{
*beta=0.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case LogicalOrOperator:
{
p++;
if (alpha > 0.0)
{
*beta=1.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case '?':
{
(void) CopyMagickString(subexpression,++p,MagickPathExtent);
q=subexpression;
p=StringToken(":",&q);
if (q == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
if (fabs(alpha) >= MagickEpsilon)
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,depth+1,beta,
exception);
FxReturn(gamma);
}
case '=':
{
char
numeric[MagickPathExtent];
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
(void) FormatLocaleString(numeric,MagickPathExtent,"%.20g",*beta);
(void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression);
(void) AddValueToSplayTree(fx_info->symbols,ConstantString(
subexpression),ConstantString(numeric));
FxReturn(*beta);
}
case ',':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha);
}
case ';':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta);
}
default:
{
gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,
beta,exception);
FxReturn(gamma);
}
}
}
if (strchr("(",(int) *expression) != (char *) NULL)
{
if (depth >= FxMaxParenthesisDepth)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"ParenthesisNestedTooDeeply","`%s'",expression);
(void) CopyMagickString(subexpression,expression+1,MagickPathExtent);
if (strlen(subexpression) != 0)
subexpression[strlen(subexpression)-1]='\0';
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
FxReturn(gamma);
}
switch (*expression)
{
case '+':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(1.0*gamma);
}
case '-':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(-1.0*gamma);
}
case '~':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn((double) (~(size_t) (gamma+0.5)));
}
case 'A':
case 'a':
{
if (LocaleNCompare(expression,"abs",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(fabs(alpha));
}
#if defined(MAGICKCORE_HAVE_ACOSH)
if (LocaleNCompare(expression,"acosh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(acosh(alpha));
}
#endif
if (LocaleNCompare(expression,"acos",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(acos(alpha));
}
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"airy",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha);
FxReturn(gamma*gamma);
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (LocaleNCompare(expression,"asinh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(asinh(alpha));
}
#endif
if (LocaleNCompare(expression,"asin",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(asin(alpha));
}
if (LocaleNCompare(expression,"alt",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
}
if (LocaleNCompare(expression,"atan2",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atan2(alpha,*beta));
}
#if defined(MAGICKCORE_HAVE_ATANH)
if (LocaleNCompare(expression,"atanh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atanh(alpha));
}
#endif
if (LocaleNCompare(expression,"atan",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(atan(alpha));
}
if (LocaleCompare(expression,"a") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(expression,"b") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'C':
case 'c':
{
if (LocaleNCompare(expression,"ceil",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(ceil(alpha));
}
if (LocaleNCompare(expression,"clamp",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha < 0.0)
FxReturn(0.0);
if (alpha > 1.0)
FxReturn(1.0);
FxReturn(alpha);
}
if (LocaleNCompare(expression,"cosh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(cosh(alpha));
}
if (LocaleNCompare(expression,"cos",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(cos(alpha));
}
if (LocaleCompare(expression,"c") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'D':
case 'd':
{
if (LocaleNCompare(expression,"debug",5) == 0)
{
const char
*type;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (fx_info->images->colorspace == CMYKColorspace)
switch (channel)
{
case CyanPixelChannel: type="cyan"; break;
case MagentaPixelChannel: type="magenta"; break;
case YellowPixelChannel: type="yellow"; break;
case AlphaPixelChannel: type="opacity"; break;
case BlackPixelChannel: type="black"; break;
default: type="unknown"; break;
}
else
switch (channel)
{
case RedPixelChannel: type="red"; break;
case GreenPixelChannel: type="green"; break;
case BluePixelChannel: type="blue"; break;
case AlphaPixelChannel: type="opacity"; break;
default: type="unknown"; break;
}
*subexpression='\0';
if (strlen(expression) > 6)
(void) CopyMagickString(subexpression,expression+6,
MagickPathExtent);
if (strlen(subexpression) > 1)
subexpression[strlen(subexpression)-1]='\0';
if (fx_info->file != (FILE *) NULL)
(void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: "
"%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type,
subexpression,GetMagickPrecision(),alpha);
FxReturn(0.0);
}
if (LocaleNCompare(expression,"drc",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((alpha/(*beta*(alpha-1.0)+1.0)));
}
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(expression,"epsilon") == 0)
FxReturn(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
if (LocaleNCompare(expression,"erf",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(erf(alpha));
}
#endif
if (LocaleNCompare(expression,"exp",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(exp(alpha));
}
if (LocaleCompare(expression,"e") == 0)
FxReturn(2.7182818284590452354);
break;
}
case 'F':
case 'f':
{
if (LocaleNCompare(expression,"floor",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
break;
}
case 'G':
case 'g':
{
if (LocaleNCompare(expression,"gauss",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
gamma=exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI);
FxReturn(gamma);
}
if (LocaleNCompare(expression,"gcd",3) == 0)
{
MagickOffsetType
gcd;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+
0.5));
FxReturn((double) gcd);
}
if (LocaleCompare(expression,"g") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(expression,"h") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleCompare(expression,"hue") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleNCompare(expression,"hypot",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(hypot(alpha,*beta));
}
break;
}
case 'K':
case 'k':
{
if (LocaleCompare(expression,"k") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'I':
case 'i':
{
if (LocaleCompare(expression,"intensity") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleNCompare(expression,"int",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (LocaleNCompare(expression,"isnan",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn((double) !!IsNaN(alpha));
}
if (LocaleCompare(expression,"i") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(expression,"j") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
#if defined(MAGICKCORE_HAVE_J0)
if (LocaleNCompare(expression,"j0",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j0(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"j1",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j1(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"jinc",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
gamma=(2.0*j1((MagickPI*alpha))/(MagickPI*alpha));
FxReturn(gamma);
}
#endif
break;
}
case 'L':
case 'l':
{
if (LocaleNCompare(expression,"ln",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(log(alpha));
}
if (LocaleNCompare(expression,"logtwo",6) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn(log10(alpha)/log10(2.0));
}
if (LocaleNCompare(expression,"log",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(log10(alpha));
}
if (LocaleCompare(expression,"lightness") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'M':
case 'm':
{
if (LocaleCompare(expression,"MaxRGB") == 0)
FxReturn(QuantumRange);
if (LocaleNCompare(expression,"maxima",6) == 0)
break;
if (LocaleNCompare(expression,"max",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha > *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"minima",6) == 0)
break;
if (LocaleNCompare(expression,"min",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha < *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"mod",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
gamma=alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta);
FxReturn(gamma);
}
if (LocaleCompare(expression,"m") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'N':
case 'n':
{
if (LocaleNCompare(expression,"not",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((double) (alpha < MagickEpsilon));
}
if (LocaleCompare(expression,"n") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(expression,"Opaque") == 0)
FxReturn(1.0);
if (LocaleCompare(expression,"o") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(expression,"phi") == 0)
FxReturn(MagickPHI);
if (LocaleCompare(expression,"pi") == 0)
FxReturn(MagickPI);
if (LocaleNCompare(expression,"pow",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(pow(alpha,*beta));
}
if (LocaleCompare(expression,"p") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(expression,"QuantumRange") == 0)
FxReturn(QuantumRange);
if (LocaleCompare(expression,"QuantumScale") == 0)
FxReturn(QuantumScale);
break;
}
case 'R':
case 'r':
{
if (LocaleNCompare(expression,"rand",4) == 0)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
alpha=GetPseudoRandomValue(fx_info->random_info);
FxReturn(alpha);
}
if (LocaleNCompare(expression,"round",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(floor(alpha+0.5));
}
if (LocaleCompare(expression,"r") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'S':
case 's':
{
if (LocaleCompare(expression,"saturation") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleNCompare(expression,"sign",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(alpha < 0.0 ? -1.0 : 1.0);
}
if (LocaleNCompare(expression,"sinc",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0)
FxReturn(1.0);
gamma=sin((MagickPI*alpha))/(MagickPI*alpha);
FxReturn(gamma);
}
if (LocaleNCompare(expression,"sinh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sinh(alpha));
}
if (LocaleNCompare(expression,"sin",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(sin(alpha));
}
if (LocaleNCompare(expression,"sqrt",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sqrt(alpha));
}
if (LocaleNCompare(expression,"squish",6) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn((1.0/(1.0+exp(-alpha))));
}
if (LocaleCompare(expression,"s") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'T':
case 't':
{
if (LocaleNCompare(expression,"tanh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(tanh(alpha));
}
if (LocaleNCompare(expression,"tan",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(tan(alpha));
}
if (LocaleCompare(expression,"Transparent") == 0)
FxReturn(0.0);
if (LocaleNCompare(expression,"trunc",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha >= 0.0)
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"t") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'U':
case 'u':
{
if (LocaleCompare(expression,"u") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'V':
case 'v':
{
if (LocaleCompare(expression,"v") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'W':
case 'w':
{
if (LocaleNCompare(expression,"while",5) == 0)
{
do
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
} while (fabs(alpha) >= MagickEpsilon);
FxReturn(*beta);
}
if (LocaleCompare(expression,"w") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(expression,"y") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(expression,"z") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
default:
break;
}
subexpression=DestroyString(subexpression);
q=(char *) expression;
alpha=InterpretSiPrefixValue(expression,&q);
if (q == expression)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
FxReturn(alpha);
}
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  /*
    Evaluate the fx expression on the gray channel at pixel (0,0) and
    store the result in *alpha.  Returns MagickFalse if evaluation
    raised an OptionError.
  */
  return(FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception));
}
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *suppressed;

  MagickBooleanType
    status;

  /*
    Evaluate the expression once to validate it, with the trace file
    temporarily detached so preprocessing produces no debug output.
  */
  suppressed=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  fx_info->file=suppressed;
  return(status);
}
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  double *alpha,ExceptionInfo *exception)
{
  double
    sans;

  /*
    Recursively evaluate the full expression at depth 0; sans receives
    the secondary operand and is discarded here.
  */
  sans=0.0;
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
    &sans,exception);
  /*
    An OptionError recorded during evaluation marks the expression invalid.
  */
  if (exception->severity == OptionError)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DestroyFxThreadSet() releases every per-thread FxInfo in the set and
  then the set itself; returns NULL for pointer-clearing assignment.
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  assert(fx_info != (FxInfo **) NULL);
  /*
    Hoist the resource query out of the loop: the thread limit is
    invariant here and GetMagickResourceLimit() was previously called
    once per iteration.
  */
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (fx_info[i] != (FxInfo *) NULL)
      fx_info[i]=DestroyFxInfo(fx_info[i]);
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
  AcquireFxThreadSet() builds one FxInfo per worker thread for the given
  expression.  An expression beginning with '@' is read from the named
  file.  Returns NULL (with exception set where applicable) on failure.
*/
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  FxInfo
    **fx_info;

  double
    alpha;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) memset(fx_info,0,number_threads*sizeof(*fx_info));
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  if (fx_expression == (char *) NULL)
    {
      /*
        Reading the '@file' expression (or duplicating the string) failed;
        bail out before passing NULL to AcquireFxInfo()/DestroyString().
      */
      fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
      return((FxInfo **) NULL);
    }
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    /*
      Validate the expression once per thread context.
    */
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);
  return(fx_info);
}
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"

  CacheView
    *fx_view,
    *image_view;

  FxInfo
    **magick_restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A NULL expression degenerates to an unmodified clone of the image.
  */
  if (expression == (const char *) NULL)
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    One FxInfo per worker thread; each OpenMP thread indexes by its id.
  */
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image: evaluate the expression for every channel of every pixel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,fx_image,fx_image->rows,1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* Another row already failed; skip remaining work for this row. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        /* Copy-trait channels pass through unchanged. */
        if ((fx_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(fx_image,channel,p[i],q);
            continue;
          }
        alpha=0.0;
        /*
          Expression yields a normalized [0,1] value; scale to quantum.
        */
        (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
          exception);
        q[i]=ClampToQuantum(QuantumRange*alpha);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FxImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  image_view=DestroyCacheView(image_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels "imploded" by the specified percentage. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is imploded. A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"

  CacheView
    *canvas_view,
    *implode_view,
    *interpolate_view;

  double
    radius;

  Image
    *canvas_image,
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Give the canvas an explicit opaque alpha channel if its background
    is non-opaque but no alpha trait is defined.
  */
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
  implode_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (implode_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor: normalize the shorter axis so the implosion
    region is the largest centered ellipse/circle; radius follows the
    shorter half-dimension.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*canvas_image->columns;
  center.y=0.5*canvas_image->rows;
  radius=center.x;
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
  else
    if (canvas_image->columns < canvas_image->rows)
      {
        scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
        radius=center.y;
      }
  /*
    Implode image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(canvas_image,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,implode_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        /* Outside the implosion ellipse: copy the pixel unchanged. */
        for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
          PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
          PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (implode_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(implode_image,channel,p[i],q);
        }
      else
        {
          double
            factor;

          /*
            Implode the pixel: sample the source at a radially displaced
            position; the displacement factor grows toward the center.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin(MagickPI*sqrt((double) distance)/radius/2),-amount);
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            implode_image,method,(double) (factor*delta.x/scale.x+center.x),
            (double) (factor*delta.y/scale.y+center.y),q,exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(implode_image);
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,ImplodeImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
% The format of the MorphImage method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between image to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphImages(const Image *image,const size_t number_frames,
  ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"

  double
    alpha,
    beta;

  Image
    *morph_image,
    *morph_images;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  register const Image
    *next;

  register ssize_t
    n;

  ssize_t
    y;

  /*
    Clone first frame in sequence.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  morph_images=CloneImage(image,0,0,MagickTrue,exception);
  if (morph_images == (Image *) NULL)
    return((Image *) NULL);
  /*
    Initialize status before any branch writes it (it was previously
    assigned inside the single-image branch while still uninitialized).
  */
  status=MagickTrue;
  if (GetNextImageInList(image) == (Image *) NULL)
    {
      /*
        Morph single image: replicate the lone frame number_frames times.
      */
      for (n=1; n < (ssize_t) number_frames; n++)
      {
        morph_image=CloneImage(image,0,0,MagickTrue,exception);
        if (morph_image == (Image *) NULL)
          {
            morph_images=DestroyImageList(morph_images);
            return((Image *) NULL);
          }
        AppendImageToList(&morph_images,morph_image);
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n,
              number_frames);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      return(GetFirstImageInList(morph_images));
    }
  /*
    Morph image sequence: between each adjacent pair of frames generate
    number_frames in-between images by resizing and blending.
  */
  scene=0;
  next=image;
  for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
  {
    for (n=0; n < (ssize_t) number_frames; n++)
    {
      CacheView
        *image_view,
        *morph_view;

      /*
        beta ramps from ~0 to ~1 across the in-between frames; alpha is
        its complement (weight of the earlier frame).
      */
      beta=(double) (n+1.0)/(double) (number_frames+1.0);
      alpha=1.0-beta;
      morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
        GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta*
        GetNextImageInList(next)->rows+0.5),next->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      status=SetImageStorageClass(morph_image,DirectClass,exception);
      if (status == MagickFalse)
        {
          /*
            Release the accumulated list too; it was previously leaked
            on this error path.
          */
          morph_image=DestroyImage(morph_image);
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      AppendImageToList(&morph_images,morph_image);
      morph_images=GetLastImageInList(morph_images);
      /*
        Resize the following frame to the in-between geometry so the two
        can be blended pixel-for-pixel.
      */
      morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
        morph_images->rows,GetNextImageInList(next)->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      image_view=AcquireVirtualCacheView(morph_image,exception);
      morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(morph_image,morph_image,morph_image->rows,1)
#endif
      for (y=0; y < (ssize_t) morph_images->rows; y++)
      {
        MagickBooleanType
          sync;

        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
          exception);
        q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) morph_images->columns; x++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(morph_image,i);
            PixelTrait traits = GetPixelChannelTraits(morph_image,channel);
            PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morph_traits == UndefinedPixelTrait))
              continue;
            if ((morph_traits & CopyPixelTrait) != 0)
              {
                SetPixelChannel(morph_image,channel,p[i],q);
                continue;
              }
            /*
              Blend: alpha weights the earlier frame (already in q),
              beta weights the later frame (p).
            */
            SetPixelChannel(morph_image,channel,ClampToQuantum(alpha*
              GetPixelChannel(morph_images,channel,q)+beta*p[i]),q);
          }
          p+=GetPixelChannels(morph_image);
          q+=GetPixelChannels(morph_images);
        }
        sync=SyncCacheViewAuthenticPixels(morph_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      morph_view=DestroyCacheView(morph_view);
      image_view=DestroyCacheView(image_view);
      morph_image=DestroyImage(morph_image);
    }
    if (n < (ssize_t) number_frames)
      break;
    /*
      Clone last frame in sequence.
    */
    morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
    if (morph_image == (Image *) NULL)
      {
        morph_images=DestroyImageList(morph_images);
        return((Image *) NULL);
      }
    AppendImageToList(&morph_images,morph_image);
    morph_images=GetLastImageInList(morph_images);
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,MorphImageTag,scene,
          GetImageListLength(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
    scene++;
  }
  /*
    If the loop exited early (in-between generation aborted), fail.
  */
  if (GetNextImageInList(next) != (Image *) NULL)
    {
      morph_images=DestroyImageList(morph_images);
      return((Image *) NULL);
    }
  return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  PlasmaPixel() perturbs a pixel value by uniform pseudo-random noise and
  clamps the result to the valid quantum range.
*/
static inline Quantum PlasmaPixel(RandomInfo *random_info,
  const double pixel,const double noise)
{
  Quantum
    value;

  value=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)-
    noise/2.0);
  /* Belt-and-braces range enforcement on the clamped value. */
  if (value <= 0)
    return((Quantum) 0);
  if (value >= QuantumRange)
    return(QuantumRange);
  return(value);
}
/*
  PlasmaImageProxy() recursively subdivides the segment into quadrants;
  once the recursion depth is exhausted it writes noise-perturbed averages
  of the segment's corner pixels at the edge midpoints and center.
  Returns MagickTrue when the segment is fully resolved.
*/
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
  CacheView *u_view,CacheView *v_view,RandomInfo *random_info,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  double
    plasma;

  register const Quantum
    *magick_restrict u,
    *magick_restrict v;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  /* Degenerate (point) segment: nothing left to fill. */
  if ((fabs(segment->x2-segment->x1) <= MagickEpsilon) &&
      (fabs(segment->y2-segment->y1) <= MagickEpsilon))
    return(MagickTrue);
  if (depth != 0)
    {
      MagickBooleanType
        status;

      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.  Attenuation grows
        with each level so noise amplitude shrinks at finer scales.
      */
      depth--;
      attenuate++;
      x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
      y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
      /* Top-left quadrant. */
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* Bottom-left quadrant. */
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* Top-right quadrant. */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* Bottom-right quadrant. */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      return(status);
    }
  x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
  y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
  /* Segment has collapsed onto its midpoint: nothing to interpolate. */
  if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
      (fabs(segment->x2-x_mid) < MagickEpsilon) &&
      (fabs(segment->y1-y_mid) < MagickEpsilon) &&
      (fabs(segment->y2-y_mid) < MagickEpsilon))
    return(MagickFalse);
  /*
    Average pixels and apply plasma.
  */
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
      (fabs(segment->x2-x_mid) > MagickEpsilon))
    {
      /*
        Left pixel: average of the two left corners at the y-midpoint.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),1,1,
        exception);
      v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),1,1,
        exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
      }
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      if (fabs(segment->x1-segment->x2) > MagickEpsilon)
        {
          /*
            Right pixel: average of the two right corners at the y-midpoint.
          */
          x=(ssize_t) ceil(segment->x2-0.5);
          u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->y1-y_mid) > MagickEpsilon) ||
      (fabs(segment->y2-y_mid) > MagickEpsilon))
    {
      /*
        NOTE(review): this guard mixes x1/x_mid with y2/y_mid, unlike the
        symmetric x-guards above — confirm whether the asymmetry is
        intentional.
      */
      if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
          (fabs(segment->y2-y_mid) > MagickEpsilon))
        {
          /*
            Bottom pixel: average of the two bottom corners at the x-midpoint.
          */
          y=(ssize_t) ceil(segment->y2-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (fabs(segment->y1-segment->y2) > MagickEpsilon)
        {
          /*
            Top pixel: average of the two top corners at the x-midpoint.
          */
          y=(ssize_t) ceil(segment->y1-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) > MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) > MagickEpsilon))
    {
      /*
        Middle pixel: average of the two diagonal corners at the center.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      y=(ssize_t) ceil(segment->y1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
      x=(ssize_t) ceil(segment->x2-0.5);
      y=(ssize_t) ceil(segment->y2-0.5);
      v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
      }
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
    }
  /* Segments smaller than 3x3 pixels are considered resolved. */
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(MagickTrue);
  return(MagickFalse);
}
MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *u_view,
    *v_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /*
    Emit the trace event once (it was previously duplicated, logging the
    same message twice per call).
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Authentic view receives the plasma values; the two virtual views
    supply the corner pixels the proxy averages.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
  u_view=AcquireVirtualCacheView(image,exception);
  v_view=AcquireVirtualCacheView(image,exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
    attenuate,depth,exception);
  random_info=DestroyRandomInfo(random_info);
  v_view=DestroyCacheView(v_view);
  u_view=DestroyCacheView(u_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
% The format of the PolaroidImage method is:
%
% Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
% const char *caption,const double angle,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o caption: the Polaroid caption.
%
% o angle: Apply the effect along this angle.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const char *caption,const double angle,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Frame width is 1/25th of the longest dimension, but at least 10 pixels.
  */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  if (caption != (const char *) NULL)
    {
      char
        *text;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      text=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,caption,
        exception);
      if (text != (char *) NULL)
        {
          char
            geometry[MagickPathExtent];

          DrawInfo
            *annotate_info;

          MagickBooleanType
            status;

          ssize_t
            count;

          TypeMetric
            metrics;

          /*
            Word-wrap the caption, then grow the caption image to hold the
            wrapped text (count+1 lines of ascent-descent height).
          */
          annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
          (void) CloneString(&annotate_info->text,text);
          count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,
            &metrics,&text,exception);
          status=SetImageExtent(caption_image,image->columns,(size_t)
            ((count+1)*(metrics.ascent-metrics.descent)+0.5),exception);
          if (status == MagickFalse)
            caption_image=DestroyImage(caption_image);
          else
            {
              caption_image->background_color=image->border_color;
              (void) SetImageBackgroundColor(caption_image,exception);
              (void) CloneString(&annotate_info->text,text);
              (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%.20g",
                metrics.ascent);
              /*
                CloneString() copies its source, so pass the stack buffer
                directly; the previous AcquireString() here leaked its
                allocation.
              */
              if (annotate_info->gravity == UndefinedGravity)
                (void) CloneString(&annotate_info->geometry,geometry);
              (void) AnnotateImage(caption_image,annotate_info,exception);
              height+=caption_image->rows;
            }
          annotate_info=DestroyDrawInfo(annotate_info);
          text=DestroyString(text);
        }
    }
  /*
    Composite the image (and any caption) onto a bordered backing picture.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image,exception);
  (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
    quantum,exception);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,caption_image,OverCompositeOp,
        MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorCompliance("none",AllCompliance,
    &picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
  /*
    Bend the picture: rotate 90 degrees, wave, then rotate back.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,method,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Cast a soft shadow behind the picture.  DestroyImage() returns NULL, so
    the failure paths below effectively return NULL to the caller.
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
    MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorCompliance("none",AllCompliance,
    &polaroid_image->background_color,exception);
  /*
    Tilt the Polaroid by the requested angle and trim excess background.
  */
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag  "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sepia_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
    {
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;

      /*
        Map intensity to a warm sepia ramp: highlights saturate at
        QuantumRange, red leads, green lags by threshold/6, blue is
        suppressed by threshold/6.
      */
      intensity=GetPixelIntensity(image,p);
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(sepia_image,ClampToQuantum(tone),q);
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      /*
        Clamp very dark green/blue to a floor of threshold/7.  The q pixels
        belong to sepia_image, so read them with sepia_image's channel map
        (previously read via the source image's map).
      */
      tone=threshold/7.0;
      if ((double) GetPixelGreen(sepia_image,q) < tone)
        SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      if ((double) GetPixelBlue(sepia_image,q) < tone)
        SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(sepia_image);
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SepiaToneImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  (void) NormalizeImage(sepia_image,exception);
  (void) ContrastImage(sepia_image,MagickTrue,exception);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double alpha,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double alpha,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag  "Shadow/Image"

  CacheView
    *image_view;

  ChannelType
    channel_mask;

  Image
    *border_image,
    *clone_image,
    *shadow_image;

  MagickBooleanType
    status;

  PixelInfo
    background_color;

  RectangleInfo
    border_info;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace,exception);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
    exception);
  /*
    Surround the image with a transparent border wide enough for the blur.
  */
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
    exception);
  clone_image->alpha_trait=BlendPixelTrait;
  border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
  /*
    Shadow image: flatten every pixel to the background color, scaling its
    alpha by the existing coverage.
  */
  status=MagickTrue;
  background_color=border_image->background_color;
  background_color.alpha_trait=BlendPixelTrait;
  image_view=AcquireAuthenticCacheView(border_image,exception);
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Use GetCacheViewAuthenticPixels() here: the loop reads the existing
      alpha values, which QueueCacheViewAuthenticPixels() does not load.
    */
    q=GetCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      if (border_image->alpha_trait != UndefinedPixelTrait)
        background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
      SetPixelViaPixelInfo(border_image,&background_color,q);
      q+=GetPixelChannels(border_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      border_image=DestroyImage(border_image);
      return((Image *) NULL);
    }
  /*
    Blur only the alpha channel to soften the shadow's edge.
  */
  channel_mask=SetImageChannelMask(border_image,AlphaChannel);
  shadow_image=BlurImage(border_image,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetPixelChannelMask(shadow_image,channel_mask);
  /*
    Offset the page geometry so the shadow lands at (x_offset,y_offset)
    relative to the original image.
  */
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the
% center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  CacheView
    *random_view;

  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;

  MagickBooleanType
    status;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Sketch image: fill a double-sized canvas with random noise, motion-blur
    it along the sketch angle, edge-detect, then dodge-composite the result
    with the original.  Asserts added for consistency with sibling methods.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  random_info=AcquireRandomInfoThreadSet();
  random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) random_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) random_image->columns; x++)
    {
      double
        value;

      register ssize_t
        i;

      /*
        One random value per pixel, replicated into every defined channel.
      */
      value=GetPseudoRandomValue(random_info[id]);
      for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=ClampToQuantum(QuantumRange*value);
      }
      q+=GetPixelChannels(random_image);
    }
    if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  random_view=DestroyCacheView(random_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    {
      /* DestroyImage() returns NULL, so this returns NULL to the caller. */
      random_image=DestroyImage(random_image);
      return(random_image);
    }
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(dodge_image,exception);
  (void) NegateImage(dodge_image,MagickFalse,exception);
  (void) TransformImage(&dodge_image,(char *) NULL,"50%",exception);
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  (void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp,
    MagickTrue,0,0,exception);
  dodge_image=DestroyImage(dodge_image);
  /*
    Blend 20% of the original color back into the sketch.
  */
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  if (blend_image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlpha(blend_image,TransparentAlpha,exception);
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue,
    0,0,exception);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag  "Solarize/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Solarize colormap: invert any channel brighter than threshold.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((double) image->colormap[i].red > threshold)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((double) image->colormap[i].green > threshold)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((double) image->colormap[i].blue > threshold)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Solarize image: invert every updatable channel value above threshold.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if ((double) q[i] > threshold)
          q[i]=QuantumRange-q[i];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SolarizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image.  Recover
% the hidden watermark later to prove the authenticity of an image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \
  | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag  "Stegano/Image"

  CacheView
    *stegano_view,
    *watermark_view;

  Image
    *stegano_image;

  int
    c;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    depth,
    one;

  ssize_t
    i,
    j,
    k,
    y;

  /*
    Initialize steganographic image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  one=1UL;
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse)
    {
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  /*
    Hide watermark in low-order bits of image.  Bit i of each watermark
    pixel's intensity is stored as bit j of successive channel values,
    cycling red/green/blue (c) and advancing the pixel cursor k from the
    image's offset.
  */
  c=0;
  i=0;
  j=0;
  depth=stegano_image->depth;
  k=stegano_image->offset;
  status=MagickTrue;
  watermark_view=AcquireVirtualCacheView(watermark,exception);
  stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
  for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
  {
    for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
    {
      for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
      {
        ssize_t
          offset;

        (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel,
          exception);
        offset=k/(ssize_t) stegano_image->columns;
        if (offset >= (ssize_t) stegano_image->rows)
          break;
        q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
          stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
          exception);
        if (q == (Quantum *) NULL)
          break;
        switch (c)
        {
          case 0:
          {
            SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 1:
          {
            SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 2:
          {
            SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
        }
        if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        /*
          Wrap the pixel cursor at the total pixel count (columns*rows);
          the previous columns*columns wrapped at the wrong boundary for
          non-square images.
        */
        if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
          k=0;
        if (k == stegano_image->offset)
          j++;
      }
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
          (depth-i),depth);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  stegano_view=DestroyCacheView(stegano_view);
  watermark_view=DestroyCacheView(watermark_view);
  if (status == MagickFalse)
    stegano_image=DestroyImage(stegano_image);
  return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
*/
/*
  StereoImage() composites a left and right image into a single red/cyan
  stereo anaglyph with no offset between the two frames; it is a
  convenience wrapper around StereoAnaglyphImage().
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  /* Delegate with a zero x and y offset. */
  return(StereoAnaglyphImage(left_image,right_image,0,0,exception));
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag  "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Validate the stereo pair.
  */
  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickCoreSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
    {
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
  /*
    Red channel comes from the (offset) left frame; green and blue come
    from the right frame.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    const Quantum
      *magick_restrict left_p,
      *magick_restrict right_p;

    Quantum
      *magick_restrict stereo_p;

    ssize_t
      x;

    left_p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    right_p=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    stereo_p=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,
      exception);
    if ((left_p == (const Quantum *) NULL) ||
        (right_p == (const Quantum *) NULL) || (stereo_p == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      SetPixelRed(stereo_image,GetPixelRed(left_image,left_p),stereo_p);
      SetPixelGreen(stereo_image,GetPixelGreen(right_image,right_p),stereo_p);
      SetPixelBlue(stereo_image,GetPixelBlue(right_image,right_p),stereo_p);
      /* Average the two alphas when the alpha channel is copyable. */
      if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
        SetPixelAlpha(stereo_image,(GetPixelAlpha(left_image,left_p)+
          GetPixelAlpha(right_image,right_p))/2,stereo_p);
      left_p+=GetPixelChannels(left_image);
      right_p+=GetPixelChannels(right_image);
      stereo_p+=GetPixelChannels(stereo_image);
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"
CacheView
*canvas_view,
*interpolate_view,
*swirl_view;
double
radius;
Image
*canvas_image,
*swirl_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PointInfo
center,
scale;
ssize_t
y;
/*
Initialize swirl image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
return((Image *) NULL);
if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
(canvas_image->background_color.alpha != OpaqueAlpha))
(void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
swirl_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
if (swirl_image == (Image *) NULL)
{
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
{
canvas_image=DestroyImage(canvas_image);
swirl_image=DestroyImage(swirl_image);
return((Image *) NULL);
}
/*
Compute scaling factor.
*/
center.x=(double) canvas_image->columns/2.0;
center.y=(double) canvas_image->rows/2.0;
radius=MagickMax(center.x,center.y);
scale.x=1.0;
scale.y=1.0;
if (canvas_image->columns > canvas_image->rows)
scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
else
if (canvas_image->columns < canvas_image->rows)
scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
degrees=(double) DegreesToRadians(degrees);
/*
Swirl image.
*/
status=MagickTrue;
progress=0;
canvas_view=AcquireVirtualCacheView(canvas_image,exception);
interpolate_view=AcquireVirtualCacheView(image,exception);
swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(canvas_image,swirl_image,canvas_image->rows,1)
#endif
for (y=0; y < (ssize_t) canvas_image->rows; y++)
{
double
distance;
PointInfo
delta;
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
delta.y=scale.y*(double) (y-center.y);
for (x=0; x < (ssize_t) canvas_image->columns; x++)
{
/*
Determine if the pixel is within an ellipse.
*/
delta.x=scale.x*(double) (x-center.x);
distance=delta.x*delta.x+delta.y*delta.y;
if (distance >= (radius*radius))
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(swirl_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(swirl_image,channel,p[i],q);
}
}
else
{
double
cosine,
factor,
sine;
/*
Swirl the pixel.
*/
factor=1.0-sqrt((double) distance)/radius;
sine=sin((double) (degrees*factor*factor));
cosine=cos((double) (degrees*factor*factor));
status=InterpolatePixelChannels(canvas_image,interpolate_view,
swirl_image,method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),
(double) ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,
exception);
if (status == MagickFalse)
break;
}
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(swirl_image);
}
if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
status=MagickFalse;
if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(canvas_image,SwirlImageTag,progress,
canvas_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
swirl_view=DestroyCacheView(swirl_view);
interpolate_view=DestroyCacheView(interpolate_view);
canvas_view=DestroyCacheView(canvas_view);
canvas_image=DestroyImage(canvas_image);
if (status == MagickFalse)
swirl_image=DestroyImage(swirl_image);
return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *blend,
% const PixelInfo *tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: comma-separated blend percentages (red[,green,blue[,alpha]]; %
% black/alpha for CMYK) controlling how strongly the tint is applied. %
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *blend,
  const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"
  CacheView
    *image_view,
    *tint_view;
  double
    intensity;
  GeometryInfo
    geometry_info;
  Image
    *tint_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PixelInfo
    color_vector;  /* per-channel tint strength, weighted per pixel below */
  MagickStatusType
    flags;
  ssize_t
    y;
  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  tint_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
    {
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  /*
    A non-gray tint on a grayscale image forces the result into sRGB so the
    added color can actually be represented.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelInfoGray(tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace,exception);
  if (blend == (const char *) NULL)
    return(tint_image);  /* no blend spec: return the unmodified clone */
  /*
    Determine RGB values of the color.  The blend string parses as geometry
    rho[,sigma,xi,psi[,chi]]: per-channel blend percentages.  A single value
    applies to red, green, and blue alike.
  */
  GetPixelInfo(image,&color_vector);
  flags=ParseGeometry(blend,&geometry_info);
  color_vector.red=geometry_info.rho;
  color_vector.green=geometry_info.rho;
  color_vector.blue=geometry_info.rho;
  color_vector.alpha=(MagickRealType) OpaqueAlpha;
  if ((flags & SigmaValue) != 0)
    color_vector.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    color_vector.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    color_vector.alpha=geometry_info.psi;
  if (image->colorspace == CMYKColorspace)
    {
      /*
        For CMYK the 4th percentage (psi) is black and the 5th (chi) is alpha.
      */
      color_vector.black=geometry_info.rho;
      if ((flags & PsiValue) != 0)
        color_vector.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        color_vector.alpha=geometry_info.chi;
    }
  /*
    Scale each channel by its blend percentage of the tint color, offset by
    the tint intensity (the NULL image argument selects the default pixel
    intensity method -- TODO confirm against GetPixelInfoIntensity).
  */
  intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
  color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
  color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
  color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
  color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
  color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
  /*
    Tint image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;
      double
        weight;
      /*
        Weight each channel by f(x)=1-4*(x-0.5)^2: zero at black and white,
        maximal at the midtones, so shadows and highlights keep their value.
      */
      GetPixelInfo(image,&pixel);
      weight=QuantumScale*GetPixelRed(image,p)-0.5;
      pixel.red=(MagickRealType) GetPixelRed(image,p)+color_vector.red*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelGreen(image,p)-0.5;
      pixel.green=(MagickRealType) GetPixelGreen(image,p)+color_vector.green*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlue(image,p)-0.5;
      pixel.blue=(MagickRealType) GetPixelBlue(image,p)+color_vector.blue*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlack(image,p)-0.5;
      pixel.black=(MagickRealType) GetPixelBlack(image,p)+color_vector.black*
        (1.0-(4.0*(weight*weight)));
      /* alpha is passed through untinted */
      pixel.alpha=(MagickRealType) GetPixelAlpha(image,p);
      SetPixelViaPixelInfo(tint_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(tint_image);
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TintImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    primitive[MagickPathExtent];
  DrawInfo
    *oval_draw;
  Image
    *base,
    *blur,
    *oval,
    *vignette_image;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Work on a direct-class copy of the input with blended alpha.
  */
  base=CloneImage(image,0,0,MagickTrue,exception);
  if (base == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(base,DirectClass,exception) == MagickFalse)
    {
      base=DestroyImage(base);
      return((Image *) NULL);
    }
  base->alpha_trait=BlendPixelTrait;
  /*
    Paint a white ellipse on a black canvas of the same size; this becomes
    the vignette mask.
  */
  oval=CloneImage(base,base->columns,base->rows,MagickTrue,exception);
  if (oval == (Image *) NULL)
    {
      base=DestroyImage(base);
      return((Image *) NULL);
    }
  (void) QueryColorCompliance("#000000",AllCompliance,&oval->background_color,
    exception);
  (void) SetImageBackgroundColor(oval,exception);
  oval_draw=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&oval_draw->fill,
    exception);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&oval_draw->stroke,
    exception);
  (void) FormatLocaleString(primitive,MagickPathExtent,"ellipse %g,%g,%g,%g,"
    "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
    image->rows/2.0-y);
  oval_draw->primitive=AcquireString(primitive);
  (void) DrawImage(oval,oval_draw,exception);
  oval_draw=DestroyDrawInfo(oval_draw);
  /*
    Soften the mask edges, then composite it as the copy's alpha channel.
  */
  blur=BlurImage(oval,radius,sigma,exception);
  oval=DestroyImage(oval);
  if (blur == (Image *) NULL)
    {
      base=DestroyImage(base);
      return((Image *) NULL);
    }
  blur->alpha_trait=UndefinedPixelTrait;
  (void) CompositeImage(base,blur,IntensityCompositeOp,MagickTrue,0,0,
    exception);
  blur=DestroyImage(blur);
  /*
    Flatten against the background and restore the caller's colorspace.
  */
  vignette_image=MergeImageLayers(base,FlattenLayer,exception);
  base=DestroyImage(base);
  if (vignette_image != (Image *) NULL)
    (void) TransformImageColorspace(vignette_image,image->colorspace,exception);
  return(vignette_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength is specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"
  CacheView
    *canvas_image_view,
    *wave_view;
  Image
    *canvas_image,
    *wave_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  double
    *sine_map;  /* per-column vertical displacement, precomputed */
  register ssize_t
    i;
  ssize_t
    y;
  /*
    Initialize wave image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlpha(canvas_image,OpaqueAlpha,exception);
  /*
    The result is made 2*|amplitude| rows taller so displaced pixels fit.
  */
  wave_image=CloneImage(canvas_image,canvas_image->columns,(size_t)
    (canvas_image->rows+2.0*fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  /*
    Allocate sine map: offset(x)=|amplitude|+amplitude*sin(2*pi*x/wave_length).
  */
  sine_map=(double *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (double *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/
      wave_length));
  /*
    Wave image.
  */
  status=MagickTrue;
  progress=0;
  canvas_image_view=AcquireVirtualCacheView(canvas_image,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  /*
    Out-of-range samples (above/below the canvas) read the background color.
  */
  (void) SetCacheViewVirtualPixelMethod(canvas_image_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_image_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      /*
        Sample the canvas at (x, y-offset(x)); p is only fetched/advanced to
        validate the row -- pixel values come via the interpolation call.
      */
      status=InterpolatePixelChannels(canvas_image,canvas_image_view,
        wave_image,method,(double) x,(double) (y-sine_map[x]),q,exception);
      if (status == MagickFalse)
        break;
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(wave_image);
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,WaveImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  canvas_image_view=DestroyCacheView(canvas_image_view);
  canvas_image=DestroyImage(canvas_image);
  sine_map=(double *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e l e t D e n o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveletDenoiseImage() removes noise from the image using a wavelet
% transform. The wavelet transform is a fast hierarchical scheme for
% processing an image using a set of consecutive lowpass and high_pass filters,
% followed by a decimation. This results in a decomposition into different
% scales which can be regarded as different “frequency bands”, determined by
% the mother wavelet. Adapted from dcraw.c by David Coffin.
%
% The format of the WaveletDenoiseImage method is:
%
% Image *WaveletDenoiseImage(const Image *image,const double threshold,
% const double softness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: set the threshold for smoothing.
%
% o softness: attenuate the smoothing threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  HatTransform() applies one level of the a'trous "hat" wavelet filter along
  one row or column of the pixel buffer: kernel[i] receives the 1-2-1 weighted
  average of the sample at i and its two neighbors at distance `scale`,
  sampling every `stride` floats over `extent` samples.  The first and last
  `scale` taps mirror the signal at the boundaries (adapted from dcraw's
  hat_transform).  Requires extent >= 2*scale.
*/
#ifndef magick_restrict
#define magick_restrict  /* fallback when compiled outside MagickCore */
#endif
static inline void HatTransform(const float *magick_restrict pixels,
  const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
  const float
    *magick_restrict p,
    *magick_restrict q,
    *magick_restrict r;
  ssize_t
    i;

  /*
    Leading boundary: the left neighbor is mirrored about index 0, so q walks
    backward from pixels[scale] while r walks forward from the same tap.
  */
  p=pixels;
  q=pixels+scale*stride;
  r=pixels+scale*stride;
  for (i=0; i < (ssize_t) scale; i++)
  {
    kernel[i]=0.25f*(2.0f*(*p)+(*q)+(*r));
    p+=stride;
    q-=stride;
    r+=stride;
  }
  /*
    Interior: both neighbors are in range.
  */
  for ( ; i < (ssize_t) (extent-scale); i++)
  {
    kernel[i]=0.25f*(2.0f*(*p)+*(p-scale*stride)+*(p+scale*stride));
    p+=stride;
  }
  /*
    Trailing boundary: the right neighbor is mirrored about the last sample,
    so r walks backward from pixels[extent-2].
  */
  q=p-scale*stride;
  r=pixels+stride*(extent-2);
  for ( ; i < (ssize_t) extent; i++)
  {
    kernel[i]=0.25f*(2.0f*(*p)+(*q)+(*r));
    p+=stride;
    q+=stride;
    r-=stride;
  }
}
MagickExport Image *WaveletDenoiseImage(const Image *image,
  const double threshold,const double softness,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *noise_view;
  float
    *kernel,   /* per-thread scratch row/column for HatTransform() */
    *pixels;   /* 3 planes: source residual + two ping-pong wavelet planes */
  Image
    *noise_image;
  MagickBooleanType
    status;
  MagickSizeType
    number_pixels;
  MemoryInfo
    *pixels_info;
  ssize_t
    channel;
  /*
    Expected noise magnitude per decomposition level (from dcraw).
  */
  static const float
    noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f,
      0.0080f, 0.0044f };
  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated path when it is available.
  */
  noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Workspace: three full image planes, plus one kernel buffer per OpenMP
    thread sized to the longer image dimension (+1).
  */
  pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
    sizeof(*pixels));
  kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns)+1,
    GetOpenMPMaximumThreads()*sizeof(*kernel));
  if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
    {
      if (kernel != (float *) NULL)
        kernel=(float *) RelinquishMagickMemory(kernel);
      if (pixels_info != (MemoryInfo *) NULL)
        pixels_info=RelinquishVirtualMemory(pixels_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(float *) GetVirtualMemoryBlob(pixels_info);
  status=MagickTrue;
  number_pixels=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
  /*
    Denoise each color channel independently; non-color channels pass through
    unchanged from the clone.
  */
  for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
  {
    register ssize_t
      i;
    size_t
      high_pass,
      low_pass;
    ssize_t
      level,
      y;
    PixelChannel
      pixel_channel;
    PixelTrait
      traits;
    if (status == MagickFalse)
      continue;
    traits=GetPixelChannelTraits(image,(PixelChannel) channel);
    if (traits == UndefinedPixelTrait)
      continue;
    pixel_channel=GetPixelChannelChannel(image,channel);
    if ((pixel_channel != RedPixelChannel) &&
        (pixel_channel != GreenPixelChannel) &&
        (pixel_channel != BluePixelChannel))
      continue;
    /*
      Copy channel from image to wavelet pixel array.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;
      ssize_t
        x;
      p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[i++]=(float) p[channel];
        p+=GetPixelChannels(image);
      }
    }
    /*
      Low pass filter outputs are called approximation kernel & high pass
      filters are referred to as detail kernel. The detail kernel
      have high values in the noisy parts of the signal.
    */
    high_pass=0;
    for (level=0; level < 5; level++)
    {
      double
        magnitude;
      ssize_t
        x,
        y;
      /*
        The low-pass plane ping-pongs between planes 1 and 2 of the buffer;
        the high-pass plane is the previous level's low-pass.
      */
      low_pass=(size_t) (number_pixels*((level & 0x01)+1));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();
        register float
          *magick_restrict p,
          *magick_restrict q;
        register ssize_t
          x;
        /*
          Horizontal hat transform of each row into the low-pass plane.
        */
        p=kernel+id*image->columns;
        q=pixels+y*image->columns;
        HatTransform(q+high_pass,1,image->columns,(size_t) (1UL << level),p);
        q+=low_pass;
        for (x=0; x < (ssize_t) image->columns; x++)
          *q++=(*p++);
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();
        register float
          *magick_restrict p,
          *magick_restrict q;
        register ssize_t
          y;
        /*
          Vertical hat transform of each column, in place in the low-pass
          plane (stride is the row width).
        */
        p=kernel+id*image->rows;
        q=pixels+x+low_pass;
        HatTransform(q,image->columns,image->rows,(size_t) (1UL << level),p);
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          *q=(*p++);
          q+=image->columns;
        }
      }
      /*
        To threshold, each coefficient is compared to a threshold value and
        attenuated / shrunk by some factor.
      */
      magnitude=threshold*noise_levels[level];
      for (i=0; i < (ssize_t) number_pixels; ++i)
      {
        /* detail coefficient = previous plane minus its smoothed version */
        pixels[high_pass+i]-=pixels[low_pass+i];
        if (pixels[high_pass+i] < -magnitude)
          pixels[high_pass+i]+=magnitude-softness*magnitude;
        else
          if (pixels[high_pass+i] > magnitude)
            pixels[high_pass+i]-=magnitude-softness*magnitude;
          else
            pixels[high_pass+i]*=softness;
        /* accumulate the kept detail back into the reconstruction plane */
        if (high_pass != 0)
          pixels[i]+=pixels[high_pass+i];
      }
      high_pass=low_pass;
    }
    /*
      Reconstruct image from the thresholded wavelet kernel.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;
      register Quantum
        *magick_restrict q;
      register ssize_t
        x;
      ssize_t
        offset;
      q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
        exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      offset=GetPixelChannelOffset(noise_image,pixel_channel);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        MagickRealType
          pixel;
        /* denoised value = accumulated details + final smoothed plane */
        pixel=(MagickRealType) pixels[i]+pixels[low_pass+i];
        q[offset]=ClampToQuantum(pixel);
        i++;
        q+=GetPixelChannels(noise_image);
      }
      sync=SyncCacheViewAuthenticPixels(noise_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /*
          NOTE(review): progress is reported under AddNoiseImageTag, not a
          denoise-specific tag -- looks inherited from AddNoiseImage; confirm.
        */
        proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
          channel,GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  kernel=(float *) RelinquishMagickMemory(kernel);
  pixels_info=RelinquishVirtualMemory(pixels_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
|
GB_binop__bshift_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__bshift_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__bshift_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_uint16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_uint16)
// C=scalar+B GB (_bind1st__bshift_uint16)
// C=scalar+B' GB (_bind1st_tran__bshift_uint16)
// C=A+scalar GB (_bind2nd__bshift_uint16)
// C=A'+scalar GB (_bind2nd_tran__bshift_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_bitshift_uint16 (aij, bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_bitshift_uint16 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_UINT16 || GxB_NO_BSHIFT_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled for BSHIFT: the dense C += A+B kernel is generated only when
// the op is MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B: all three matrices dense; apply the binop with no accumulator.
void GB (_Cdense_ewise3_noaccum__bshift_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix; returns
// GrB_NO_VALUE when this operator/type kernel is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__bshift_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // C(i,j) += B(i,j) via the dense subassign (method 23) template
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB (_Cdense_accumb__bshift_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable; the generator emits both returns.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: no colscale (C = A*D) kernel is generated for BSHIFT.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: no rowscale (C = D*B) kernel is generated for BSHIFT.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B; C's pattern is the set union of A
// and B.  For eWiseUnion the alpha/beta scalars stand in for entries present
// in only one of the two inputs.
GrB_Info GB (_AaddB__bshift_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha is typed as A (uint16_t) and beta as B (int8_t); both are read
    // only when is_eWiseUnion is true.
    uint16_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper;
// C's pattern is the set intersection of A and B (method 08).
GrB_Info GB (_AemultB_08__bshift_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  BSHIFT is not commutative (GB_BINOP_FLIP is 1), so flipxy
// selects between fmult(x,y) and fmult(y,x) at compile time via GB_FLIPPED.
GrB_Info GB (_AemultB_02__bshift_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// C<M> = A.*B (eWiseMult, method 04) when M is sparse/hyper and both A and B
// are bitmap/full, for z = bitshift (x, y) (x, z: uint16_t; y: int8_t).
GrB_Info GB (_AemultB_04__bshift_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// C=A.*B, C<M>=A.*B, or C<!M>=A.*B (eWiseMult) where C is held as bitmap,
// for z = bitshift (x, y) (x, z: uint16_t; y: int8_t).
GrB_Info GB (_AemultB_bitmap__bshift_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bitshift (x, Bx [p]) for all present entries p: apply the binary
// operator with the scalar x bound to its first argument.  Entries absent
// from the bitmap Bb are skipped.  Cx and Bx may be aliased (each Cx [p] is
// written only after Bx [p] has been read).
GrB_Info GB (_bind1st__bshift_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
// each iteration is independent: embarrassingly parallel over the entries
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_bitshift_uint16 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bitshift (Ax [p], y) for all present entries p: apply the binary
// operator with the scalar y bound to its second argument.  Entries absent
// from the bitmap Ab are skipped.  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__bshift_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
// each iteration is independent: embarrassingly parallel over the entries
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_bitshift_uint16 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below to compute each entry
// of C = op (x, A'): aij (the 2nd operand) has type int8_t, result uint16_t.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_uint16 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB (_bind1st_tran__bshift_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE to the uint16_t definition used by the rest of this file
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below to compute each entry
// of C = op (A', y): aij has type uint16_t, the bound scalar y is int8_t.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_uint16 (aij, y) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB (_bind2nd_tran__bshift_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
SwathFileConsumer.h | // --------------------------------------------------------------------------
// OpenMS -- Open-Source Mass Spectrometry
// --------------------------------------------------------------------------
// Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,
// ETH Zurich, and Freie Universitaet Berlin 2002-2016.
//
// This software is released under a three-clause BSD license:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of any author or any participating institution
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
// For a full list of authors, refer to the file AUTHORS.
// --------------------------------------------------------------------------
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
// INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------
#ifndef OPENMS_FORMAT_DATAACCESS_SWATHFILECONSUMER_H
#define OPENMS_FORMAT_DATAACCESS_SWATHFILECONSUMER_H
#include <boost/cast.hpp>
// Datastructures
#include <OpenMS/ANALYSIS/OPENSWATH/OPENSWATHALGO/DATAACCESS/DataStructures.h>
#include <OpenMS/ANALYSIS/OPENSWATH/OPENSWATHALGO/DATAACCESS/SwathMap.h>
// Consumers
#include <OpenMS/FORMAT/DATAACCESS/MSDataCachedConsumer.h>
#include <OpenMS/FORMAT/DATAACCESS/MSDataWritingConsumer.h>
#include <OpenMS/FORMAT/DATAACCESS/MSDataTransformingConsumer.h>
// Helpers
#include <OpenMS/ANALYSIS/OPENSWATH/OpenSwathHelper.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SimpleOpenMSSpectraAccessFactory.h>
#include <OpenMS/INTERFACES/IMSDataConsumer.h>
#include <OpenMS/FORMAT/CachedMzML.h>
#ifdef _OPENMP
#include <omp.h>
#endif
namespace OpenMS
{
/**
* @brief Abstract base class which can consume spectra coming from SWATH experiment stored in a single file.
*
* The class consumes spectra which are coming from a complete SWATH
* experiment. It will group MS2 spectra by their precursor m/z, assuming
* that they correspond to the same SWATH window. For example, the spectra
* could be arranged in the following fashion:
*
* - MS1 Spectrum (no precursor)
* - MS2 Spectrum (precursor = [400,425])
* - MS2 Spectrum (precursor = [425,450])
* - [...]
* - MS2 Spectrum (precursor = [1175,1200])
* - MS1 Spectrum (no precursor)
* - MS2 Spectrum (precursor = [400,425])
* - MS2 Spectrum (precursor = [425,450])
* - [...]
*
* Base classes are expected to implement functions consuming a spectrum coming
* from a specific SWATH or an MS1 spectrum and a final function
* ensureMapsAreFilled_ after which the swath_maps_ vector needs to contain
* valid pointers to MSExperiment.
*
* In addition it is possible to provide the swath boundaries and the read in
* spectra will be matched by their precursor m/z to the "center" attribute
* of the provided Swath maps.
*
* Usage:
*
* @code
* FullSwathFileConsumer * dataConsumer;
* // assign dataConsumer to an implementation of FullSwathFileConsumer
* MzMLFile().transform(file, dataConsumer);
* dataConsumer->retrieveSwathMaps(maps);
* @endcode
*
*/
class OPENMS_DLLAPI FullSwathFileConsumer :
public Interfaces::IMSDataConsumer<>
{
public:
typedef MSExperiment<> MapType;
typedef MapType::SpectrumType SpectrumType;
typedef MapType::ChromatogramType ChromatogramType;
/// Default constructor: no externally provided window boundaries; windows
/// are discovered on the fly from the precursor m/z of incoming spectra.
FullSwathFileConsumer() :
ms1_map_(), // initialize to null
consuming_possible_(true),
use_external_boundaries_(false),
correct_window_counter_(0)
{
// NOTE(review): swath_map_boundaries_ is default-constructed (empty) at
// this point, so this always evaluates to false; kept for symmetry with
// the boundary-taking constructor below.
use_external_boundaries_ = !swath_map_boundaries_.empty();
}
/**
* @brief Constructor
*
* @param swath_boundaries A vector of SwathMaps of which only the center,
* lower and upper attributes will be used to infer the expected Swath maps.
*
*/
FullSwathFileConsumer(std::vector<OpenSwath::SwathMap> swath_boundaries) :
swath_map_boundaries_(swath_boundaries),
ms1_map_(), // initialize to null
consuming_possible_(true),
use_external_boundaries_(false),
correct_window_counter_(0)
{
use_external_boundaries_ = !swath_map_boundaries_.empty();
}
// NOTE(review): destructor is not declared virtual here although the class
// is abstract; deleting through a FullSwathFileConsumer* relies on the
// IMSDataConsumer base declaring a virtual destructor — confirm.
~FullSwathFileConsumer() {}
/// Required by the IMSDataConsumer interface; expected sizes are not needed here.
void setExpectedSize(Size, Size) {}
/// Store the experimental settings so derived classes can copy them into new maps.
void setExperimentalSettings(const ExperimentalSettings& exp) {settings_ = exp; }
/**
* @brief Populate the vector of swath maps after consuming all spectra.
*
* Will populate the input vector with SwathMap objects which correspond to
* the MS1 map (if present) and the MS2 maps (SWATH maps). This should be
* called after all spectra are consumed.
*
* @note It is not possible to consume any more spectra after calling this
* function (it contains finalization code and may close file streams).
*
*/
void retrieveSwathMaps(std::vector<OpenSwath::SwathMap>& maps)
{
consuming_possible_ = false; // make consumption of further spectra / chromatograms impossible
ensureMapsAreFilled_();
// The MS1 map, if any, is emitted first with sentinel window limits of -1.
if (ms1_map_)
{
OpenSwath::SwathMap map;
map.sptr = SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(ms1_map_);
map.lower = -1;
map.upper = -1;
map.center = -1;
map.ms1 = true;
maps.push_back(map);
}
// Print warning if the lower/upper window could not be determined and we
// required manual determination of the boundaries.
if (!use_external_boundaries_ && correct_window_counter_ != swath_maps_.size())
{
std::cout << "WARNING: Could not correctly read the upper/lower limits of the SWATH windows from your input file. Read " <<
correct_window_counter_ << " correct (non-zero) window limits (expected " << swath_maps_.size() << " windows)." << std::endl;
}
// NOTE(review): this indexes swath_map_boundaries_ by the swath_maps_ index,
// i.e. it assumes both vectors are kept in sync (consumeSpectrum appends a
// boundary whenever a new window is discovered) — verify for subclasses.
size_t nonempty_maps = 0;
for (Size i = 0; i < swath_maps_.size(); i++)
{
OpenSwath::SwathMap map;
map.sptr = SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(swath_maps_[i]);
map.lower = swath_map_boundaries_[i].lower;
map.upper = swath_map_boundaries_[i].upper;
map.center = swath_map_boundaries_[i].center;
map.ms1 = false;
maps.push_back(map);
if (map.sptr->getNrSpectra() > 0) {nonempty_maps++;}
}
if (nonempty_maps != swath_map_boundaries_.size())
{
std::cout << "WARNING: The number nonempty maps found in the input file (" << nonempty_maps << ") is not equal to the number of provided swath window boundaries (" <<
swath_map_boundaries_.size() << "). Please check your input." << std::endl;
}
}
/// Consume a chromatogram -> should not happen when dealing with SWATH maps
void consumeChromatogram(MapType::ChromatogramType&)
{
std::cerr << "Read chromatogram while reading SWATH files, did not expect that!" << std::endl;
}
/**
* @brief * Consume a spectrum which may belong either to an MS1 scan or
* one of n MS2 (SWATH) scans
*
*/
void consumeSpectrum(MapType::SpectrumType& s)
{
if (!consuming_possible_)
{
throw Exception::IllegalArgument(__FILE__, __LINE__, __PRETTY_FUNCTION__,
"FullSwathFileConsumer cannot consume any more spectra after retrieveSwathMaps has been called already");
}
if (s.getMSLevel() == 1)
{
consumeMS1Spectrum_(s);
}
else
{
if (s.getPrecursors().empty())
{
throw Exception::InvalidParameter(__FILE__, __LINE__, __PRETTY_FUNCTION__,
"Swath scan does not provide a precursor.");
}
const std::vector<Precursor> prec = s.getPrecursors();
// only the first precursor is considered for window assignment
double center = prec[0].getMZ();
double lower = prec[0].getMZ() - prec[0].getIsolationWindowLowerOffset();
double upper = prec[0].getMZ() + prec[0].getIsolationWindowUpperOffset();
bool found = false;
// Check if enough information is present to infer the swath
if (center <= 0.0)
{
throw Exception::InvalidParameter(__FILE__, __LINE__, __PRETTY_FUNCTION__,
"Swath scan does not provide any precursor isolation information.");
}
// try to match the current scan to one of the already known windows;
// matching is done purely on the window center m/z (1e-6 tolerance)
for (Size i = 0; i < swath_map_boundaries_.size(); i++)
{
// We group by the precursor mz (center of the window) since this
// should be present in all SWATH scans.
if (std::fabs(center - swath_map_boundaries_[i].center) < 1e-6)
{
found = true;
consumeSwathSpectrum_(s, i);
}
}
if (!found)
{
if (use_external_boundaries_)
{
// with user-supplied boundaries an unknown window is an error
throw Exception::InvalidParameter(__FILE__, __LINE__, __PRETTY_FUNCTION__,
String("Encountered SWATH scan with boundary ") + center + " m/z which was not present in the provided windows.");
}
else
{
consumeSwathSpectrum_(s, swath_map_boundaries_.size());
// we found a new SWATH window
if (lower > 0.0 && upper > 0.0)
{correct_window_counter_++;}
OpenSwath::SwathMap boundary;
boundary.lower = lower;
boundary.upper = upper;
boundary.center = center;
swath_map_boundaries_.push_back(boundary);
LOG_DEBUG << "Adding Swath centered at " << center
<< " m/z with an isolation window of " << lower << " to " << upper
<< " m/z." << std::endl;
}
}
}
}
protected:
/**
* @brief Consume an MS2 spectrum belonging to SWATH "swath_nr"
*
* This function should handle a spectrum belonging to a specific SWATH
* (indicated by swath_nr).
*
*/
virtual void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr) = 0;
/**
* @brief Consume an MS1 spectrum
*
* This function should handle an MS1 spectrum.
*
*/
virtual void consumeMS1Spectrum_(MapType::SpectrumType& s) = 0;
/**
* @brief Callback function after the reading is complete
*
* Has to ensure that swath_maps_ and ms1_map_ are correctly populated.
*/
virtual void ensureMapsAreFilled_() = 0;
/// A list of Swath map identifiers (lower/upper boundary and center)
std::vector<OpenSwath::SwathMap> swath_map_boundaries_;
/// A list of SWATH maps and the MS1 map
std::vector<boost::shared_ptr<MSExperiment<> > > swath_maps_;
/// The MS1 map (null until the first MS1 spectrum is consumed)
boost::shared_ptr<MSExperiment<> > ms1_map_;
/// The Experimental settings
// (MSExperiment has no constructor using ExperimentalSettings)
MSExperiment<> settings_;
/// Whether further spectra can still be consumed
bool consuming_possible_;
/// Whether to use external input for SWATH boundaries
bool use_external_boundaries_;
/// How many windows were correctly annotated (non-zero window limits)
size_t correct_window_counter_;
};
/**
* @brief In-memory implementation of FullSwathFileConsumer
*
* Keeps all the spectra in memory by just appending them to an MSExperiment.
*
*/
class OPENMS_DLLAPI RegularSwathFileConsumer :
public FullSwathFileConsumer
{
public:
typedef MSExperiment<> MapType;
typedef MapType::SpectrumType SpectrumType;
typedef MapType::ChromatogramType ChromatogramType;
RegularSwathFileConsumer() {}
RegularSwathFileConsumer(std::vector<OpenSwath::SwathMap> known_window_boundaries) :
FullSwathFileConsumer(known_window_boundaries) {}
protected:
/// Append one fresh, empty experiment (carrying the experimental settings)
/// as a new SWATH map held in memory.
void addNewSwathMap_()
{
boost::shared_ptr<MSExperiment<Peak1D> > fresh_map(new MSExperiment<Peak1D>(settings_));
swath_maps_.push_back(fresh_map);
}
/// Store an MS2 spectrum in the experiment of its SWATH window, creating
/// empty maps on demand until the requested window index exists.
void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr)
{
for (size_t k = swath_maps_.size(); k <= swath_nr; ++k)
{
addNewSwathMap_();
}
swath_maps_[swath_nr]->addSpectrum(s);
}
/// Create the (single) in-memory MS1 experiment.
void addMS1Map_()
{
boost::shared_ptr<MSExperiment<Peak1D> > fresh_map(new MSExperiment<Peak1D>(settings_));
ms1_map_ = fresh_map;
}
/// Store an MS1 spectrum, lazily creating the MS1 map on first use.
void consumeMS1Spectrum_(MapType::SpectrumType& s)
{
if (!ms1_map_) { addMS1Map_(); }
ms1_map_->addSpectrum(s);
}
/// Everything already lives in memory; nothing to finalize.
void ensureMapsAreFilled_() {}
};
/**
* @brief On-disk cached implementation of FullSwathFileConsumer
*
* Writes all spectra immediately to disk in a user-specified caching
* location using the MSDataCachedConsumer. Internally, it handles
* n+1 (n SWATH + 1 MS1 map) objects of MSDataCachedConsumer which can consume the
* spectra and write them to disk immediately.
*
*/
class OPENMS_DLLAPI CachedSwathFileConsumer :
public FullSwathFileConsumer
{
public:
typedef MSExperiment<> MapType;
typedef MapType::SpectrumType SpectrumType;
typedef MapType::ChromatogramType ChromatogramType;
/// @param cachedir Directory (prefix) where the cached files are written
/// @param basename Base name used to derive the per-window file names
/// @param nr_ms1_spectra Expected number of MS1 spectra (passed to the consumer)
/// @param nr_ms2_spectra Expected number of MS2 spectra per SWATH window
CachedSwathFileConsumer(String cachedir, String basename, Size nr_ms1_spectra, std::vector<int> nr_ms2_spectra) :
ms1_consumer_(NULL),
swath_consumers_(),
cachedir_(cachedir),
basename_(basename),
nr_ms1_spectra_(nr_ms1_spectra),
nr_ms2_spectra_(nr_ms2_spectra)
{}
CachedSwathFileConsumer(std::vector<OpenSwath::SwathMap> known_window_boundaries,
String cachedir, String basename, Size nr_ms1_spectra, std::vector<int> nr_ms2_spectra) :
FullSwathFileConsumer(known_window_boundaries),
ms1_consumer_(NULL),
swath_consumers_(),
cachedir_(cachedir),
basename_(basename),
nr_ms1_spectra_(nr_ms1_spectra),
nr_ms2_spectra_(nr_ms2_spectra)
{}
~CachedSwathFileConsumer()
{
// Properly delete the MSDataCachedConsumer -> free memory and _close_ file stream
while (!swath_consumers_.empty())
{
delete swath_consumers_.back();
swath_consumers_.pop_back();
}
if (ms1_consumer_ != NULL)
{
delete ms1_consumer_;
ms1_consumer_ = NULL;
}
}
protected:
/// Create one cached-file consumer (writing to <cachedir><basename>_<i>.mzML.cached)
/// plus an in-memory experiment that collects only the metadata.
void addNewSwathMap_()
{
String meta_file = cachedir_ + basename_ + "_" + String(swath_consumers_.size()) + ".mzML";
String cached_file = meta_file + ".cached";
MSDataCachedConsumer* consumer = new MSDataCachedConsumer(cached_file, true);
consumer->setExpectedSize(nr_ms2_spectra_[swath_consumers_.size()], 0);
swath_consumers_.push_back(consumer);
// maps for meta data
boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>(settings_));
swath_maps_.push_back(exp);
}
/// Write an MS2 spectrum to the cache of its SWATH window; only metadata is
/// kept in memory.
void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr)
{
while (swath_maps_.size() <= swath_nr)
{
addNewSwathMap_();
}
swath_consumers_[swath_nr]->consumeSpectrum(s);
swath_maps_[swath_nr]->addSpectrum(s); // append for the metadata (actual data is deleted)
}
/// Create the MS1 cached-file consumer and the MS1 metadata experiment.
void addMS1Map_()
{
String meta_file = cachedir_ + basename_ + "_ms1.mzML";
String cached_file = meta_file + ".cached";
ms1_consumer_ = new MSDataCachedConsumer(cached_file, true);
ms1_consumer_->setExpectedSize(nr_ms1_spectra_, 0);
boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>(settings_));
ms1_map_ = exp;
}
void consumeMS1Spectrum_(MapType::SpectrumType& s)
{
if (ms1_consumer_ == NULL)
{
addMS1Map_();
}
ms1_consumer_->consumeSpectrum(s);
ms1_map_->addSpectrum(s); // append for the metadata (actual data is deleted)
}
/// Close all cache streams, then write the meta-mzML files and re-load them
/// so that swath_maps_ / ms1_map_ point at experiments whose metadata is on disk.
void ensureMapsAreFilled_()
{
size_t swath_consumers_size = swath_consumers_.size();
bool have_ms1 = (ms1_consumer_ != NULL);
// Properly delete the MSDataCachedConsumer -> free memory and _close_ file stream
// The file streams to the cached data on disc can and should be closed
// here safely. Since ensureMapsAreFilled_ is called after consuming all
// the spectra, there will be no more spectra to append but the client
// might already want to read after this call, so all data needs to be
// present on disc and the file streams closed.
//
// TODO merge with destructor code into own function!
while (!swath_consumers_.empty())
{
delete swath_consumers_.back();
swath_consumers_.pop_back();
}
if (ms1_consumer_ != NULL)
{
delete ms1_consumer_;
ms1_consumer_ = NULL;
}
if (have_ms1)
{
boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>);
String meta_file = cachedir_ + basename_ + "_ms1.mzML";
// write metadata to disk and store the correct data processing tag
CachedmzML().writeMetadata(*ms1_map_, meta_file, true);
MzMLFile().load(meta_file, *exp.get());
ms1_map_ = exp;
}
// each iteration touches only its own file and map slot, so the loop is
// safe to parallelize
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (SignedSize i = 0; i < boost::numeric_cast<SignedSize>(swath_consumers_size); i++)
{
boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>);
String meta_file = cachedir_ + basename_ + "_" + String(i) + ".mzML";
// write metadata to disk and store the correct data processing tag
CachedmzML().writeMetadata(*swath_maps_[i], meta_file, true);
MzMLFile().load(meta_file, *exp.get());
swath_maps_[i] = exp;
}
}
MSDataCachedConsumer* ms1_consumer_;
std::vector<MSDataCachedConsumer*> swath_consumers_;
String cachedir_;
String basename_;
// NOTE(review): constructor parameter is Size but this member is int — a
// narrowing conversion; confirm spectrum counts always fit in int.
int nr_ms1_spectra_;
std::vector<int> nr_ms2_spectra_;
};
/**
* @brief On-disk mzML implementation of FullSwathFileConsumer
*
* Writes all spectra immediately to disk to an mzML file location using the
* PlainMSDataWritingConsumer. Internally, it handles n+1 (n SWATH + 1 MS1
* map) objects of PlainMSDataWritingConsumer which can consume the spectra
* and write them to disk immediately.
*
*/
class OPENMS_DLLAPI MzMLSwathFileConsumer :
public FullSwathFileConsumer
{
public:
typedef MSExperiment<> MapType;
typedef MapType::SpectrumType SpectrumType;
typedef MapType::ChromatogramType ChromatogramType;
/// @param cachedir Directory (prefix) where the mzML files are written
/// @param basename Base name used to derive the per-window file names
/// @param nr_ms1_spectra Expected number of MS1 spectra (passed to the consumer)
/// @param nr_ms2_spectra Expected number of MS2 spectra per SWATH window
MzMLSwathFileConsumer(String cachedir, String basename, Size nr_ms1_spectra, std::vector<int> nr_ms2_spectra) :
ms1_consumer_(NULL),
swath_consumers_(),
cachedir_(cachedir),
basename_(basename),
nr_ms1_spectra_(nr_ms1_spectra),
nr_ms2_spectra_(nr_ms2_spectra)
{}
MzMLSwathFileConsumer(std::vector<OpenSwath::SwathMap> known_window_boundaries,
String cachedir, String basename, Size nr_ms1_spectra, std::vector<int> nr_ms2_spectra) :
FullSwathFileConsumer(known_window_boundaries),
ms1_consumer_(NULL),
swath_consumers_(),
cachedir_(cachedir),
basename_(basename),
nr_ms1_spectra_(nr_ms1_spectra),
nr_ms2_spectra_(nr_ms2_spectra)
{}
~MzMLSwathFileConsumer()
{
deleteSetNull_();
}
protected:
/// Delete all writing consumers (flushes and closes their file streams).
void deleteSetNull_()
{
// Properly delete the MSDataCachedConsumer -> free memory and _close_ file stream
while (!swath_consumers_.empty())
{
delete swath_consumers_.back();
swath_consumers_.pop_back();
}
if (ms1_consumer_ != NULL)
{
delete ms1_consumer_;
ms1_consumer_ = NULL;
}
}
/// Create one mzML-writing consumer for the next SWATH window
/// (<cachedir><basename>_<i>.mzML).
void addNewSwathMap_()
{
String mzml_file = cachedir_ + basename_ + "_" + String(swath_consumers_.size()) + ".mzML";
PlainMSDataWritingConsumer* consumer = new PlainMSDataWritingConsumer(mzml_file);
consumer->setExpectedSize(nr_ms2_spectra_[swath_consumers_.size()], 0);
swath_consumers_.push_back(consumer);
}
/// Write an MS2 spectrum to its window's mzML file; the peak data is
/// cleared afterwards (s.clear(false)) and nothing is kept in memory.
void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr)
{
// only use swath_maps_ to count how many we have already added
while (swath_consumers_.size() <= swath_nr)
{
addNewSwathMap_();
}
swath_consumers_[swath_nr]->consumeSpectrum(s);
s.clear(false);
}
void addMS1Map_()
{
String mzml_file = cachedir_ + basename_ + "_ms1.mzML";
ms1_consumer_ = new PlainMSDataWritingConsumer(mzml_file);
ms1_consumer_->setExpectedSize(nr_ms1_spectra_, 0);
// NOTE(review): the experiment created here is never stored — the
// assignment below is commented out, so ms1_map_ stays null and the base
// class will not emit an MS1 SwathMap in retrieveSwathMaps. Confirm this
// is intentional (the data is on disk in the mzML file either way).
boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>(settings_));
// ms1_map_ = exp;
}
void consumeMS1Spectrum_(MapType::SpectrumType& s)
{
if (ms1_consumer_ == NULL)
{
addMS1Map_();
}
ms1_consumer_->consumeSpectrum(s);
s.clear(false);
}
/// Finalization only needs to flush/close the writers; the data lives in
/// the written mzML files.
void ensureMapsAreFilled_()
{
deleteSetNull_();
}
PlainMSDataWritingConsumer* ms1_consumer_;
std::vector<PlainMSDataWritingConsumer*> swath_consumers_;
String cachedir_;
String basename_;
// NOTE(review): constructor parameter is Size but this member is int — a
// narrowing conversion; confirm spectrum counts always fit in int.
int nr_ms1_spectra_;
std::vector<int> nr_ms2_spectra_;
};
}
#endif
|
voxelize.c | #include "voxelGen.h"
/*
* Un-comment them to have debug messages or internal performance measurements
*/
//#define DEBUG_LOG
//#define PERF_LOG
/*
* the type Vertex is defined in the file "voxelGen.h", in any case
* the type definition is also written above (commented out)
* between lines 7 to 13
*/
void printVertex(Vertex *v) {
    /* Print a vertex to stdout as "(x,y,z)" with %f formatting; no newline. */
    printf("(%f,%f,%f)", v->x, v->y, v->z);
}
/*
* voxel as {i,j,k}
*/
void printVoxel(Voxel *v) {
    /* Print an integer voxel index {i,j,k} to stdout as "(i,j,k)"; no newline. */
    printf("(%d,%d,%d)", v->x, v->y, v->z);
}
/*
* as for A=B assigning the value of a point to another
*/
void assign(Vertex *to, Vertex *from) {
    /* Component-wise copy: to = from. */
    to->x = from->x;  to->y = from->y;  to->z = from->z;
}
/*
* A+B when B is implicitly a Vector3d ({x,y,z})
*/
void addTo(Vertex *result, Vertex *v, double dx, double dy, double dz) {
    /* result = v + (dx, dy, dz): translate v by an implicit vector. */
    result->x = v->x + dx;
    result->y = v->y + dy;
    result->z = v->z + dz;
}
/*
* A-B when B is implicitly a Vector3d ({x,y,z})
*/
void subtractFrom(Vertex *result, Vertex *v, double dx, double dy, double dz) {
    /* result = v - (dx, dy, dz): translate v by the negated implicit vector. */
    result->x = v->x - dx;
    result->y = v->y - dy;
    result->z = v->z - dz;
}
/*
* A+B
*/
void add(Vertex *result, Vertex *a, Vertex *b) {
    /* result = a + b (delegates to the component form). */
    addTo(result, a, b->x, b->y, b->z);
}
/*
* A-B
*/
void subtract(Vertex *result, Vertex *a, Vertex *b) {
    /* result = a - b (delegates to the component form). */
    subtractFrom(result, a, b->x, b->y, b->z);
}
/*
* C={x,y,z}=AxB| x=u2v3-u3v2, y=u3v1-u1v3, z=u1v2-u2v1
*/
void crossProduct(Vertex *result, Vertex *a, Vertex *b) {
    /* result = a x b (right-handed cross product).
     * Like the original, result must not alias a or b: components are
     * written in place as they are computed. */
    result->x = a->y * b->z - a->z * b->y;
    result->y = a->z * b->x - a->x * b->z;
    result->z = a->x * b->y - a->y * b->x;
}
/*
* C=A.B=u1v1+u2v2+u3v3
*/
double dotProduct(Vertex *a, Vertex *b) {
    /* Scalar product a.b = ax*bx + ay*by + az*bz (summed left to right,
     * matching the original's floating-point evaluation order). */
    double acc = a->x * b->x;
    acc += a->y * b->y;
    acc += a->z * b->z;
    return acc;
}
/*
* scaling a Vector3d or Point3d
*/
void product(Vertex *result, double factor, Vertex *v) {
    /* result = factor * v: uniform scaling of a vector/point. */
    result->x = factor * v->x;
    result->y = factor * v->y;
    result->z = factor * v->z;
}
/*
* Euclidean distance(i.e. Sqrt(x2+y2+z2))
*/
double distance(Vertex *a, Vertex *b) {
    /* Euclidean distance sqrt(dx^2 + dy^2 + dz^2) between two points. */
    double dx = a->x - b->x;
    double dy = a->y - b->y;
    double dz = a->z - b->z;
    return sqrt(dx*dx + dy*dy + dz*dz);
}
/*
 * The bounding box is defined as a tuple (Point3d, Point3d) holding the minimum
 * and maximum corners of the bounding box. Having the min corner means having
 * the min values of the X, Y and Z intervals, and having the max corner means
 * having the max values of the X, Y and Z intervals. This function finds the
 * min and max values in x, y and z.
 */
void getRBoundingBox(Vertex *vertices, unsigned int nVertices, Vertex *min, Vertex *max) {
    /* Axis-aligned bounding box of a vertex array: seed both corners with the
     * first vertex, then widen them over the remaining vertices.
     * Precondition (as in the original): nVertices >= 1. */
    min->x = max->x = vertices[0].x;
    min->y = max->y = vertices[0].y;
    min->z = max->z = vertices[0].z;
    for (unsigned int i = 1; i < nVertices; ++i) {
        Vertex *p = &vertices[i];
        if (p->x < min->x) min->x = p->x;
        if (p->x > max->x) max->x = p->x;
        if (p->y < min->y) min->y = p->y;
        if (p->y > max->y) max->y = p->y;
        if (p->z < min->z) min->z = p->z;
        if (p->z > max->z) max->z = p->z;
    }
}
void centerOfVoxel_(int ix, int iy, int iz, Vertex *rMin, Vertex *vSize, Vertex *center) {
    /* Center of grid cell (ix,iy,iz): grid origin rMin plus the cell offset
     * plus half a cell (expression kept term-for-term identical to the
     * original so the floating-point result is bit-for-bit the same). */
    center->x = rMin->x + ((double)ix)*vSize->x + 0.5*vSize->x;
    center->y = rMin->y + ((double)iy)*vSize->y + 0.5*vSize->y;
    center->z = rMin->z + ((double)iz)*vSize->z + 0.5*vSize->z;
}
/*
* embeds a voxel defined as {i,j,k} (three integers in Z3) in R3 as a Point3d, which
* is a {x,y,z} or a point with three "double"s
*/
void centerOfVoxel(Voxel *voxel, Vertex *rMin, Vertex *vSize, Vertex *center) {
    /* Embed an integer voxel {i,j,k} into R^3 as the center point of its
     * grid cell (same arithmetic as centerOfVoxel_ with the indices taken
     * from the Voxel struct). */
    center->x = rMin->x + ((double)voxel->x)*vSize->x + 0.5*vSize->x;
    center->y = rMin->y + ((double)voxel->y)*vSize->y + 0.5*vSize->y;
    center->z = rMin->z + ((double)voxel->z)*vSize->z + 0.5*vSize->z;
}
double distancePointSegment(LineSegment *l, Vertex *p) {
    /* Shortest distance from point p to the segment [l->start, l->end]:
     * project p onto the segment's direction and clamp to the endpoints. */
    Vertex a, b, dir, ap;
    assign(&a, &l->start);
    assign(&b, &l->end);
    subtract(&dir, &l->end, &a);
    subtract(&ap, p, &a);
    double proj = dotProduct(&ap, &dir);
    if (proj <= 0) {
        /* projection falls before the start point -> closest to start */
        return distance(p, &a);
    }
    double len2 = dotProduct(&dir, &dir);
    if (len2 <= proj) {
        /* projection falls past the end point -> closest to end */
        return distance(p, &b);
    }
    /* interior: closest point is a + (proj/len2) * dir */
    double frac = proj / len2;
    Vertex step, closest;
    product(&step, frac, &dir);
    add(&closest, &a, &step);
    return distance(p, &closest);
}
double distancePointTriangle(Face *f, Vertex *p) {
    /*
     * Distance from point p to the triangle f = (p1, p2, p3).
     *
     * The triangle's plane is parameterized as P(s,t) = O + s*u + t*v with
     * O = p1, u = p2 - p1, v = p3 - p1; the closest point is classified into
     * one of seven regions of the (s,t) plane (diagram below) and the
     * distance is taken to the interior point, an edge, or a vertex.
     * Returns DBL_MAX for a degenerate (zero-area) triangle.
     */
    Vertex o, u, v;
    assign(&o, f->p1);
    subtract(&u, f->p2, &o);
    subtract(&v, f->p3, &o);
    Vertex w;
    subtract(&w, p, &o);
    double uu = dotProduct(&u, &u);
    double vv = dotProduct(&v, &v);
    double uv = dotProduct(&u, &v);
    double wu = dotProduct(&w, &u);
    double wv = dotProduct(&w, &v);
    /*
     * det = uu*vv - uv*uv = |u x v|^2 >= 0 by Cauchy-Schwarz; it is zero
     * exactly when the two edges are parallel (degenerate triangle).
     *
     * BUG FIX: the original computed abs(uv*uv - uu*vv) with abs(), the
     * *integer* absolute value, which truncates the double determinant to
     * int — any 0 < det < 1 collapsed to 0 and the function wrongly
     * returned DBL_MAX. Since det is mathematically non-negative, no
     * absolute value is needed at all.
     */
    double det = uu*vv - uv*uv;
    if (det > 0) {
        double s = -(uv*wv-vv*wu);
        double t = -(uv*wu-uu*wv);
        /*
              t
           \R2|
            \ |
             \|
              |\
              | \R1
            R3|R0\
            __|___\_____s
            R4|R5  \R6
         */
        if (s + t <= det) {
            if (s < 0) {
                if (t < 0) {
                    /* Region4: closest to vertex V0 */
                    return distance(f->p1, p);
                } else {
                    /* Region3: closest to edge (V0,V2) */
                    LineSegment l;
                    assign(&l.start, f->p1);
                    assign(&l.end, f->p3);
                    return distancePointSegment(&l, p);
                }
            } else if (t < 0) {
                /* Region5: closest to edge (V0,V1) */
                LineSegment l;
                assign(&l.start, f->p1);
                assign(&l.end, f->p2);
                return distancePointSegment(&l, p);
            } else {
                /* Region0: projection falls inside the triangle */
                s /= det;
                t /= det;
                Vertex vertex, s_u, t_v;
                product(&s_u, s, &u);
                product(&t_v, t, &v);
                add(&vertex, &o, &s_u);
                add(&vertex, &vertex, &t_v);
                return distance(&vertex, p);
            }
        } else {
            if (s < 0) {
                /* Region2: closest to vertex V2 */
                return distance(f->p3, p);
            } else if (t < 0) {
                /* Region6: closest to vertex V1 */
                return distance(f->p2, p);
            } else {
                /* Region1: closest to edge (V1,V2) */
                LineSegment l;
                assign(&l.start, f->p2);
                assign(&l.end, f->p3);
                return distancePointSegment(&l, p);
            }
        }
    } else {
        /* degenerate triangle: no well-defined distance, report "far" */
        return DBL_MAX;
    }
}
/*
 * returns 1 on success and 0 on failure; success means that the distance of
 * the point to one of the triangles of the mesh is smaller than half of the
 * length (norm) of the vSize vector (the longest diagonal of a voxel cube)
 */
int isNear(int num_threads, Face **mesh, unsigned int nTriangles, Vertex *voxelCenter, Vertex *vSize) {
    /* Returns 1 iff the voxel center is within half the voxel diagonal of
     * any triangle of the mesh, 0 otherwise.
     * NOTE(review): num_threads is accepted but unused, exactly as in the
     * original implementation. */
    (void)num_threads;
    double threshold = 0.5*sqrt(vSize->x*vSize->x + vSize->y*vSize->y + vSize->z*vSize->z);
    for (unsigned int tri = 0; tri < nTriangles; ++tri) {
        if (distancePointTriangle(mesh[tri], voxelCenter) <= threshold) {
            return 1;
        }
    }
    return 0;
}
/*
* using the parametric equation of a point as P(s,t) in reference to two edges of a triangle
* we find out if the intersection of a point and the plane corresponding to the triangle lies within the triangle;
* this would correspond to s and t belonging to [0,1] and s+t,=1
*/
int intersectsTriangleLine(Face *f, LineSegment *l) {
    /*
     * Test whether line segment l intersects triangle f; returns 1 on
     * intersection, 0 otherwise.
     *
     * The triangle's plane is hit at parameter alpha along the segment
     * (p = ps + alpha*(pe-ps)); the hit point is then tested against the
     * triangle using the parametric coordinates (s,t) of P(s,t)=O+s*u+t*v,
     * which must satisfy s >= 0, t >= 0, s + t <= 1.
     *
     * BUG FIX: the original never restricted alpha, so it actually tested
     * the *infinite line* through the segment — a triangle arbitrarily far
     * along that line counted as a hit, wrongly including voxels when the
     * callers probe with short half-voxel segments. alpha is now clamped
     * to [0,1]. Also removed the computation of the intersection point's
     * coordinates, whose result was never used.
     */
    Vertex o, u, v, n;
    assign(&o, f->p1);
    subtract(&u, f->p2, &o);
    subtract(&v, f->p3, &o);
    crossProduct(&n, &u, &v);
    Vertex ps, pe;
    assign(&ps, &l->start);
    assign(&pe, &l->end);
    Vertex o_ps, pe_ps;
    subtract(&o_ps, &o, &ps);
    double nomin = dotProduct(&o_ps, &n);
    subtract(&pe_ps, &pe, &ps);
    double denom = dotProduct(&n, &pe_ps);
    if (denom == 0) {
        /* segment parallel to the plane (or degenerate): no transversal hit */
        return 0;
    }
    double alpha = nomin/denom;
    if (alpha < 0 || alpha > 1) {
        /* the plane is hit outside the segment */
        return 0;
    }
    Vertex scaled_pe_ps, p, w;
    product(&scaled_pe_ps, alpha, &pe_ps);
    add(&p, &ps, &scaled_pe_ps);
    subtract(&w, &p, &o);
    double uu = dotProduct(&u, &u);
    double vv = dotProduct(&v, &v);
    double uv = dotProduct(&u, &v);
    double wu = dotProduct(&w, &u);
    double wv = dotProduct(&w, &v);
    double stdenom = uv*uv-uu*vv;
    if (stdenom == 0) {
        /* degenerate (zero-area) triangle cannot be intersected transversally */
        return 0;
    }
    double s = (uv*wv-vv*wu)/stdenom;
    double t = (uv*wu-uu*wv)/stdenom;
    return (s >= 0 && t >= 0 && s + t <= 1) ? 1 : 0;
}
/*
* using the function triangle line intersection we iterate over all triangular faces of a mesh
*/
/*
 * Returns 1 if line segment l intersects any of the nTriangles triangular
 * faces of the mesh, 0 otherwise.
 */
int intersectsMeshLine(Face **mesh, unsigned int nTriangles, LineSegment *l) {
    unsigned int t;
    for (t = 0; t < nTriangles; ++t) {
        if (intersectsTriangleLine(mesh[t], l)) {
            return 1;
        }
    }
    return 0;
}
/*
* we intersect a "connectivity target"(Laine, 2013) for 26-connected results; this is a 3D
* crosshair composed of 6 lines. If intersection is not null then the voxel should be included
*/
/*
 * 26-connectivity target (Laine, 2013): a 3D crosshair of six half-axis
 * segments anchored at the voxel center.  The voxel is kept (returns 1) as
 * soon as any of the six probes crosses the mesh; otherwise returns 0.
 */
int intersectsMesh26(Face **mesh, unsigned int nTriangles, Vertex *voxelCenter, Vertex *vSize) {
    /* probe endpoints, in the same order as the original unrolled tests:
       +x, +y, +z, -x, -y, -z */
    const double offsets[6][3] = {
        { +0.5*vSize->x, 0.0, 0.0 },
        { 0.0, +0.5*vSize->y, 0.0 },
        { 0.0, 0.0, +0.5*vSize->z },
        { -0.5*vSize->x, 0.0, 0.0 },
        { 0.0, -0.5*vSize->y, 0.0 },
        { 0.0, 0.0, -0.5*vSize->z }
    };
    int i;
    for (i = 0; i < 6; ++i) {
        LineSegment probe;
        assign(&probe.start, voxelCenter);
        assign(&probe.end, voxelCenter);
        probe.end.x += offsets[i][0];
        probe.end.y += offsets[i][1];
        probe.end.z += offsets[i][2];
        if (intersectsMeshLine(mesh, nTriangles, &probe)) {
            return 1;
        }
    }
    return 0;
}
/*
* we intersect a "connectivity target"(Laine, 2013) for 6-connected results with the mesh in
 * question; this is the outline of a voxel cube composed of 12 lines. If intersection is not
* null then the voxel should be included
*/
/*
 * 6-connectivity target (Laine, 2013): the 12-edge wireframe of the voxel
 * cube centered at voxelCenter.  Returns 1 if any edge crosses the mesh,
 * 0 otherwise.
 */
int intersectsMesh6(Face **mesh, unsigned int nTriangles, Vertex *voxelCenter, Vertex *vSize) {
    /* corner signs, same ordering as the original vertices[0..7] */
    static const double sign[8][3] = {
        { +1.0, +1.0, +1.0 }, { -1.0, +1.0, +1.0 },
        { -1.0, -1.0, +1.0 }, { +1.0, -1.0, +1.0 },
        { -1.0, -1.0, -1.0 }, { +1.0, -1.0, -1.0 },
        { +1.0, +1.0, -1.0 }, { -1.0, +1.0, -1.0 }
    };
    /* corner index pairs for the 12 cube edges, same order as before */
    static const int edgeIdx[12][2] = {
        { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 0 },
        { 0, 6 }, { 6, 5 }, { 5, 4 }, { 4, 7 },
        { 5, 3 }, { 4, 2 }, { 1, 7 }, { 6, 7 }
    };
    Vertex corners[8];
    int i;
    for (i = 0; i < 8; ++i) {
        addTo(&corners[i], voxelCenter,
              sign[i][0] * 0.5 * vSize->x,
              sign[i][1] * 0.5 * vSize->y,
              sign[i][2] * 0.5 * vSize->z);
    }
    for (i = 0; i < 12; ++i) {
        LineSegment edge;
        assign(&edge.start, &corners[edgeIdx[i][0]]);
        assign(&edge.end, &corners[edgeIdx[i][1]]);
        if (intersectsMeshLine(mesh, nTriangles, &edge)) {
            return 1;
        }
    }
    return 0;
}
/*
 * Voxelizes a triangle mesh: scans the integer-aligned bounding box of the
 * mesh voxel by voxel, keeps a voxel when it is near the mesh AND its
 * connectivity target (co = 26 or 6) intersects the mesh, and writes each
 * kept voxel to outFile as a CSV tuple <ID,x,y,z,r,g,b>.
 * Always returns 0.
 *
 * mid = mesh id written per row; mc = mesh color; vSize = voxel dimensions.
 */
int voxelizeMesh(int num_threads, FILE *outFile, Vertex *vertices, unsigned int nVertices, Face **mesh, unsigned int nTriangles, Color *mc, int mid, Vertex *vSize, int co) {
    Vertex rMin, rMax;
    Voxel vMax;
    /* Bounding box already in Z3 */
    getRBoundingBox(vertices, nVertices, &rMin, &rMax);
    /* snap the lower corner down to integer coordinates */
    rMin.x = floor(rMin.x);
    rMin.y = floor(rMin.y);
    rMin.z = floor(rMin.z);
#ifdef DEBUG_LOG
    printf("Min: ");
    printVertex(&rMin);
    printf("\nMax: ");
    printVertex(&rMax);
#endif
    /* number of voxels per axis needed to cover the box */
    vMax.x = (unsigned int)ceil((rMax.x-rMin.x)/vSize->x);
    vMax.y = (unsigned int)ceil((rMax.y-rMin.y)/vSize->y);
    vMax.z = (unsigned int)ceil((rMax.z-rMin.z)/vSize->z);
#ifdef DEBUG_LOG
    printf("\nVMax: ");
    printVoxel(&vMax);
#endif
    /* Go voxel by voxel, in the bounding box of the mesh. note that
     * if the mesh is big it will slow down the whole process. It is
     * better to put in many small mesh objects than one big mesh object
     */
    /* NOTE(review): x/y/z are signed ints compared against unsigned voxel
       counts below -- fine for realistic sizes, but confirm vMax cannot
       exceed INT_MAX */
    int x, y, z;
    omp_set_num_threads(num_threads);
#pragma omp parallel for private(x, y, z)
    for (x = 0; x <= vMax.x; ++x) {
        for (y = 0; y <= vMax.y; ++y) {
            for (z = 0; z <= vMax.z; ++z) {
                Vertex voxelCenter;
                centerOfVoxel_(x, y, z, &rMin, vSize, &voxelCenter);
#ifdef DEBUG_LOG
                printf("\nVoxel: ");
                printVertex(&voxelCenter);
#endif
                /* Check nearness */
                /* cheap rejection: skip voxels far from every triangle */
                int inTarget = 1;
                if (!isNear(num_threads, mesh, nTriangles, &voxelCenter, vSize)) inTarget = 0;
                /* Check intersection */
                if (inTarget) {
                    if (co == 26) {
                        inTarget = intersectsMesh26(mesh, nTriangles, &voxelCenter, vSize);
                    } else if (co == 6) {
                        inTarget = intersectsMesh6(mesh, nTriangles, &voxelCenter, vSize);
                    } else {
                        printf("connectivity target undefined!");
                        inTarget = 0;
                    }
                }
                /* If it intersects, write it out */
                if (inTarget) {
                    /* writes every voxel as a tuple<ID,x,y,z,r,g,b> */
                    /* NOTE(review): fprintf is called concurrently from the
                       parallel region; POSIX makes each call atomic on the
                       stream, but row order is nondeterministic -- confirm
                       downstream consumers don't rely on ordering */
                    fprintf(outFile, "%d,%f,%f,%f,%d,%d,%d\n", mid, voxelCenter.x, voxelCenter.y, voxelCenter.z, mc->r, mc->g, mc->b);
                }
            }
        }
    }
    return 0;
}
|
diagmv_x_sky_u.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * Computes y := beta * y + alpha * x elementwise over the A->rows entries,
 * parallelized row-wise with OpenMP when available.
 * The skyline matrix A contributes only its row count here: for this
 * diagonal kernel (presumably a unit-diagonal variant, given the _u suffix
 * -- confirm against the dispatch table) the matrix values are never read.
 * Always returns ALPHA_SPARSE_STATUS_SUCCESS.
 */
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
                                      const ALPHA_SPMAT_SKY *A,
                                      const ALPHA_Number *x,
                                      const ALPHA_Number beta,
                                      ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    /* thread_num is unused when compiled without OpenMP */
    const ALPHA_INT thread_num = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        /* alpha_mul / alpha_madde abstract over real and complex number types */
        alpha_mul(y[i], beta, y[i]);
        alpha_madde(y[i], alpha, x[i]);
        // y[i] = beta * y[i] + alpha * x[i];
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
/*
 * Public entry point for this kernel; simply forwards to the OpenMP
 * implementation above with unchanged arguments.
 */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_SKY *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    return ONAME_omp(alpha, A, x, beta, y);
}
|
GB_unaryop__identity_fp64_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp64_int64
// op(A') function: GB_tran__identity_fp64_int64
// C type: double
// A type: int64_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
double z = (double) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (double) Ax [p] for all p in [0, anz): apply the identity
// operator with an int64 -> double typecast, in parallel.  Returns
// GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_unop__identity_fp64_int64
(
    double *Cx,         // Cx and Ax may be aliased
    int64_t *Ax,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    // iterations are independent; GB_CAST_OP expands to the cast + store
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (double) A': transpose A, typecast int64 -> double, and apply the
// identity operator.  The actual loop lives in the shared template
// GB_unaryop_transpose.c, specialized here via the GB_* macros above.
// Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_tran__identity_fp64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
lsm3d_blsm_openmp_v1.c | #include "openst/eikonal/lsm.h"
#define M_LSM3D_IMP_NAME "BLSMv1"
const char OPENST_LSM3D_COMPUTEPARTIAL_IMP_NAME[] = M_LSM3D_IMP_NAME;
const size_t OPENST_LSM3D_COMPUTEPARTIAL_IMP_NAME_LENGTH = sizeof(M_LSM3D_IMP_NAME);
/*
 * Block-parallel locked sweeping (BLSMv1) for the 3D eikonal equation.
 * Runs Gauss-Seidel-style sweeps from iteration start_iter up to max_iter,
 * each sweep processing the NBI x NBJ x NBK grid of blocks as anti-diagonal
 * wavefronts: all blocks with ir + jr + kr == levelr are data-independent
 * and are distributed across OpenMP threads.
 * Returns the number of iterations performed; *converged is set to 1 when
 * no block reported an update in the last completed sweep.
 */
int OpenST_LSM3D_ComputePartial(OPENST_FLOAT *U, char *LSM_UNLOCKED, OPENST_FLOAT *V,
                                size_t NI, size_t NJ, size_t NK,
                                OPENST_FLOAT HI, OPENST_FLOAT HJ, OPENST_FLOAT HK,
                                int start_iter, int max_iter, int *converged,
                                size_t BSIZE_I, size_t BSIZE_J, size_t BSIZE_K,
                                OPENST_FLOAT EPS){
    int total_it, it, notconvergedl, notconvergedt;
    int REVI, REVJ, REVK;
    size_t NBI, NBJ, NBK;
#if (_OPENMP > 200203)
    size_t levelr, K1, K2, kr, level, I1, I2, ir, jr;
#else
    /* OpenMP 2.0 and older only accept signed loop variables in worksharing
       loops, hence the ptrdiff_t fallback */
#pragma message("WARNING: size_t to ptrdiff_t cast enabled")
    ptrdiff_t levelr, K1, K2, kr, level, I1, I2, ir, jr;
#endif
    if(start_iter >= max_iter){
        return max_iter;
    }
    total_it = start_iter;
    notconvergedl = 0;
    /* number of blocks per axis, rounding up for partial blocks */
    NBI = NI/BSIZE_I + (NI % BSIZE_I > 0);
    NBJ = NJ/BSIZE_J + (NJ % BSIZE_J > 0);
    NBK = NK/BSIZE_K + (NK % BSIZE_K > 0);
#pragma omp parallel default(none) \
    shared(BSIZE_I, BSIZE_J, BSIZE_K, NBI, NBJ, NBK, \
    start_iter, total_it, notconvergedl, NI, NJ, NK, \
    U, LSM_UNLOCKED, V, HI, HJ, HK, max_iter, EPS) \
    private(it, REVI, REVJ, REVK, notconvergedt, \
    levelr, K1, K2, level, I1, I2, ir, jr, kr)
    {
        for(it = start_iter; it < max_iter; ++it){
            /* one thread bumps the shared counters; the others proceed */
#pragma omp single nowait
            {
                ++total_it;
                notconvergedl = 0;
            }
            notconvergedt = 0;
            /* sweep direction alternates per iteration (8 orderings) */
            OpenST_FSM3D_GetSweepOrder(it, &REVI, &REVJ, &REVK);
            /* walk the block grid as 2D-in-3D anti-diagonal wavefronts */
            for(levelr = 0; levelr < NBI + NBJ + NBK - 2; ++levelr){
                /* clamp kr to the feasible slab range for this wavefront */
                K1 = (NBI + NBJ - 2 < levelr) ?
                    (levelr - NBI - NBJ + 2) : 0;
                K2 = (NBK - 1 > levelr) ? levelr : NBK - 1;
                for(kr = K1; kr <= K2; ++kr){
                    level = levelr - kr;
                    I1 = (NBJ - 1 < level) ? (level - NBJ + 1) : 0;
                    I2 = (NBI - 1 > level) ? level : NBI - 1;
                    /* blocks on a diagonal are independent: share them out */
#pragma omp for nowait schedule(dynamic,1)
                    for(ir = I1; ir <= I2; ++ir){
                        jr = level - ir;
                        if(OpenST_LSM3D_BlockSerial(U, LSM_UNLOCKED, V,
                                                    NI, NJ, NK,
                                                    HI, HJ, HK,
                                                    REVI, REVJ, REVK,
                                                    ir * BSIZE_I, jr * BSIZE_J,
                                                    kr * BSIZE_K,
                                                    BSIZE_I, BSIZE_J, BSIZE_K,
                                                    EPS)){
                            notconvergedt = 1;
                        }
                    }
                }
                /* all threads finish this wavefront before the next starts */
#pragma omp barrier
            }
            /* combine per-thread convergence flags */
#pragma omp atomic
            notconvergedl += notconvergedt;
#pragma omp barrier
#pragma omp flush (notconvergedl)
            if(!notconvergedl){
                break;
            }
#pragma omp barrier
        }
    }
    *converged = (notconvergedl == 0);
    return total_it;
}
|
convolution_sgemm.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// GEMM over im2col-unrolled input: top_blob[outch][size] =
// kernel * bottom_im2col (+ bias).  On MIPS-MSA the input columns are
// repacked in groups of 4 and output channels are processed 8/4/1 at a
// time with vector FMA; without MSA a plain scalar GEMM is used.
static void im2col_sgemm_msa(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // Mat bottom_im2col(size, maxk, inch, 4u, 1, opt.workspace_allocator);
    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;
    const int outch = top_blob.c;
    const float* bias = _bias;
    // permute
    Mat tmp;
#if __mips_msa
    // tmp layout: one channel per group of 4 columns, plus one channel per
    // leftover column, so the inner GEMM reads contiguously
    if (size >= 4)
        tmp.create(4 * maxk, inch, size / 4 + size % 4, 4u, 1, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 4u, 1, opt.workspace_allocator);
    {
        int nn_size = size / 4;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 4;
            float* tmpptr = tmp.channel(i / 4);
            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i;
                for (int k = 0; k < maxk; k++)
                {
                    // copy 4 adjacent columns as one vector
                    __msa_st_w(__msa_ld_w(img0, 0), tmpptr, 0);
                    img0 += size;
                    tmpptr += 4;
                }
            }
        }
        // leftover columns (size % 4), packed one per tmp channel
        int remain_size_start = nn_size * 4;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 4 + i % 4);
            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i;
                for (int k = 0; k < maxk; k++)
                {
                    tmpptr[0] = img0[0];
                    img0 += size;
                    tmpptr += 1;
                }
            }
        }
    }
#else // __mips_msa
    // scalar path: one tmp channel per output column
    tmp.create(maxk, inch, size, 4u, 1, opt.workspace_allocator);
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = 0; i < size; i++)
        {
            float* tmpptr = tmp.channel(i);
            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i;
                for (int k = 0; k < maxk; k++)
                {
                    tmpptr[0] = img0[0];
                    img0 += size;
                    tmpptr += 1;
                }
            }
        }
    }
#endif // __mips_msa
#if __mips_msa
    // main GEMM: 8 output channels at a time, then 4, then 1
    int nn_outch = outch >> 3;
    int remain_outch_start = nn_outch << 3;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 8;
        float* outptr0 = top_blob.channel(p);
        float* outptr1 = top_blob.channel(p + 1);
        float* outptr2 = top_blob.channel(p + 2);
        float* outptr3 = top_blob.channel(p + 3);
        float* outptr4 = top_blob.channel(p + 4);
        float* outptr5 = top_blob.channel(p + 5);
        float* outptr6 = top_blob.channel(p + 6);
        float* outptr7 = top_blob.channel(p + 7);
        const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
        const float* biasptr = bias ? bias + p : zeros;
        int i = 0;
        // 8 output channels x 4 columns per inner iteration
        for (; i + 3 < size; i += 4)
        {
            const float* tmpptr = tmp.channel(i / 4);
            const float* kptr = kernel.channel(p / 8);
            int nn = inch * maxk; // inch always > 0
            v4f32 _sum0 = __msa_fill_w_f32(biasptr[0]);
            v4f32 _sum1 = __msa_fill_w_f32(biasptr[1]);
            v4f32 _sum2 = __msa_fill_w_f32(biasptr[2]);
            v4f32 _sum3 = __msa_fill_w_f32(biasptr[3]);
            v4f32 _sum4 = __msa_fill_w_f32(biasptr[4]);
            v4f32 _sum5 = __msa_fill_w_f32(biasptr[5]);
            v4f32 _sum6 = __msa_fill_w_f32(biasptr[6]);
            v4f32 _sum7 = __msa_fill_w_f32(biasptr[7]);
            for (int q = 0; q < nn; q++)
            {
                __builtin_prefetch(tmpptr + 16);
                __builtin_prefetch(kptr + 32);
                v4f32 _val = (v4f32)__msa_ld_w(tmpptr, 0);
                v4i32 _w0123 = __msa_ld_w(kptr, 0);
                v4i32 _w4567 = __msa_ld_w(kptr + 4, 0);
                // broadcast each weight and accumulate across 4 columns
                _sum0 = __msa_fmadd_w(_sum0, _val, (v4f32)__msa_splati_w(_w0123, 0));
                _sum1 = __msa_fmadd_w(_sum1, _val, (v4f32)__msa_splati_w(_w0123, 1));
                _sum2 = __msa_fmadd_w(_sum2, _val, (v4f32)__msa_splati_w(_w0123, 2));
                _sum3 = __msa_fmadd_w(_sum3, _val, (v4f32)__msa_splati_w(_w0123, 3));
                _sum4 = __msa_fmadd_w(_sum4, _val, (v4f32)__msa_splati_w(_w4567, 0));
                _sum5 = __msa_fmadd_w(_sum5, _val, (v4f32)__msa_splati_w(_w4567, 1));
                _sum6 = __msa_fmadd_w(_sum6, _val, (v4f32)__msa_splati_w(_w4567, 2));
                _sum7 = __msa_fmadd_w(_sum7, _val, (v4f32)__msa_splati_w(_w4567, 3));
                tmpptr += 4;
                kptr += 8;
            }
            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr1, 0);
            __msa_st_w((v4i32)_sum2, outptr2, 0);
            __msa_st_w((v4i32)_sum3, outptr3, 0);
            __msa_st_w((v4i32)_sum4, outptr4, 0);
            __msa_st_w((v4i32)_sum5, outptr5, 0);
            __msa_st_w((v4i32)_sum6, outptr6, 0);
            __msa_st_w((v4i32)_sum7, outptr7, 0);
            outptr0 += 4;
            outptr1 += 4;
            outptr2 += 4;
            outptr3 += 4;
            outptr4 += 4;
            outptr5 += 4;
            outptr6 += 4;
            outptr7 += 4;
        }
        // leftover columns, scalar accumulation
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 4 + i % 4);
            const float* kptr = kernel.channel(p / 8);
            int nn = inch * maxk; // inch always > 0
            float sum0 = biasptr[0];
            float sum1 = biasptr[1];
            float sum2 = biasptr[2];
            float sum3 = biasptr[3];
            float sum4 = biasptr[4];
            float sum5 = biasptr[5];
            float sum6 = biasptr[6];
            float sum7 = biasptr[7];
            for (int q = 0; q < nn; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[0] * kptr[1];
                sum2 += tmpptr[0] * kptr[2];
                sum3 += tmpptr[0] * kptr[3];
                sum4 += tmpptr[0] * kptr[4];
                sum5 += tmpptr[0] * kptr[5];
                sum6 += tmpptr[0] * kptr[6];
                sum7 += tmpptr[0] * kptr[7];
                tmpptr++;
                kptr += 8;
            }
            outptr0[0] = sum0;
            outptr1[0] = sum1;
            outptr2[0] = sum2;
            outptr3[0] = sum3;
            outptr4[0] = sum4;
            outptr5[0] = sum5;
            outptr6[0] = sum6;
            outptr7[0] = sum7;
            outptr0++;
            outptr1++;
            outptr2++;
            outptr3++;
            outptr4++;
            outptr5++;
            outptr6++;
            outptr7++;
        }
    }
    // remaining output channels in groups of 4
    nn_outch = (outch - remain_outch_start) >> 2;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;
        float* outptr0 = top_blob.channel(p);
        float* outptr1 = top_blob.channel(p + 1);
        float* outptr2 = top_blob.channel(p + 2);
        float* outptr3 = top_blob.channel(p + 3);
        const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
        const float* biasptr = bias ? bias + p : zeros;
        int i = 0;
        for (; i + 3 < size; i += 4)
        {
            const float* tmpptr = tmp.channel(i / 4);
            // kernel channel index mirrors the 8/4/1 packing scheme
            const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
            int nn = inch * maxk; // inch always > 0
            v4f32 _sum0 = __msa_fill_w_f32(biasptr[0]);
            v4f32 _sum1 = __msa_fill_w_f32(biasptr[1]);
            v4f32 _sum2 = __msa_fill_w_f32(biasptr[2]);
            v4f32 _sum3 = __msa_fill_w_f32(biasptr[3]);
            for (int q = 0; q < nn; q++)
            {
                __builtin_prefetch(tmpptr + 16);
                __builtin_prefetch(kptr + 16);
                v4f32 _val = (v4f32)__msa_ld_w(tmpptr, 0);
                v4i32 _w0123 = __msa_ld_w(kptr, 0);
                _sum0 = __msa_fmadd_w(_sum0, _val, (v4f32)__msa_splati_w(_w0123, 0));
                _sum1 = __msa_fmadd_w(_sum1, _val, (v4f32)__msa_splati_w(_w0123, 1));
                _sum2 = __msa_fmadd_w(_sum2, _val, (v4f32)__msa_splati_w(_w0123, 2));
                _sum3 = __msa_fmadd_w(_sum3, _val, (v4f32)__msa_splati_w(_w0123, 3));
                tmpptr += 4;
                kptr += 4;
            }
            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr1, 0);
            __msa_st_w((v4i32)_sum2, outptr2, 0);
            __msa_st_w((v4i32)_sum3, outptr3, 0);
            outptr0 += 4;
            outptr1 += 4;
            outptr2 += 4;
            outptr3 += 4;
        }
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 4 + i % 4);
            const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);
            int nn = inch * maxk; // inch always > 0
            float sum0 = biasptr[0];
            float sum1 = biasptr[1];
            float sum2 = biasptr[2];
            float sum3 = biasptr[3];
            for (int q = 0; q < nn; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[0] * kptr[1];
                sum2 += tmpptr[0] * kptr[2];
                sum3 += tmpptr[0] * kptr[3];
                tmpptr++;
                kptr += 4;
            }
            outptr0[0] = sum0;
            outptr1[0] = sum1;
            outptr2[0] = sum2;
            outptr3[0] = sum3;
            outptr0++;
            outptr1++;
            outptr2++;
            outptr3++;
        }
    }
    // final single output channels
    remain_outch_start += nn_outch << 2;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);
        const float bias0 = bias ? bias[p] : 0.f;
        int i = 0;
        for (; i + 3 < size; i += 4)
        {
            const float* tmpptr = tmp.channel(i / 4);
            const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
            int nn = inch * maxk; // inch always > 0
            v4f32 _sum0 = __msa_fill_w_f32(bias0);
            for (int q = 0; q < nn; q++)
            {
                _sum0 = __msa_fmadd_w(_sum0, __msa_fill_w_f32(kptr[0]), (v4f32)__msa_ld_w(tmpptr, 0));
                tmpptr += 4;
                kptr++;
            }
            __msa_st_w((v4i32)_sum0, outptr0, 0);
            outptr0 += 4;
        }
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 4 + i % 4);
            const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
            int nn = inch * maxk; // inch always > 0
            float sum0 = bias0;
            for (int q = 0; q < nn; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                tmpptr++;
                kptr++;
            }
            outptr0[0] = sum0;
            outptr0++;
        }
    }
#else // __mips_msa
    // scalar fallback GEMM, one output channel per thread
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);
        const float bias0 = bias ? bias[p] : 0.f;
        for (int i = 0; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i);
            const float* kptr = kernel.channel(p);
            int nn = inch * maxk; // inch always > 0
            float sum0 = bias0;
            for (int q = 0; q < nn; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                tmpptr++;
                kptr++;
            }
            outptr0[0] = sum0;
            outptr0++;
        }
    }
#endif // __mips_msa
}
// Repacks convolution weights for the sgemm kernel above: output channels
// are interleaved in groups of 8, then 4, then 1, so the GEMM inner loop
// can read 8 (or 4) consecutive weights per input element.  Without MSA
// the kernel is passed through unchanged.
static void convolution_im2col_sgemm_transform_kernel_msa(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;
    // interleave
    // src = maxk-inch-outch
    // dst = 8b-maxk-inch-outch/8b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
#if __mips_msa
    kernel_tm.create(8 * maxk, inch, outch / 8 + (outch % 8) / 4 + outch % 4);
    int q = 0;
    // groups of 8 output channels
    for (; q + 7 < outch; q += 8)
    {
        const Mat k0 = kernel.channel(q);
        const Mat k1 = kernel.channel(q + 1);
        const Mat k2 = kernel.channel(q + 2);
        const Mat k3 = kernel.channel(q + 3);
        const Mat k4 = kernel.channel(q + 4);
        const Mat k5 = kernel.channel(q + 5);
        const Mat k6 = kernel.channel(q + 6);
        const Mat k7 = kernel.channel(q + 7);
        float* g00 = kernel_tm.channel(q / 8);
        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);
            const float* k10 = k1.row(p);
            const float* k20 = k2.row(p);
            const float* k30 = k3.row(p);
            const float* k40 = k4.row(p);
            const float* k50 = k5.row(p);
            const float* k60 = k6.row(p);
            const float* k70 = k7.row(p);
            for (int k = 0; k < maxk; k++)
            {
                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];
                g00[4] = k40[k];
                g00[5] = k50[k];
                g00[6] = k60[k];
                g00[7] = k70[k];
                g00 += 8;
            }
        }
    }
    // groups of 4 output channels
    for (; q + 3 < outch; q += 4)
    {
        const Mat k0 = kernel.channel(q);
        const Mat k1 = kernel.channel(q + 1);
        const Mat k2 = kernel.channel(q + 2);
        const Mat k3 = kernel.channel(q + 3);
        float* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4);
        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);
            const float* k10 = k1.row(p);
            const float* k20 = k2.row(p);
            const float* k30 = k3.row(p);
            for (int k = 0; k < maxk; k++)
            {
                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];
                g00 += 4;
            }
        }
    }
    // remaining single output channels
    for (; q < outch; q++)
    {
        const Mat k0 = kernel.channel(q);
        float* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4 + q % 4);
        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);
            for (int k = 0; k < maxk; k++)
            {
                g00[0] = k00[k];
                g00 += 1;
            }
        }
    }
#else
    kernel_tm = kernel;
#endif // __mips_msa
}
// Full im2col + GEMM convolution: unrolls the input into a
// (size x maxk x inch) matrix, honoring stride and dilation, then calls
// im2col_sgemm_msa to produce top_blob.
static void convolution_im2col_sgemm_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;
    // im2col
    Mat bottom_im2col(size, maxk, inch, 4u, 1, opt.workspace_allocator);
    {
        // gap skips from the end of one output row to the start of the next
        const int gap = w * stride_h - outw * stride_w;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* ptr = bottom_im2col.channel(p);
            // one row of bottom_im2col per kernel tap (u, v)
            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const float* sptr = img.row<const float>(dilation_h * u) + dilation_w * v;
                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            ptr[0] = sptr[0];
                            sptr += stride_w;
                            ptr += 1;
                        }
                        sptr += gap;
                    }
                }
            }
        }
    }
    im2col_sgemm_msa(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
compress.c |
/*-------------------------------------------------------------*/
/*--- Compression machinery (not incl block sorting) ---*/
/*--- compress.c ---*/
/*-------------------------------------------------------------*/
/* ------------------------------------------------------------------
This file is part of bzip2/libbzip2, a program and library for
lossless, block-sorting data compression.
bzip2/libbzip2 version 1.0.6 of 6 September 2010
Copyright (C) 1996-2010 Julian Seward <jseward@bzip.org>
Please read the WARNING, DISCLAIMER and PATENTS sections in the
README file.
This program is released under the terms of the license contained
in the file LICENSE.
------------------------------------------------------------------ */
/* CHANGES
0.9.0 -- original version.
0.9.0a/b -- no changes in this file.
0.9.0c -- changed setting of nGroups in sendMTFValues()
so as to do a bit better on small files
*/
#include "bzlib_private.h"
#include<omp.h>
#include<pthread.h>
#include<semaphore.h>
/*---------------------------------------------------*/
/*--- Bit stream I/O ---*/
/*---------------------------------------------------*/
/*---------------------------------------------------*/
/* Reset the output bit-stream accumulator before writing a new stream. */
void BZ2_bsInitWrite ( EState* s )
{
   s->bsLive = 0;   /* number of pending bits currently held in bsBuff */
   s->bsBuff = 0;   /* bit accumulator; bits are emitted MSB-first */
}
/*---------------------------------------------------*/
/*---------------------------------------------------*/
/* Flush any bits still pending in the accumulator out to s->zbits,
   one byte (the top 8 bits of bsBuff) at a time. */
static
void bsFinishWrite ( EState* s )
{
   for (; s->bsLive > 0; s->bsLive -= 8) {
      s->zbits[s->numZ] = (UChar)(s->bsBuff >> 24);
      s->numZ++;
      s->bsBuff <<= 8;
   }
}
/*---------------------------------------------------*/
#define bsNEEDW(nz) \
{ \
while (s->bsLive >= 8) { \
s->zbits[s->numZ] \
= (UChar)(s->bsBuff >> 24); \
s->numZ++; \
s->bsBuff <<= 8; \
s->bsLive -= 8; \
} \
}
/*---------------------------------------------------*/
/* Append the n low-order bits of v to the output bit stream.
   bsNEEDW first drains whole bytes from the accumulator so the new bits
   fit below the 32-bit boundary. */
static
__inline__
void bsW ( EState* s, Int32 n, UInt32 v )
{
   bsNEEDW ( n );
   /* place v just below the bits already pending (MSB-first packing) */
   s->bsBuff |= (v << (32 - s->bsLive - n));
   s->bsLive += n;
}
/*---------------------------------------------------*/
/*---------------------------------------------------*/
/* Write a 32-bit value to the bit stream, big-endian (most significant
   byte first). */
static
void bsPutUInt32 ( EState* s, UInt32 u )
{
   Int32 shift;
   for (shift = 24; shift >= 0; shift -= 8)
      bsW ( s, 8, (u >> shift) & 0xffL );
}
/*---------------------------------------------------*/
/*---------------------------------------------------*/
/* Write a single byte to the bit stream. */
static
void bsPutUChar ( EState* s, UChar c )
{
   bsW( s, 8, (UInt32)c );
}
/*---------------------------------------------------*/
/*--- The back end proper ---*/
/*---------------------------------------------------*/
/*---------------------------------------------------*/
/*---------------------------------------------------*/
/* Build the dense symbol mapping for the encoder: assigns each byte value
   marked in s->inUse[] a consecutive code in s->unseqToSeq[], and counts
   the distinct values in s->nInUse. */
static
void makeMaps_e ( EState* s )
{
   Int32 i;
   s->nInUse = 0;
   for (i = 0; i < 256; i++) {
      if (!s->inUse[i]) continue;
      s->unseqToSeq[i] = s->nInUse;
      s->nInUse++;
   }
}
/*---------------------------------------------------*/
/* Move-to-front transform of the sorted block, with zero-run-length
   encoding: runs of MTF value 0 are emitted as RUNA/RUNB symbols
   (bijective base-2), all other values as themselves + 1, terminated by
   the EOB symbol.  Also accumulates symbol frequencies in s->mtfFreq. */
static
void generateMTFValues ( EState* s )
{
   UChar   yy[256];       /* move-to-front list of in-use symbols */
   Int32   i, j;
   Int32   zPend;         /* length of the pending run of zeroes */
   Int32   wr;            /* write index into mtfv */
   Int32   EOB;
   /*
      After sorting (eg, here),
         s->arr1 [ 0 .. s->nblock-1 ] holds sorted order,
      and
         ((UChar*)s->arr2) [ 0 .. s->nblock-1 ]
      holds the original block data.
      The first thing to do is generate the MTF values,
      and put them in
         ((UInt16*)s->arr1) [ 0 .. s->nblock-1 ].
      Because there are strictly fewer or equal MTF values
      than block values, ptr values in this area are overwritten
      with MTF values only when they are no longer needed.
      The final compressed bitstream is generated into the
      area starting at
         (UChar*) (&((UChar*)s->arr2)[s->nblock])
      These storage aliases are set up in bzCompressInit(),
      except for the last one, which is arranged in
      compressBlock().
   */
   UInt32* ptr   = s->ptr;
   UChar* block  = s->block;
   UInt16* mtfv  = s->mtfv;
   makeMaps_e ( s );
   EOB = s->nInUse+1;
   for (i = 0; i <= EOB; i++) s->mtfFreq[i] = 0;
   wr = 0;
   zPend = 0;
   for (i = 0; i < s->nInUse; i++) yy[i] = (UChar) i;
   for (i = 0; i < s->nblock; i++) {
      UChar ll_i;
      AssertD ( wr <= i, "generateMTFValues(1)" );
      /* the BWT output byte is the one preceding each sorted rotation */
      j = ptr[i]-1; if (j < 0) j += s->nblock;
      ll_i = s->unseqToSeq[block[j]];
      AssertD ( ll_i < s->nInUse, "generateMTFValues(2a)" );
      if (yy[0] == ll_i) {
         /* symbol already at the front: extend the zero run */
         zPend++;
      } else {
         if (zPend > 0) {
            /* emit the pending zero run as RUNA/RUNB digits
               (bijective base-2 encoding of the run length) */
            zPend--;
            while (True) {
               if (zPend & 1) {
                  mtfv[wr] = BZ_RUNB; wr++;
                  s->mtfFreq[BZ_RUNB]++;
               } else {
                  mtfv[wr] = BZ_RUNA; wr++;
                  s->mtfFreq[BZ_RUNA]++;
               }
               if (zPend < 2) break;
               zPend = (zPend - 2) / 2;
            };
            zPend = 0;
         }
         {
            /* move ll_i to the front of yy[], shifting the symbols that
               preceded it one place back; j ends up as its old position */
            register UChar  rtmp;
            register UChar* ryy_j;
            register UChar  rll_i;
            rtmp  = yy[1];
            yy[1] = yy[0];
            ryy_j = &(yy[1]);
            rll_i = ll_i;
            while ( rll_i != rtmp ) {
               register UChar rtmp2;
               ryy_j++;
               rtmp2  = rtmp;
               rtmp   = *ryy_j;
               *ryy_j = rtmp2;
            };
            yy[0] = rtmp;
            j = ryy_j - &(yy[0]);
            mtfv[wr] = j+1; wr++; s->mtfFreq[j+1]++;
         }
      }
   }
   /* flush any zero run left at the end of the block */
   if (zPend > 0) {
      zPend--;
      while (True) {
         if (zPend & 1) {
            mtfv[wr] = BZ_RUNB; wr++;
            s->mtfFreq[BZ_RUNB]++;
         } else {
            mtfv[wr] = BZ_RUNA; wr++;
            s->mtfFreq[BZ_RUNA]++;
         }
         if (zPend < 2) break;
         zPend = (zPend - 2) / 2;
      };
      zPend = 0;
   }
   mtfv[wr] = EOB; wr++; s->mtfFreq[EOB]++;
   s->nMTF = wr;
}
/*---------------------------------------------------*/
#define BZ_LESSER_ICOST 0
#define BZ_GREATER_ICOST 15
/*---
   Second stage of compression: Huffman-code the MTF/RLE2 symbol stream
   (s->mtfv[0 .. s->nMTF-1], built by generateMTFValues) and emit the
   coding tables, the per-group table selectors and the coded symbols
   into the output bit stream via bsW().
---*/
static
void sendMTFValues ( EState* s )
{
   Int32 v, t, i, j, gs, ge, totc, bt, bc, iter;
   Int32 nSelectors, alphaSize, minLen, maxLen, selCtr;
   Int32 nGroups, nBytes;
   /*--
      UChar len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
      is a global since the decoder also needs it.
      Int32 code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
      Int32 rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
      are also globals only used in this proc.
      Made global to keep stack frame size small.
   --*/
   UInt16 cost[BZ_N_GROUPS];
   Int32 fave[BZ_N_GROUPS];
   UInt16* mtfv = s->mtfv;
   if (s->verbosity >= 3)
      VPrintf3( " %d in block, %d after MTF & 1-2 coding, "
                "%d+2 syms in use\n",
                s->nblock, s->nMTF, s->nInUse );
   /* Alphabet = symbols in use + RUNA/RUNB + the EOB symbol. */
   alphaSize = s->nInUse+2;
   /* Start every table with uniformly expensive code lengths. */
   for (t = 0; t < BZ_N_GROUPS; t++)
      for (v = 0; v < alphaSize; v++)
         s->len[t][v] = BZ_GREATER_ICOST;
   /*--- Decide how many coding tables to use ---*/
   AssertH ( s->nMTF > 0, 3001 );
   if (s->nMTF < 200) nGroups = 2; else
   if (s->nMTF < 600) nGroups = 3; else
   if (s->nMTF < 1200) nGroups = 4; else
   if (s->nMTF < 2400) nGroups = 5; else
   nGroups = 6;
   /*--- Generate an initial set of coding tables ---*/
   {
      Int32 nPart, remF, tFreq, aFreq;
      /* Partition the frequency mass into nGroups roughly equal slices;
         each slice seeds one table with cheap costs for its own symbols. */
      nPart = nGroups;
      remF = s->nMTF;
      gs = 0;
      while (nPart > 0) {
         tFreq = remF / nPart;
         ge = gs-1;
         aFreq = 0;
         while (aFreq < tFreq && ge < alphaSize-1) {
            ge++;
            aFreq += s->mtfFreq[ge];
         }
         /* Pull back one symbol for alternating interior parts to keep the
            slices balanced. */
         if (ge > gs
             && nPart != nGroups && nPart != 1
             && ((nGroups-nPart) % 2 == 1)) {
            aFreq -= s->mtfFreq[ge];
            ge--;
         }
         if (s->verbosity >= 3)
            VPrintf5( " initial group %d, [%d .. %d], "
                      "has %d syms (%4.1f%%)\n",
                      nPart, gs, ge, aFreq,
                      (100.0 * (float)aFreq) / (float)(s->nMTF) );
         for (v = 0; v < alphaSize; v++)
            if (v >= gs && v <= ge)
               s->len[nPart-1][v] = BZ_LESSER_ICOST; else
               s->len[nPart-1][v] = BZ_GREATER_ICOST;
         nPart--;
         gs = ge+1;
         remF -= aFreq;
      }
   }
   /*---
      Iterate up to BZ_N_ITERS times to improve the tables.
   ---*/
   for (iter = 0; iter < BZ_N_ITERS; iter++) {
      for (t = 0; t < nGroups; t++) fave[t] = 0;
      for (t = 0; t < nGroups; t++)
         for (v = 0; v < alphaSize; v++)
            s->rfreq[t][v] = 0;
      /*---
        Set up an auxiliary length table which is used to fast-track
        the common case (nGroups == 6).  Each 32-bit entry packs two
        16-bit code lengths, so three adds cost all six tables at once.
      ---*/
      if (nGroups == 6) {
         for (v = 0; v < alphaSize; v++) {
            s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
            s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
            s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
         }
      }
      nSelectors = 0;
      totc = 0;
      gs = 0;
      /* Walk the MTF stream in groups of (at most) BZ_G_SIZE symbols. */
      while (True) {
         /*--- Set group start & end marks. --*/
         if (gs >= s->nMTF) break;
         ge = gs + BZ_G_SIZE - 1;
         if (ge >= s->nMTF) ge = s->nMTF-1;
         /*--
            Calculate the cost of this group as coded
            by each of the coding tables.
         --*/
         for (t = 0; t < nGroups; t++) cost[t] = 0;
         if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/
            register UInt32 cost01, cost23, cost45;
            register UInt16 icv;
            cost01 = cost23 = cost45 = 0;
#           define BZ_ITER(nn)                \
               icv = mtfv[gs+(nn)];           \
               cost01 += s->len_pack[icv][0]; \
               cost23 += s->len_pack[icv][1]; \
               cost45 += s->len_pack[icv][2]; \

            BZ_ITER(0); BZ_ITER(1); BZ_ITER(2); BZ_ITER(3); BZ_ITER(4);
            BZ_ITER(5); BZ_ITER(6); BZ_ITER(7); BZ_ITER(8); BZ_ITER(9);
            BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
            BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
            BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
            BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
            BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
            BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
            BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
            BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);
#           undef BZ_ITER
            /* Unpack the two 16-bit lanes of each accumulator. */
            cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
            cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
            cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;
         } else {
            /*--- slow version which correctly handles all situations ---*/
            for (i = gs; i <= ge; i++) {
               UInt16 icv = mtfv[i];
               for (t = 0; t < nGroups; t++) cost[t] += s->len[t][icv];
            }
         }
         /*--
            Find the coding table which is best for this group,
            and record its identity in the selector table.
         --*/
         bc = 999999999; bt = -1;
         for (t = 0; t < nGroups; t++)
            if (cost[t] < bc) { bc = cost[t]; bt = t; };
         totc += bc;
         fave[bt]++;
         s->selector[nSelectors] = bt;
         nSelectors++;
         /*--
            Increment the symbol frequencies for the selected table.
         --*/
         if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/
#           define BZ_ITUR(nn) s->rfreq[bt][ mtfv[gs+(nn)] ]++

            BZ_ITUR(0); BZ_ITUR(1); BZ_ITUR(2); BZ_ITUR(3); BZ_ITUR(4);
            BZ_ITUR(5); BZ_ITUR(6); BZ_ITUR(7); BZ_ITUR(8); BZ_ITUR(9);
            BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
            BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
            BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
            BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
            BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
            BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
            BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
            BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);
#           undef BZ_ITUR
         } else {
            /*--- slow version which correctly handles all situations ---*/
            for (i = gs; i <= ge; i++)
               s->rfreq[bt][ mtfv[i] ]++;
         }
         gs = ge+1;
      }
      if (s->verbosity >= 3) {
         VPrintf2 ( " pass %d: size is %d, grp uses are ",
                    iter+1, totc/8 );
         for (t = 0; t < nGroups; t++)
            VPrintf1 ( "%d ", fave[t] );
         VPrintf0 ( "\n" );
      }
      /*--
         Recompute the tables based on the accumulated frequencies.
      --*/
      /* maxLen was changed from 20 to 17 in bzip2-1.0.3. See
         comment in huffman.c for details. */
      for (t = 0; t < nGroups; t++)
         BZ2_hbMakeCodeLengths ( &(s->len[t][0]), &(s->rfreq[t][0]),
                                 alphaSize, 17 /*20*/ );
   }
   AssertH( nGroups < 8, 3002 );
   AssertH( /* nSelectors < 32768 && */
            nSelectors <= BZ_MAX_SELECTORS,
            3003 );
   /*--- Compute MTF values for the selectors. ---*/
   {
      UChar pos[BZ_N_GROUPS], ll_i, tmp2, tmp;
      for (i = 0; i < nGroups; i++) pos[i] = i;
      for (i = 0; i < nSelectors; i++) {
         ll_i = s->selector[i];
         /* Move-to-front: j ends as the current rank of table ll_i. */
         j = 0;
         tmp = pos[j];
         while ( ll_i != tmp ) {
            j++;
            tmp2 = tmp;
            tmp = pos[j];
            pos[j] = tmp2;
         };
         pos[0] = tmp;
         s->selectorMtf[i] = j;
      }
   };
   /*--- Assign actual codes for the tables. --*/
   for (t = 0; t < nGroups; t++) {
      minLen = 32;
      maxLen = 0;
      for (i = 0; i < alphaSize; i++) {
         if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
         if (s->len[t][i] < minLen) minLen = s->len[t][i];
      }
      AssertH ( !(maxLen > 17 /*20*/ ), 3004 );
      AssertH ( !(minLen < 1), 3005 );
      BZ2_hbAssignCodes ( &(s->code[t][0]), &(s->len[t][0]),
                          minLen, maxLen, alphaSize );
   }
   /*--- Transmit the mapping table. ---*/
   {
      /* Two-level bitmap: 16 coarse bits, then 16 fine bits per used group. */
      Bool inUse16[16];
      for (i = 0; i < 16; i++) {
         inUse16[i] = False;
         for (j = 0; j < 16; j++)
            if (s->inUse[i * 16 + j]) inUse16[i] = True;
      }
      nBytes = s->numZ;
      for (i = 0; i < 16; i++)
         if (inUse16[i]) bsW(s,1,1); else bsW(s,1,0);
      for (i = 0; i < 16; i++)
         if (inUse16[i])
            for (j = 0; j < 16; j++) {
               if (s->inUse[i * 16 + j]) bsW(s,1,1); else bsW(s,1,0);
            }
      if (s->verbosity >= 3)
         VPrintf1( " bytes: mapping %d, ", s->numZ-nBytes );
   }
   /*--- Now the selectors. ---*/
   nBytes = s->numZ;
   /* NOTE(review): field widths below were widened from the standard bzip2
      format (3 and 15 bits); the resulting stream is NOT decodable by stock
      bzip2 — a matching modified decoder is required. */
   bsW ( s, 5 /* 3 */, nGroups ); // changed @aditya
   bsW ( s, 17 /* 15 */, nSelectors ); // changed @aditya
   /* Selector MTF values are sent in unary (j ones then a zero). */
   for (i = 0; i < nSelectors; i++) {
      for (j = 0; j < s->selectorMtf[i]; j++) bsW(s,1,1);
      bsW(s,1,0);
   }
   if (s->verbosity >= 3)
      VPrintf1( "selectors %d, ", s->numZ-nBytes );
   /*--- Now the coding tables. ---*/
   nBytes = s->numZ;
   /* Code lengths are delta-encoded: 5-bit start, then +1/-1 steps. */
   for (t = 0; t < nGroups; t++) {
      Int32 curr = s->len[t][0];
      bsW ( s, 5, curr );
      for (i = 0; i < alphaSize; i++) {
         while (curr < s->len[t][i]) { bsW(s,2,2); curr++; /* 10 */ };
         while (curr > s->len[t][i]) { bsW(s,2,3); curr--; /* 11 */ };
         bsW ( s, 1, 0 );
      }
   }
   if (s->verbosity >= 3)
      VPrintf1 ( "code lengths %d, ", s->numZ-nBytes );
   /*--- And finally, the block data proper ---*/
   nBytes = s->numZ;
   selCtr = 0;
   gs = 0;
   while (True) {
      if (gs >= s->nMTF) break;
      ge = gs + BZ_G_SIZE - 1;
      if (ge >= s->nMTF) ge = s->nMTF-1;
      AssertH ( s->selector[selCtr] < nGroups, 3006 );
      if (nGroups == 6 && 50 == ge-gs+1) {
         /*--- fast track the common case ---*/
         UInt16 mtfv_i;
         UChar* s_len_sel_selCtr
            = &(s->len[s->selector[selCtr]][0]);
         Int32* s_code_sel_selCtr
            = &(s->code[s->selector[selCtr]][0]);
#        define BZ_ITAH(nn)                      \
            mtfv_i = mtfv[gs+(nn)];              \
            bsW ( s,                             \
                  s_len_sel_selCtr[mtfv_i],      \
                  s_code_sel_selCtr[mtfv_i] )

         BZ_ITAH(0); BZ_ITAH(1); BZ_ITAH(2); BZ_ITAH(3); BZ_ITAH(4);
         BZ_ITAH(5); BZ_ITAH(6); BZ_ITAH(7); BZ_ITAH(8); BZ_ITAH(9);
         BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
         BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
         BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
         BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
         BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
         BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
         BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
         BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);
#        undef BZ_ITAH
      } else {
         /*--- slow version which correctly handles all situations ---*/
         for (i = gs; i <= ge; i++) {
            bsW ( s,
                  s->len [s->selector[selCtr]] [mtfv[i]],
                  s->code [s->selector[selCtr]] [mtfv[i]] );
         }
      }
      gs = ge+1;
      selCtr++;
   }
   AssertH( selCtr == nSelectors, 3007 );
   if (s->verbosity >= 3)
      VPrintf1( "codes %d\n", s->numZ-nBytes );
}
/*---
   Merge the two partial suffix-sort index arrays produced by the GPU
   (DC3/skew-style: first sort covers mod-1/mod-2 suffixes, second sort
   covers mod-0 suffixes) into the final BWT order in s->arr1, and record
   s->origPtr, the output position of suffix 0.

   Bug fix vs. the previous version: the origPtr check was skipped on two
   `continue` fast paths, so when suffix 0 was merged through the
   first-character comparison path origPtr stayed -1 and AssertH(1003)
   fired.  The comparison is now factored into a takeFirst flag and the
   origPtr check runs for every emitted position.  The C++-only
   `cond ? a = x : a = y` ternary-assignments were also rewritten as
   plain if/else, which is valid in both C and C++.
---*/
void merge_two_sort_arrays ( EState *s )
{
   unsigned char *block               = (unsigned char*) s->arr2;
   unsigned int  *h_first_sort_rank   = (unsigned int*) s->arr1_first_sort_rank;
   unsigned int  *h_first_sort_index  = (unsigned int*) s->arr1_first_sort;
   unsigned int  *h_second_sort_index = (unsigned int*) s->arr1_second_sort;
   unsigned int  *order               = (unsigned int*) s->arr1;
   /* stores position of index 0 in BWT transform */
   s->origPtr = -1;
   int originalLength   = s->nblock;
   int firstSortLength  = s->first_sort_length;
   int secondSortLength = originalLength - firstSortLength;
   int countOrderArr  = 0;
   int countFirstArr  = 0;
   int countSecondArr = 0;
   int indexFirst;
   int indexSecond;
   for (countOrderArr = 0;
        countOrderArr < originalLength
           && countFirstArr < firstSortLength
           && countSecondArr < secondSortLength;
        countOrderArr++) {
      /* takeFirst != 0  =>  next output index comes from the first sort.
         Defaulted for safety; by the DC3 invariant every reachable case
         below assigns it explicitly. */
      int takeFirst = 0;
      indexFirst  = h_first_sort_index[countFirstArr];
      indexSecond = h_second_sort_index[countSecondArr];
      if (block[indexFirst] != block[indexSecond]) {
         /* First characters differ: order decided immediately. */
         takeFirst = block[indexFirst] < block[indexSecond];
      } else if (indexFirst == originalLength - 1) {
         /* First suffix wraps around the end of the block. */
         if (block[0] == block[indexSecond + 1]) {
            if (indexSecond == originalLength - 2) {
               if (block[1] == block[0]) {
                  takeFirst = h_first_sort_rank[2] < h_first_sort_rank[1];
               } else {
                  takeFirst = block[1] < block[0];
               }
            } else {
               takeFirst = h_first_sort_rank[1] < h_first_sort_rank[indexSecond + 2];
            }
         } else {
            takeFirst = block[0] < block[indexSecond + 1];
         }
      } else if (indexFirst % 3 == 1) {
         /* mod-1 vs mod-0: both successors are rank-covered, compare ranks. */
         takeFirst = h_first_sort_rank[indexFirst + 1] < h_first_sort_rank[indexSecond + 1];
      } else if (indexFirst % 3 == 2) {
         /* mod-2 vs mod-0: look one character ahead, then fall back to ranks. */
         if (block[indexFirst + 1] == block[indexSecond + 1]) {
            if (indexFirst + 2 == originalLength || indexSecond + 2 == originalLength) {
               /* A +2 lookahead wraps; walk forward until both positions
                  are rank-covered (i.e. neither is mod-0) or characters
                  differ. */
               int itrIndexFirst  = (indexFirst + 2) % originalLength;
               int itrIndexSecond = (indexSecond + 2) % originalLength;
               int foundDifference = 0;
               while (itrIndexFirst % 3 == 0 || itrIndexSecond % 3 == 0) {
                  if (block[itrIndexFirst] != block[itrIndexSecond]) {
                     takeFirst = block[itrIndexFirst] < block[itrIndexSecond];
                     foundDifference = 1;
                     break;
                  }
                  itrIndexFirst  = (itrIndexFirst + 1) % originalLength;
                  itrIndexSecond = (itrIndexSecond + 1) % originalLength;
               }
               if (!foundDifference) {
                  takeFirst = h_first_sort_rank[itrIndexFirst] < h_first_sort_rank[itrIndexSecond];
               }
            } else {
               takeFirst = h_first_sort_rank[indexFirst + 2] < h_first_sort_rank[indexSecond + 2];
            }
         } else {
            takeFirst = block[indexFirst + 1] < block[indexSecond + 1];
         }
      }
      if (takeFirst) {
         order[countOrderArr] = h_first_sort_index[countFirstArr++];
      } else {
         order[countOrderArr] = h_second_sort_index[countSecondArr++];
      }
      /* Suffix 0's output position is the BWT origin pointer; this check
         now runs on EVERY path (previously the fast paths skipped it). */
      if (order[countOrderArr] == 0) {
         s->origPtr = countOrderArr;
      }
   }
   /* Drain whichever partial sort still has entries. */
   while (countFirstArr < firstSortLength) {
      order[countOrderArr] = h_first_sort_index[countFirstArr];
      if (order[countOrderArr] == 0) {
         s->origPtr = countOrderArr;
      }
      countFirstArr++;
      countOrderArr++;
   }
   while (countSecondArr < secondSortLength) {
      order[countOrderArr] = h_second_sort_index[countSecondArr];
      if (order[countOrderArr] == 0) {
         s->origPtr = countOrderArr;
      }
      countSecondArr++;
      countOrderArr++;
   }
   AssertH( s->origPtr != -1, 1003 );
   return;
}
/*---
   CRC bookkeeping plus the GPU block-sort stage for a single block.
   Fills the arr1_* index/rank buffers which merge_two_sort_arrays()
   later combines into the final BWT order.  Always returns True.
---*/
Bool blocksort_wrapper( EState *s ) {
   BZ_INITIALISE_CRC( s->blockCRC );
   /* Nothing to sort for an empty block. */
   if (s->nblock <= 0) return True;
   /* NOTE(review): the CRC is initialised and finalised back-to-back with
      no data folded in between, so blockCRC is constant here — presumably
      the CRC is accumulated elsewhere before this call; verify against the
      caller that fills the block. */
   BZ_FINALISE_CRC ( s->blockCRC );
   s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
   s->combinedCRC ^= s->blockCRC;
   if (s->blockNo > 1) s->numZ = 0;
   if (s->verbosity >= 2)
      VPrintf4( " block %d: crc = 0x%08x, "
                "combined CRC = 0x%08x, size = %d\n",
                s->blockNo, s->blockCRC, s->combinedCRC, s->nblock );
   /* Two-part suffix sort on the GPU; first_sort_length records how many
      indices landed in the first partial sort. */
   s->first_sort_length = gpuBlockSort( (UChar*) s->arr2,
                                        s->arr1,
                                        s->arr1_first_sort,
                                        s->arr1_second_sort,
                                        s->arr1_first_sort_rank,
                                        s->nblock,
                                        &(s->sortingDepth) );
   /* The origPtr scan that used to live here moved into
      merge_two_sort_arrays(). */
   return True;
}
/*---
   MTF + Huffman stage for one already-sorted block: writes the stream
   header (first block only), the block header, CRC, origPtr, and the
   coded data; appends the stream trailer when is_last_block is set.
   Always returns True.
---*/
Bool mtf_huff_wrapper( EState* s, Bool is_last_block ) {
   /* Compressed output is generated just past the block data in arr2. */
   s->zbits = (UChar*) (&((UChar*)s->arr2)[s->nblock]);
#ifdef PRINT_DEBUG
   printf("BZ2_compressBlock %d\n",s->blockNo);
#endif
   // start with a fresh buffer with every block
   BZ2_bsInitWrite( s );
   /*-- If this is the first block, create the stream header. --*/
   if (s->blockNo == 1) {
#ifdef PRINT_DEBUG
      printf("This is the first block\n");
#endif
      /* "BZh" + blockSize100k digit. */
      bsPutUChar ( s, BZ_HDR_B );
      bsPutUChar ( s, BZ_HDR_Z );
      bsPutUChar ( s, BZ_HDR_h );
      bsPutUChar ( s, (UChar)(BZ_HDR_0 + s->blockSize100k) );
   }
   if (s->nblock > 0) {
      /* Block magic: 0x314159265359 (pi). */
      bsPutUChar ( s, 0x31 ); bsPutUChar ( s, 0x41 );
      bsPutUChar ( s, 0x59 ); bsPutUChar ( s, 0x26 );
      bsPutUChar ( s, 0x53 ); bsPutUChar ( s, 0x59 );
      /*-- Now the block's CRC, so it is in a known place. --*/
      bsPutUInt32 ( s, s->blockCRC );
      /*--
         Now a single bit indicating (non-)randomisation.
         As of version 0.9.5, we use a better sorting algorithm
         which makes randomisation unnecessary. So always set
         the randomised bit to 'no'. Of course, the decoder
         still needs to be able to handle randomised blocks
         so as to maintain backwards compatibility with
         older versions of bzip2.
      --*/
      bsW(s,1,0);
      /* BWT origin pointer, set by merge_two_sort_arrays(). */
      bsW ( s, 24, s->origPtr );
      generateMTFValues ( s );
      sendMTFValues ( s );
   }
   /*-- If this is the last block, add the stream trailer. --*/
   if (is_last_block) {
#ifdef PRINT_DEBUG
      printf("This is the last block\n");
#endif
      /* Stream-end magic: 0x177245385090 (sqrt(pi)). */
      bsPutUChar ( s, 0x17 ); bsPutUChar ( s, 0x72 );
      bsPutUChar ( s, 0x45 ); bsPutUChar ( s, 0x38 );
      bsPutUChar ( s, 0x50 ); bsPutUChar ( s, 0x90 );
      bsPutUInt32 ( s, s->combinedCRC );
      if (s->verbosity >= 2)
         VPrintf1( " final combined CRC = 0x%08x\n ", s->combinedCRC );
      bsFinishWrite ( s );
   }
   return True;
}
/*---
   Compress one block entirely on the CPU: classic BZ2_blockSort() for the
   BWT, then the same header/CRC/MTF/Huffman emission as mtf_huff_wrapper().
   Used by the CPU worker threads that race the GPU pipeline.
   Always returns True.
---*/
Bool BZ2_compressBlock_only_CPU ( EState* s, Bool is_last_block ) {
   if (s->nblock > 0) {
      BZ_FINALISE_CRC ( s->blockCRC );
      s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
      s->combinedCRC ^= s->blockCRC;
      if (s->blockNo > 1) s->numZ = 0;
      if (s->verbosity >= 2)
         VPrintf4( " block %d: crc = 0x%08x, "
                   "combined CRC = 0x%08x, size = %d\n",
                   s->blockNo, s->blockCRC, s->combinedCRC, s->nblock );
      /* CPU BWT; also sets s->origPtr. */
      BZ2_blockSort ( s );
   }
   /* Compressed output is generated just past the block data in arr2. */
   s->zbits = (UChar*) (&((UChar*)s->arr2)[s->nblock]);
   /*-- If this is the first block, create the stream header. --*/
   BZ2_bsInitWrite ( s );
   if (s->blockNo == 1) {
      bsPutUChar ( s, BZ_HDR_B );
      bsPutUChar ( s, BZ_HDR_Z );
      bsPutUChar ( s, BZ_HDR_h );
      bsPutUChar ( s, (UChar)(BZ_HDR_0 + s->blockSize100k) );
   }
   if (s->nblock > 0) {
      /* Block magic: 0x314159265359. */
      bsPutUChar ( s, 0x31 ); bsPutUChar ( s, 0x41 );
      bsPutUChar ( s, 0x59 ); bsPutUChar ( s, 0x26 );
      bsPutUChar ( s, 0x53 ); bsPutUChar ( s, 0x59 );
      /*-- Now the block's CRC, so it is in a known place. --*/
      bsPutUInt32 ( s, s->blockCRC );
      /*--
         Now a single bit indicating (non-)randomisation.
         As of version 0.9.5, we use a better sorting algorithm
         which makes randomisation unnecessary. So always set
         the randomised bit to 'no'. Of course, the decoder
         still needs to be able to handle randomised blocks
         so as to maintain backwards compatibility with
         older versions of bzip2.
      --*/
      bsW(s,1,0);
      bsW ( s, 24, s->origPtr );
      generateMTFValues ( s );
      sendMTFValues ( s );
   }
   /*-- If this is the last block, add the stream trailer. --*/
   if (is_last_block) {
      bsPutUChar ( s, 0x17 ); bsPutUChar ( s, 0x72 );
      bsPutUChar ( s, 0x45 ); bsPutUChar ( s, 0x38 );
      bsPutUChar ( s, 0x50 ); bsPutUChar ( s, 0x90 );
      bsPutUInt32 ( s, s->combinedCRC );
      if (s->verbosity >= 2)
         VPrintf1( " final combined CRC = 0x%08x\n ", s->combinedCRC );
      bsFinishWrite ( s );
   }
   return True;
}
/*---------------------------------------------------*/
/*---
   Compress every block of the stream with a hybrid GPU/CPU pipeline:
     thread 0      — GPU producer: block-sorts blocks, queues them;
     thread 1      — consumer: merges sort arrays, runs MTF+Huffman;
     threads >= 2  — CPU workers: compress whole blocks on the CPU,
                     racing the GPU thread for the remaining indices.

   Bug fix: the is_last_block argument used to be computed as
   `(Bool) count == strm->state_fill_count` — the cast binds only to
   `count`, truncating the block index to 8 bits before the comparison,
   so the last-block flag was wrong for streams with >= 256 blocks.
---*/
void BZ2_compressBlocks ( bz_stream* strm)
{
   gpuSetDevice(0);
   struct timespec t1, t2;
   clock_gettime(CLOCK_MONOTONIC, &t1);
   /* Next block index to claim; block 0 is taken by thread 0 up front. */
   Int32 atomic_used_count = 1;
   Int32 done_block_sort = 0;                       /* blocks GPU-sorted so far  */
   Int32 done_block_sort_queue[BZ_MAX_STATE_COUNT]; /* their indices, FIFO       */
   Int32 done_huff_mtf = 0;                         /* blocks merged+encoded     */
   Int32 finalGPUBlocks = BZ_MAX_STATE_COUNT;
   printf("Block size : %d\n",((EState *)strm->state[0])->nblockMAX + 19);
   printf("numThreads read %d\n",((EState*)strm->state[0])->numThreads);
   Int32 num_threads = ((EState*)strm->state[0])->numThreads/*+2*/;
   omp_set_num_threads(num_threads+2);
   printf("Number of additional CPU threads %d\n",num_threads+1);
   /* NOTE(review): done_block_sort / done_huff_mtf / finalGPUBlocks are
      written by one thread and busy-wait-read by another without atomics
      or flushes; this relies on the compiler not caching them and on the
      platform's cache coherence — TODO harden with OpenMP atomics. */
#pragma omp parallel shared(done_block_sort, done_block_sort_queue, done_huff_mtf, finalGPUBlocks)
   {
      int threadID = omp_get_thread_num();
      Int32 count;
      Int32 currentHuffMtf = -1;
      EState *s;
      Bool is_last_block = False;
      if(threadID == 0) {
         /* GPU producer: sort blocks and publish their indices for thread 1. */
         struct timespec thread1_T1, thread1_T2;
         clock_gettime(CLOCK_MONOTONIC, &thread1_T1);
         blocksort_wrapper((EState*) strm->state[0]);
         done_block_sort_queue[done_block_sort] = 0;
         done_block_sort++;
         while(true) {
            /* Claim the next unprocessed block index. */
#pragma omp critical
            {
               count = atomic_used_count;
               atomic_used_count++;
            }
            if(count > strm->state_fill_count) {
               /* No blocks left: record how many the GPU path handled. */
               finalGPUBlocks = done_block_sort;
               break;
            }
            s = (EState *)strm->state[count];
            if(blocksort_wrapper( s )) {
               done_block_sort_queue[done_block_sort] = count;
               done_block_sort++;
            } else {
               printf("blocksort_wrapper failed for blockNo %d\n",count);
               exit(2);
            }
         }
         clock_gettime(CLOCK_MONOTONIC, &thread1_T2);
         double thread1_diff = thread1_T2.tv_sec - thread1_T1.tv_sec + ((thread1_T2.tv_nsec - thread1_T1.tv_nsec)/1000000000.0);
         printf("[Time thread1] %lf\n",thread1_diff);
      }
      if(threadID == 1) {
         /* Consumer: merge sort arrays and MTF+Huffman in queue order. */
         struct timespec thread2_T1, thread2_T2;
         clock_gettime(CLOCK_MONOTONIC, &thread2_T1);
         double thread2_work = 0.0;
         struct timespec thread2_work1, thread2_work2;
         while(true) {
            if(done_block_sort > done_huff_mtf) {
               clock_gettime(CLOCK_MONOTONIC, &thread2_work1);
               currentHuffMtf = done_block_sort_queue[done_huff_mtf];
               done_huff_mtf++;
               s = (EState *)strm->state[currentHuffMtf];
               merge_two_sort_arrays ( s );
               if(currentHuffMtf == strm->state_fill_count) {
                  is_last_block = True;
               }
               if( !mtf_huff_wrapper( s, is_last_block ) ) {
                  printf("mtf_huff_wrapper failed for blockNo %d\n",currentHuffMtf);
                  exit(2);
               }
               clock_gettime(CLOCK_MONOTONIC, &thread2_work2);
               thread2_work += thread2_work2.tv_sec - thread2_work1.tv_sec + ((thread2_work2.tv_nsec - thread2_work1.tv_nsec)/1000000000.0);
            }
            /* Stop once every GPU-sorted block has been encoded. */
            if(done_huff_mtf == finalGPUBlocks) {
               break;
            }
         }
         clock_gettime(CLOCK_MONOTONIC, &thread2_T2);
         double thread2_diff = thread2_T2.tv_sec - thread2_T1.tv_sec + ((thread2_T2.tv_nsec - thread2_T1.tv_nsec)/1000000000.0);
         printf("[Time thread2 work] %lf\n",thread2_work);
         printf("[Time thread2] %lf\n",thread2_diff);
      }
      if(threadID >= 2) {
         /* CPU workers: take whole blocks end-to-end on the CPU. */
         while(true) {
#pragma omp critical
            {
               count = atomic_used_count;
               atomic_used_count++;
            }
            if(count > strm->state_fill_count) {
               break;
            }
            s = (EState *)strm->state[count];
            /* Parenthesised comparison: the cast must apply to the whole
               expression, not to count alone. */
            if(!BZ2_compressBlock_only_CPU( s, (Bool)(count == strm->state_fill_count) )) {
               printf("BZ2_compressBlock_only_CPU failed at blockNo %d\n",count);
               exit(2);
            }
         }
      }
   }
   printf("Number of CPU threads %d\n",num_threads - 2);
   printf("Out of the total %d blocks GPU did %d\n",strm->state_fill_count+1,finalGPUBlocks);
   clock_gettime(CLOCK_MONOTONIC, &t2);
   double t = t2.tv_sec - t1.tv_sec + ((t2.tv_nsec - t1.tv_nsec)/1000000000.0);
   printf("total compression time (with overlap) %lf\n",t);
}
/* Global Variables for PThreads Producer-Consumer pipeline */
bz_stream* global_strm;                    /* stream being compressed; shared by produce()/consume() */
sem_t mutex, full, empty;                  /* mutex guards buff; full/empty count used/free slots */
int buff[BZ_MAX_STATE_COUNT];              /* queue of block indices handed from producer to consumer */
int producerCount = 0, consumerCount = 0;  /* last written / last read slot in buff (pre-incremented) */
/*---
   Producer thread: GPU block-sort every block in order and hand the block
   index to the consumer through the semaphore-guarded buff[] queue.
   Fixes vs. the previous version: the function now returns NULL (falling
   off the end of a value-returning function is undefined behaviour), and
   the per-block averages guard against division by zero when the stream
   holds a single block (state_fill_count == 0).
---*/
void *produce(void *arg) {
   unsigned int i;
   struct timespec t1, t2;
   double thread1_work = 0.0;
   int maxSortingDepth = 0;
   int avgSortingDepth = 0;
   (void)arg; /* unused */
   for(i = 0; i <= global_strm->state_fill_count; i++) {
      clock_gettime(CLOCK_MONOTONIC, &t1);
      EState *s = (EState*)global_strm->state[i];
      if( !blocksort_wrapper( s )) {
         printf("[ERROR] blocksort_wrapper_failed\n");
         exit(1);
      }
      clock_gettime(CLOCK_MONOTONIC, &t2);
      /* The (possibly short) last block is excluded from the statistics. */
      if(i != global_strm->state_fill_count) {
         thread1_work += t2.tv_sec - t1.tv_sec + ((t2.tv_nsec - t1.tv_nsec)/1000000000.0);
         if(s->sortingDepth > maxSortingDepth) maxSortingDepth = s->sortingDepth;
         avgSortingDepth += s->sortingDepth;
      }
      /* Publish block index i to the consumer.
         NOTE(review): buff is filled from slot 1 upward (pre-increment), so
         the highest slot used is state_fill_count+1 — confirm this stays
         below BZ_MAX_STATE_COUNT. */
      sem_wait(&empty);
      sem_wait(&mutex);
      buff[++producerCount] = i;
      sem_post(&mutex);
      sem_post(&full);
   }
   int divFactor = global_strm->state_fill_count;
   if (divFactor == 0) divFactor = 1; /* avoid division by zero for 1-block streams */
   printf("[Time BWT] %lf\n",thread1_work);
   printf("[Average Time BWT] %lf\n",thread1_work/divFactor);
   printf("[Max Sorting Depth] %d\n",maxSortingDepth);
   printf("[Average Sorting Depth] %d\n",(int)((1.0*avgSortingDepth)/divFactor));
   return NULL;
}
/*---
   Consumer thread: pull block indices from buff[] in FIFO order, merge the
   two partial sorts into the BWT order, then run the MTF+Huffman stage.
   Fixes vs. the previous version: the function now returns NULL (falling
   off the end of a value-returning function is undefined behaviour), and
   the averages guard against division by zero when state_fill_count == 0.
---*/
void *consume(void *arg) {
   int item, i;
   bool is_last_block = false;
   struct timespec t1, t2;
   struct timespec tmerge;
   double tmergeTotal = 0.0;
   double thread2_work = 0.0;
   (void)arg; /* unused */
   for(i = 0; i <= global_strm->state_fill_count; i++) {
      /* Take the next queued block index. */
      sem_wait(&full);
      sem_wait(&mutex);
      item = buff[++consumerCount];
      sem_post(&mutex);
      sem_post(&empty);
      clock_gettime(CLOCK_MONOTONIC, &t1);
      EState *s = (EState*)global_strm->state[item];
      merge_two_sort_arrays( s );
      clock_gettime(CLOCK_MONOTONIC, &tmerge);
      if(item == global_strm->state_fill_count) {
         is_last_block = true;
      } else {
         /* Last (short) block is excluded from the merge statistics. */
         tmergeTotal += tmerge.tv_sec - t1.tv_sec + ((tmerge.tv_nsec - t1.tv_nsec)/1000000000.0);
      }
      if(!mtf_huff_wrapper( s, is_last_block )) {
         printf("[ERROR] mtf_huff_wrapper\n");
         exit(1);
      }
      clock_gettime(CLOCK_MONOTONIC, &t2);
      thread2_work += t2.tv_sec - t1.tv_sec + ((t2.tv_nsec - t1.tv_nsec)/1000000000.0);
   }
   int divFactor = global_strm->state_fill_count;
   if (divFactor == 0) divFactor = 1; /* avoid division by zero for 1-block streams */
   printf("[Total Time Merge] %lf\n",tmergeTotal);
   printf("[Average Time Merge] %lf\n",tmergeTotal/divFactor);
   printf("[Time Merge, MTF+HUFF] %lf\n",thread2_work);
   return NULL;
}
/*---
   Two-thread producer/consumer pipeline over pthreads: produce() GPU-sorts
   blocks while consume() merges and entropy-codes them.
   Fixes vs. the previous version:
     - the empty-slot semaphore was initialised to state_fill_count, which
       is 0 for a single-block stream and deadlocked the producer on its
       first sem_wait; it is now state_fill_count+1 (the real item count);
     - the global producer/consumer counters are reset so the pipeline can
       be run more than once per process;
     - the semaphores are destroyed after the join (sem_init/sem_destroy
       must pair).
---*/
void BZ2_compressBlocks_pthreads ( bz_stream* strm)
{
   printf("[BZ2_compressBlocks_pthreads] Total Blocks : %d, Block Size %d\n", strm->state_fill_count, ((EState*)strm->state[0])->nblockMAX);
   gpuSetDevice(0);
   global_strm = strm;
   struct timespec t1, t2;
   clock_gettime(CLOCK_MONOTONIC, &t1);
   pthread_t tid1, tid2;
   /* Reset the shared queue cursors (file-scope globals). */
   producerCount = 0;
   consumerCount = 0;
   sem_init(&mutex, 0, 1);
   sem_init(&full, 0, 0);
   /* state_fill_count+1 items will flow through the queue. */
   sem_init(&empty, 0, strm->state_fill_count + 1);
   pthread_create(&tid1, NULL, produce, NULL);
   pthread_create(&tid2, NULL, consume, NULL);
   pthread_join(tid1, NULL);
   pthread_join(tid2, NULL);
   sem_destroy(&mutex);
   sem_destroy(&full);
   sem_destroy(&empty);
   clock_gettime(CLOCK_MONOTONIC, &t2);
   double t = t2.tv_sec - t1.tv_sec + ((t2.tv_nsec - t1.tv_nsec)/1000000000.0);
   printf("total compression time (with pthreads overlap) %lf\n",t);
}
/*---
   Sequential (non-pipelined) reference path: for each block in turn, run
   the GPU sort, merge, then the MTF+Huffman emission.  Used to measure the
   benefit of the overlapped pipelines above.
---*/
void BZ2_compressBlocks_without_overlap (bz_stream* strm) {
   struct timespec t1, t2;
   clock_gettime(CLOCK_MONOTONIC, &t1);
   UInt32 count;
   EState *s;
   Bool is_last_block = False;
   for(count = 0; count <= strm->state_fill_count; count++) {
      s = (EState *)strm->state[count];
      BZ_INITIALISE_CRC( s->blockCRC );
      if(count == strm->state_fill_count) {
         is_last_block = True;
      }
      if (s->nblock > 0) {
         BZ_FINALISE_CRC ( s->blockCRC );
         s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
         s->combinedCRC ^= s->blockCRC;
         if (s->blockNo > 1) s->numZ = 0;
         if (s->verbosity >= 2)
            VPrintf4( " block %d: crc = 0x%08x, "
                      "combined CRC = 0x%08x, size = %d\n",
                      s->blockNo, s->blockCRC, s->combinedCRC, s->nblock );
         /* GPU suffix sort followed by the host-side merge. */
         s->first_sort_length = gpuBlockSort( (UChar*) s->arr2, s->arr1, s->arr1_first_sort, s->arr1_second_sort, s->arr1_first_sort_rank, s->nblock, &(s->sortingDepth));
         merge_two_sort_arrays( s );
         /* Locate position of suffix 0 = BWT origin pointer.
            NOTE(review): merge_two_sort_arrays() also sets s->origPtr;
            this rescan assumes s->ptr aliases the merged order array. */
         UInt32* ptr = s->ptr;
         s->origPtr = -1;
         Int32 i;
         for (i = 0; i < s->nblock; i++)
            if (ptr[i] == 0)
               { s->origPtr = i; break; };
         AssertH( s->origPtr != -1, 1003 );
      }
      /* Compressed output goes just past the block data in arr2. */
      s->zbits = (UChar*) (&((UChar*)s->arr2)[s->nblock]);
#ifdef PRINT_DEBUG
      printf("BZ2_compressBlock %d\n",s->blockNo);
#endif
      // start with a fresh buffer with every block
      BZ2_bsInitWrite( s );
      /*-- If this is the first block, create the stream header. --*/
      if (s->blockNo == 1) {
#ifdef PRINT_DEBUG
         printf("This is the first block\n");
#endif
         //BZ2_bsInitWrite ( s );
         bsPutUChar ( s, BZ_HDR_B );
         bsPutUChar ( s, BZ_HDR_Z );
         bsPutUChar ( s, BZ_HDR_h );
         bsPutUChar ( s, (UChar)(BZ_HDR_0 + s->blockSize100k) );
      }
      if (s->nblock > 0) {
         /* Block magic: 0x314159265359. */
         bsPutUChar ( s, 0x31 ); bsPutUChar ( s, 0x41 );
         bsPutUChar ( s, 0x59 ); bsPutUChar ( s, 0x26 );
         bsPutUChar ( s, 0x53 ); bsPutUChar ( s, 0x59 );
         /*-- Now the block's CRC, so it is in a known place. --*/
         bsPutUInt32 ( s, s->blockCRC );
         /*--
            Now a single bit indicating (non-)randomisation.
            As of version 0.9.5, we use a better sorting algorithm
            which makes randomisation unnecessary. So always set
            the randomised bit to 'no'. Of course, the decoder
            still needs to be able to handle randomised blocks
            so as to maintain backwards compatibility with
            older versions of bzip2.
         --*/
         bsW(s,1,0);
         bsW ( s, 24, s->origPtr );
         generateMTFValues ( s );
         sendMTFValues ( s );
      }
      /*-- If this is the last block, add the stream trailer. --*/
      if (is_last_block) {
#ifdef PRINT_DEBUG
         printf("This is the last block\n");
#endif
         bsPutUChar ( s, 0x17 ); bsPutUChar ( s, 0x72 );
         bsPutUChar ( s, 0x45 ); bsPutUChar ( s, 0x38 );
         bsPutUChar ( s, 0x50 ); bsPutUChar ( s, 0x90 );
         bsPutUInt32 ( s, s->combinedCRC );
         if (s->verbosity >= 2)
            VPrintf1( " final combined CRC = 0x%08x\n ", s->combinedCRC );
         bsFinishWrite ( s );
      }
   }
   clock_gettime(CLOCK_MONOTONIC, &t2);
   double t = t2.tv_sec - t1.tv_sec + ((t2.tv_nsec - t1.tv_nsec)/1000000000.0);
   printf("total compression time (without overlap) %lf\n",t);
}
/*-------------------------------------------------------------*/
/*--- end compress.c ---*/
/*-------------------------------------------------------------*/
|
kgraph-data.h | #ifndef WDONG_KGRAPH_DATA
#define WDONG_KGRAPH_DATA
#include <cmath>
#include <cstring>
#include <malloc.h>
#include <vector>
#include <fstream>
#include <stdexcept>
#include <boost/assert.hpp>
// #ifdef __GNUC__
#ifdef __AVX__
#define KGRAPH_MATRIX_ALIGN 32
#else
#ifdef __SSE2__
#define KGRAPH_MATRIX_ALIGN 16
#else
#define KGRAPH_MATRIX_ALIGN 4
#endif
#endif
// #endif
namespace kgraph {
/// L2 square distance with AVX instructions.
/** AVX instructions have strong alignment requirement for t1 and t2.
*/
extern float float_l2sqr_avx (float const *t1, float const *t2, unsigned dim);
/// L2 square distance with SSE2 instructions.
extern float float_l2sqr_sse2 (float const *t1, float const *t2, unsigned dim);
extern float float_l2sqr_sse2 (float const *, unsigned dim);
extern float float_dot_sse2 (float const *, float const *, unsigned dim);
/// L2 square distance for uint8_t with SSE2 instructions (for SIFT).
extern float uint8_l2sqr_sse2 (uint8_t const *t1, uint8_t const *t2, unsigned dim);
extern float float_l2sqr (float const *, float const *, unsigned dim);
extern float float_l2sqr (float const *, unsigned dim);
extern float float_dot (float const *, float const *, unsigned dim);
using std::vector;
/// namespace for various distance metrics.
namespace metric {
/// L2 square distance.
/// Scalar (non-SIMD) L2-squared distance and related vector primitives.
struct l2sqr {
    /// Squared Euclidean distance between two dim-length vectors.
    template <typename T>
    static float apply (T const *t1, T const *t2, unsigned dim) {
        float acc = 0;
        for (unsigned k = 0; k < dim; ++k) {
            float d = float(t1[k]) - float(t2[k]);
            acc += d * d;
        }
        return acc;
    }
    /// Inner (dot) product of two dim-length vectors.
    template <typename T>
    static float dot (T const *t1, T const *t2, unsigned dim) {
        float acc = 0;
        for (unsigned k = 0; k < dim; ++k) {
            acc += float(t1[k]) * float(t2[k]);
        }
        return acc;
    }
    /// Squared L2 norm of a dim-length vector.
    template <typename T>
    static float norm2 (T const *t1, unsigned dim) {
        float acc = 0;
        for (unsigned k = 0; k < dim; ++k) {
            float v = float(t1[k]);
            acc += v * v;
        }
        return acc;
    }
};
/// Plain (non-squared) L2 distance, defined as sqrt of l2sqr.
struct l2 {
    template <typename T>
    static float apply (T const *t1, T const *t2, unsigned dim) {
        float const sq = l2sqr::apply<T>(t1, t2, dim);
        return sqrt(sq);
    }
};
}
/// Matrix data.
/// Row-major matrix backed by one aligned heap allocation.
/** Each row occupies `stride` bytes, rounded up to a multiple of A, so every
 *  row start stays A-byte aligned (required by the SSE/AVX distance kernels
 *  declared above).  Rows are moved with memset/raw stream I/O, so T should
 *  be trivially copyable.  Non-copyable by omission (raw pointer member) —
 *  NOTE(review): copy construction/assignment are not deleted and would
 *  double-free; confirm callers never copy.
 */
template <typename T, unsigned A = KGRAPH_MATRIX_ALIGN>
class Matrix {
    unsigned col;   // elements per row
    unsigned row;   // number of rows
    size_t stride;  // bytes per row (>= sizeof(T) * col, multiple of A)
    char *data;     // aligned storage of row * stride bytes, or NULL
    // (Re)allocate storage for r rows of c elements; old contents are lost.
    void reset (unsigned r, unsigned c) {
        row = r;
        col = c;
        stride = (sizeof(T) * c + A - 1) / A * A;
        /*
        data.resize(row * stride);
        */
        if (data) free(data);
        data = (char *)memalign(A, row * stride); // SSE instruction needs data to be aligned
        if (!data) throw runtime_error("memalign");
    }
public:
    Matrix (): col(0), row(0), stride(0), data(0) {}
    Matrix (unsigned r, unsigned c): data(0) {
        reset(r, c);
    }
    ~Matrix () {
        if (data) free(data);
    }
    /// Number of rows.
    unsigned size () const {
        return row;
    }
    /// Elements per row.
    unsigned dim () const {
        return col;
    }
    /// Bytes per row, including alignment padding.
    size_t step () const {
        return stride;
    }
    /// Reallocate to r x c; existing contents are discarded.
    void resize (unsigned r, unsigned c) {
        reset(r, c);
    }
    T const *operator [] (unsigned i) const {
        return reinterpret_cast<T const *>(&data[stride * i]);
    }
    T *operator [] (unsigned i) {
        return reinterpret_cast<T *>(&data[stride * i]);
    }
    /// Zero the whole buffer, padding included.
    void zero () {
        memset(data, 0, row * stride);
    }
    /// Scale every row to unit L2 norm, in place.
    // NOTE(review): divides by the row norm without guarding against an
    // all-zero row (division by zero) — confirm inputs are never zero vectors.
    void normalize2 () {
#pragma omp parallel for
        for (unsigned i = 0; i < row; ++i) {
            T *p = operator[](i);
            double sum = metric::l2sqr::norm2(p, col);
            sum = std::sqrt(sum);
            for (unsigned j = 0; j < col; ++j) {
                p[j] /= sum;
            }
        }
    }
    /// Load raw binary vectors: `skip` header bytes, then records of
    /// sizeof(T)*dim payload followed by `gap` bytes to discard.
    /// Row count is inferred from the file size.
    void load (const std::string &path, unsigned dim, unsigned skip = 0, unsigned gap = 0) {
        std::ifstream is(path.c_str(), std::ios::binary);
        if (!is) throw io_error(path);
        is.seekg(0, std::ios::end);
        size_t size = is.tellg();
        size -= skip;
        unsigned line = sizeof(T) * dim + gap;
        unsigned N = size / line;
        reset(N, dim);
        zero();
        is.seekg(skip, std::ios::beg);
        for (unsigned i = 0; i < N; ++i) {
            is.read(&data[stride * i], sizeof(T) * dim);
            is.seekg(gap, std::ios::cur);
        }
        if (!is) throw io_error(path);
    }
    /// Load an LSHKIT file: 3 x unsigned header (element size, rows, cols),
    /// then packed row data.  The element size must match sizeof(T).
    void load_lshkit (std::string const &path) {
        static const unsigned LSHKIT_HEADER = 3;
        std::ifstream is(path.c_str(), std::ios::binary);
        unsigned header[LSHKIT_HEADER]; /* entry size, row, col */
        is.read((char *)header, sizeof header);
        if (!is) throw io_error(path);
        if (header[0] != sizeof(T)) throw io_error(path);
        is.close();
        unsigned D = header[2];
        unsigned skip = LSHKIT_HEADER * sizeof(unsigned);
        unsigned gap = 0;
        load(path, D, skip, gap);
    }
    /// Write the matrix in LSHKIT format (header + packed rows, no padding).
    void save_lshkit (std::string const &path) {
        std::ofstream os(path.c_str(), std::ios::binary);
        unsigned header[3];
        assert(sizeof header == 3*4);
        header[0] = sizeof(T);
        header[1] = row;
        header[2] = col;
        os.write((const char *)header, sizeof(header));
        for (unsigned i = 0; i < row; ++i) {
            os.write(&data[stride * i], sizeof(T) * col);
        }
    }
};
/// Matrix proxy to interface with 3rd party libraries (FLANN, OpenCV, NumPy).
/// Non-owning, read-mostly view over externally managed row-major data
/// (kgraph Matrix, FLANN, OpenCV, NumPy).  Rows must be A-byte aligned.
template <typename DATA_TYPE, unsigned A = KGRAPH_MATRIX_ALIGN>
class MatrixProxy {
    unsigned rows;
    unsigned cols; // # elements, not bytes, in a row,
    size_t stride; // # bytes in a row, >= cols * sizeof(element)
    uint8_t const *data; // not owned; lifetime managed by the source object
public:
    MatrixProxy (Matrix<DATA_TYPE> const &m)
        : rows(m.size()), cols(m.dim()), stride(m.step()), data(reinterpret_cast<uint8_t const *>(m[0])) {
    }
#ifndef __AVX__
#ifdef FLANN_DATASET_H_
    /// Construct from FLANN matrix.
    MatrixProxy (flann::Matrix<DATA_TYPE> const &m)
        : rows(m.rows), cols(m.cols), stride(m.stride), data(m.data) {
        if (stride % A) throw invalid_argument("bad alignment");
    }
#endif
#ifdef CV_MAJOR_VERSION
    /// Construct from OpenCV matrix.
    MatrixProxy (cv::Mat const &m)
        : rows(m.rows), cols(m.cols), stride(m.step), data(m.data) {
        if (stride % A) throw invalid_argument("bad alignment");
    }
#endif
#ifdef NPY_NDARRAYOBJECT_H
    /// Construct from NumPy matrix (legacy pre-1.7 PyArrayObject layout).
    MatrixProxy (PyArrayObject *obj) {
        if (!obj || (obj->nd != 2)) throw invalid_argument("bad array shape");
        rows = obj->dimensions[0];
        cols = obj->dimensions[1];
        stride = obj->strides[0];
        data = reinterpret_cast<uint8_t const *>(obj->data);
        if (obj->descr->elsize != sizeof(DATA_TYPE)) throw invalid_argument("bad data type size");
        if (stride % A) throw invalid_argument("bad alignment");
        if (!(stride >= cols * sizeof(DATA_TYPE))) throw invalid_argument("bad stride");
    }
#endif
#endif
    /// Number of rows.
    unsigned size () const {
        return rows;
    }
    /// Elements per row.
    unsigned dim () const {
        return cols;
    }
    DATA_TYPE const *operator [] (unsigned i) const {
        return reinterpret_cast<DATA_TYPE const *>(data + stride * i);
    }
    // NOTE(review): casts away the constness of the viewed buffer; callers
    // must ensure the underlying data is actually writable.
    DATA_TYPE *operator [] (unsigned i) {
        return const_cast<DATA_TYPE *>(reinterpret_cast<DATA_TYPE const *>(data + stride * i));
    }
};
/// Oracle for Matrix or MatrixProxy.
/** DATA_TYPE can be Matrix or MatrixProxy,
 * DIST_TYPE should be one class within the namespace kgraph.metric.
 */
template <typename DATA_TYPE, typename DIST_TYPE>
class MatrixOracle: public kgraph::IndexOracle {
MatrixProxy<DATA_TYPE> proxy;  // non-owning view over the dataset
public:
// Distance oracle bound to one fixed query vector; used at search time.
class SearchOracle: public kgraph::SearchOracle {
MatrixProxy<DATA_TYPE> proxy;
DATA_TYPE const *query;
public:
SearchOracle (MatrixProxy<DATA_TYPE> const &p, DATA_TYPE const *q): proxy(p), query(q) {
}
virtual unsigned size () const {
return proxy.size();
}
// Distance between dataset row i and the bound query.
virtual float operator () (unsigned i) const {
return DIST_TYPE::apply(proxy[i], query, proxy.dim());
}
};
// Accepts anything MatrixProxy can be constructed from.
template <typename MATRIX_TYPE>
MatrixOracle (MATRIX_TYPE const &m): proxy(m) {
}
virtual unsigned size () const {
return proxy.size();
}
// Pairwise distance between dataset rows i and j (index-construction time).
virtual float operator () (unsigned i, unsigned j) const {
return DIST_TYPE::apply(proxy[i], proxy[j], proxy.dim());
}
SearchOracle query (DATA_TYPE const *query) const {
return SearchOracle(proxy, query);
}
};
/// Mean recall@K of `result` against ground truth `gs`.
/// Rows hold per-query distance values in non-decreasing order; matches are
/// counted by merging the two sorted rows and comparing distances exactly.
/// Throws std::invalid_argument on shape violations, and std::runtime_error
/// if `result` contains a distance smaller than the ground truth at the same
/// merge position ("unstable" distances).
/// FIX: the size-check message read "gs.size() > result.size()" although the
/// condition actually tested is >=; the message now matches the check.
inline float AverageRecall (Matrix<float> const &gs, Matrix<float> const &result, unsigned K = 0) {
    if (K == 0) {
        K = result.dim();  // default: use the full result width
    }
    if (!(gs.dim() >= K)) throw std::invalid_argument("gs.dim() >= K");
    if (!(result.dim() >= K)) throw std::invalid_argument("result.dim() >= K");
    if (!(gs.size() >= result.size())) throw std::invalid_argument("gs.size() >= result.size()");
    float sum = 0;
    for (unsigned i = 0; i < result.size(); ++i) {
        float const *gs_row = gs[i];
        float const *re_row = result[i];
        // merge the two sorted distance rows, counting exact matches
        unsigned found = 0;
        unsigned gs_n = 0;
        unsigned re_n = 0;
        while ((gs_n < K) && (re_n < K)) {
            if (gs_row[gs_n] < re_row[re_n]) {
                ++gs_n;
            } else if (gs_row[gs_n] == re_row[re_n]) {
                ++found;
                ++gs_n;
                ++re_n;
            } else {
                // result claims a distance better than ground truth: invalid
                throw std::runtime_error("distance is unstable");
            }
        }
        sum += float(found) / K;
    }
    return sum / result.size();
}
}
// SIMD specializations of the metric templates.  The AVX branch is disabled
// (#if 0); on non-AVX builds with SSE2 the generic implementations are
// overridden by the hand-written *_sse2 kernels declared elsewhere.
#ifndef KGRAPH_NO_VECTORIZE
// #ifdef __GNUC__
#ifdef __AVX__
#if 0
namespace kgraph { namespace metric {
template <>
inline float l2sqr::apply<float> (float const *t1, float const *t2, unsigned dim) {
return float_l2sqr_avx(t1, t2, dim);
}
}}
#endif
#else
#ifdef __SSE2__
namespace kgraph { namespace metric {
// squared L2 distance between two float vectors
template <>
inline float l2sqr::apply<float> (float const *t1, float const *t2, unsigned dim) {
return float_l2sqr_sse2(t1, t2, dim);
}
// dot product
template <>
inline float l2sqr::dot<float> (float const *t1, float const *t2, unsigned dim) {
return float_dot_sse2(t1, t2, dim);
}
// squared L2 norm of a single vector
template <>
inline float l2sqr::norm2<float> (float const *t1, unsigned dim) {
return float_l2sqr_sse2(t1, dim);
}
// squared L2 distance between two byte vectors
template <>
inline float l2sqr::apply<uint8_t> (uint8_t const *t1, uint8_t const *t2, unsigned dim) {
return uint8_l2sqr_sse2(t1, t2, dim);
}
}}
#endif
#endif
// #endif
#endif
#endif
|
omp_matrix_vector.c | #include <omp.h>
#include <stdio.h>
#define R 100
#define C 100
/*
 * Computes the matrix-vector product mat * vec with OpenMP, prints one
 * result per row, then reports wall-clock timings.
 *
 * Fixes over the original:
 *  - 'main' had no return type; implicit int is invalid since C99.
 *  - 'tmp' was a single shared variable that every thread reset to 0 inside
 *    the parallel loop while also being named in 'reduction(+:tmp)' and
 *    updated under '#pragma omp atomic' — a data race producing garbage.
 *    Each row now accumulates into a thread-private local, and the per-row
 *    results are stored into a result array.
 */
int main(void)
{
  double mat[R][C];
  double vec[C];
  double result[R];
  int i, j;
  /* Some initializations */
  for (i = 0; i < R; i++) {
    for (j = 0; j < C; j++) {
      mat[i][j] = 1;
    }
  }
  for (i = 0; i < C; i++)
    vec[i] = 1;
  double start = omp_get_wtime();
  /* one independent dot product per row; i is implicitly private */
#pragma omp parallel for private(j) shared(mat, vec, result)
  for (i = 0; i < R; i++) {
    double tmp = 0.0; /* private per-row accumulator */
    for (j = 0; j < C; j++) {
      tmp += mat[i][j] * vec[j];
    }
    result[i] = tmp;
  }
  for (i = 0; i < R; i++)
    printf("Resultado[%d]=%lf\n", i, result[i]);
  double end = omp_get_wtime();
  printf("start time = %f\n", start);
  printf("end time = %f\n", end);
  printf("diff time = %f\n", end - start);
  return 0;
}
|
count_omp_threads.c | /******************************************************************************
* FILE: count_threads_omp.c
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/* Spawn an OpenMP parallel region; the master thread (id 0) reports how
 * many threads the runtime actually created. */
int main (int argc, char *argv[])
{
  int thread_count;
  int thread_id;
  /* Fork a team of threads giving them their own copies of variables */
#pragma omp parallel private(thread_count, thread_id)
  {
    /* Obtain thread number */
    thread_id = omp_get_thread_num();
    /* Only master thread does this */
    if (thread_id == 0) {
      thread_count = omp_get_num_threads();
      printf("Number of threads = %d\n", thread_count);
    }
  } /* All threads join master thread and disband */
  return 0;
}
|
argmax.h | /*
Authors: Mayank Rathee
Copyright:
Copyright (c) 2020 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef ARGMAX_H__
#define ARGMAX_H__
#include "NonLinear/relu-field.h"
#include "NonLinear/relu-ring.h"
// Two-party secure argmax over additively shared inputs, built on the ReLU
// (i.e. MSB-based comparison) protocols.  `type` is the share word type.
template <typename type> class ArgMaxProtocol {
public:
sci::IOPack *iopack = nullptr;               // network I/O channels (not owned)
sci::OTPack *otpack = nullptr;               // oblivious-transfer instances (not owned)
ReLURingProtocol<type> *relu_oracle = nullptr;        // used when algeb_str == RING (owned)
ReLUFieldProtocol<type> *relu_field_oracle = nullptr; // used when algeb_str == FIELD (owned)
int party;       // sci::ALICE or sci::BOB
int algeb_str;   // RING or FIELD
int l, b;        // bitlength of shares, radix parameter passed to ReLU
int num_cmps;
uint64_t prime_mod;   // field modulus (FIELD mode only)
uint8_t zero_small = 0;
// fixed 32/32 split of a 64-bit OT payload (RING path)
uint64_t mask32_lower = (1ULL << 32) - 1ULL;
uint64_t mask32_upper = -1ULL - mask32_lower;
// l-bit split of a 2l-bit OT payload (FIELD path); set in the constructor
uint64_t mask_upper, mask_lower;
bool createdReluObj = false;
type mask_l;     // all-ones mask of the low l bits, set by configure()
// Constructor
// Allocates the ring or field ReLU backend depending on algeb_str and
// precomputes the bit masks used to pack two l-bit values into one OT payload.
// NOTE(review): `1ULL << (2 * this->l)` is undefined behavior when l >= 32
// (shift >= 64); mask_upper/mask_lower appear to be consumed only on the
// sub-32 FIELD path (argmax_this_level_sub_32) — confirm l < 32 there.
ArgMaxProtocol(int party, int algeb_str, sci::IOPack *iopack, int l, int b,
uint64_t prime, sci::OTPack *otpack) {
this->party = party;
this->algeb_str = algeb_str;
this->iopack = iopack;
this->l = l;
mask_lower = (1ULL << this->l) - 1;
mask_upper = (1ULL << (2 * this->l)) - 1 - mask_lower;
this->b = b;
this->prime_mod = prime;
this->otpack = otpack;
if (algeb_str == RING) {
this->relu_oracle =
new ReLURingProtocol<type>(party, RING, iopack, l, b, otpack);
} else {
this->relu_field_oracle = new ReLUFieldProtocol<type>(
party, FIELD, iopack, l, b, this->prime_mod, otpack);
}
configure();
}
// Destructor: release whichever ReLU backend the constructor allocated
// (the other pointer is still nullptr).
~ArgMaxProtocol() {
    if (algeb_str == RING) {
        delete relu_oracle;
    } else {
        delete relu_field_oracle;
    }
}
// Precompute the l-bit all-ones mask.  The 32/64-bit cases assign -1 so the
// implicit conversion fills every bit of `type`, matching the original's
// three-way branch exactly (note: for l == 32 with a 64-bit `type` this is
// NOT the same as (1ULL << 32) - 1, so the cases must stay distinct).
void configure() {
    switch (this->l) {
    case 32:
        mask_l = -1;
        break;
    case 64:
        mask_l = -1ULL;
        break;
    default:
        mask_l = (type)((1ULL << l) - 1);
        break;
    }
}
// Round val up to the next multiple of 8 (val itself if already a multiple).
// FIX: replaces the original 8-iteration linear probe with constant-time bit
// arithmetic; equivalent for all inputs (two's-complement round-up), and
// removes the unreachable `return 0` fallback.
int next_eight_multiple(int val) {
    return (val + 7) & ~7;
}
// Secure argmax over `size` additively shared values in inpArr.
// Runs a binary tournament: each level compares adjacent pairs via the ReLU
// (MSB) subprotocol and keeps the winner's value share and index share.
// On return, maxi[0] holds this party's share of the argmax index; if
// get_max_too is set, max_val[0] holds the share of the maximum value.
// Only ALICE seeds the index shares (BOB's index shares stay 0), so the
// plaintext index is the sum of both parties' shares.
void ArgMaxMPC(int size, type *inpArr, type *maxi, bool get_max_too = false,
type *max_val = nullptr) {
// working copies with slack for the power-of-8 padding below
type *input_temp = new type[size + 16];
type *input_argmax_temp = new type[size + 16];
for (int i = 0; i < size; i++) {
input_temp[i] = inpArr[i];
input_argmax_temp[i] = 0;
}
if (party == sci::ALICE) {
for (type i = 0; i < (type)size; i++) {
input_argmax_temp[i] = i;
}
}
// make the element count even by duplicating the last entry
if (size & 1) {
input_temp[size] = input_temp[size - 1];
input_argmax_temp[size] = input_argmax_temp[size - 1];
size += 1;
}
type *compare_with = new type[size + 16];
type *compare_with_argmax = new type[size + 16];
type *relu_res = new type[size + 16];
type *argmax_res = new type[size + 16];
int no_of_nodes = size;
int no_of_nodes_child;
int pad1, pad2;
int times_stuck_on_8 = 0;
while (no_of_nodes > 1) {
// std::cout<<"#nodes = "<<no_of_nodes<<std::endl;
// pad the child level up to a multiple of 8 (backend batch minimum)
no_of_nodes_child = (int)ceil((float)no_of_nodes / (float)2);
pad1 = next_eight_multiple(no_of_nodes_child);
pad1 = pad1 - no_of_nodes_child;
pad2 = no_of_nodes + 2 * pad1;
no_of_nodes_child += pad1;
if (no_of_nodes_child == 8) {
times_stuck_on_8++;
}
if (times_stuck_on_8 >= 5) {
// The backend code only supports a minimum batch size of 8
// So, whenever we have less than 8 child nodes, we pad it to get 8
// nodes. The child nodes are = 8 in the following cases: #parentnodes =
// 16, 8, 4, 2, 1. We will get the argmax result when parent nodes = 1.
// So, times_stuck_on_8 >= 5.
break;
}
// replicate the last real node into the padding slots
for (int i = no_of_nodes; i < pad2; i++) {
input_temp[i] = input_temp[no_of_nodes - 1];
input_argmax_temp[i] = input_argmax_temp[no_of_nodes - 1];
}
// form pairwise difference shares: (a - b) so that ReLU(a - b) selects a
if (this->algeb_str == FIELD) {
for (int i = 0; i < (pad2); i += 2) {
compare_with[i / 2] = sci::neg_mod(
(int64_t)(input_temp[i] - input_temp[i + 1]), this->prime_mod);
compare_with_argmax[i / 2] = sci::neg_mod(
(int64_t)(input_argmax_temp[i] - input_argmax_temp[i + 1]),
this->prime_mod);
}
} else { // RING
for (int i = 0; i < (pad2); i += 2) {
compare_with[i / 2] = (input_temp[i] - input_temp[i + 1]);
compare_with_argmax[i / 2] =
(input_argmax_temp[i] - input_argmax_temp[i + 1]);
}
}
// one tournament round: winner-selection via ReLU + OT
if (this->l > 32) {
argmax_this_level_super_32(argmax_res, relu_res, compare_with_argmax,
compare_with, no_of_nodes_child);
} else {
argmax_this_level_sub_32(argmax_res, relu_res, compare_with_argmax,
compare_with, no_of_nodes_child);
}
// winner = ReLU(a - b) + b, for both the value and the index shares
if (this->algeb_str == FIELD) {
for (int i = 0; i < (no_of_nodes_child); i++) {
input_temp[i] =
(relu_res[i] + input_temp[2 * i + 1]) % this->prime_mod;
input_argmax_temp[i] =
(argmax_res[i] + input_argmax_temp[2 * i + 1]) % this->prime_mod;
}
} else { // RING
for (int i = 0; i < (no_of_nodes_child); i++) {
input_temp[i] = (relu_res[i] + input_temp[2 * i + 1]) & mask_l;
input_argmax_temp[i] =
(argmax_res[i] + input_argmax_temp[2 * i + 1]) & mask_l;
}
}
no_of_nodes = no_of_nodes_child;
}
// root of the tournament holds the final shares
maxi[0] = input_argmax_temp[0];
if (get_max_too) {
max_val[0] = input_temp[0];
}
if (this->algeb_str == RING) {
maxi[0] &= mask_l;
if (get_max_too) {
max_val[0] &= mask_l;
}
}
delete[] argmax_res;
delete[] relu_res;
delete[] compare_with_argmax;
delete[] compare_with;
delete[] input_temp;
delete[] input_argmax_temp;
}
/**************************************************************************************************
* Compute ArgMax for a tree level
**************************************************************************************************/
// Variant for l > 32: each OT message carries two 64-bit values packed into a
// 128-bit block (value selection in the high lane, index selection in the low
// lane).  Outputs fresh additive shares of the pairwise winners:
//   result[i] = share of max(a_i, b_i) - b_i   (i.e. ReLU of the difference)
//   argmax[i] = share of the corresponding index difference selection
void argmax_this_level_super_32(type *argmax, type *result, type *indexshare,
type *share, int num_relu) {
// drelu_ans: boolean share of the comparison bit MSB(a - b)
uint8_t *drelu_ans = new uint8_t[num_relu];
if (this->algeb_str == FIELD) {
relu_field_oracle->relu(result, share, num_relu, drelu_ans, true);
} else { // RING
relu_oracle->relu(result, share, num_relu, drelu_ans, true);
}
// Now perform x.msb(x)
// 2 OTs required with reversed roles
sci::block128 *ot_messages_0 = new sci::block128[num_relu];
sci::block128 *ot_messages_1 = new sci::block128[num_relu];
// first num_relu masks re-randomize the value shares, second num_relu the index shares
uint64_t *additive_masks = new uint64_t[num_relu * 2];
sci::block128 *received_shares = new sci::block128[num_relu];
uint64_t *received_shares_0 = new uint64_t[num_relu];
uint64_t *received_shares_1 = new uint64_t[num_relu];
if (this->algeb_str == FIELD) {
this->relu_field_oracle->triple_gen->prg->template random_mod_p<type>(
(type *)additive_masks, 2 * num_relu, this->prime_mod);
for (int i = 0; i < 2 * num_relu; i++) {
additive_masks[i] %= this->prime_mod;
}
} else { // RING
this->relu_oracle->triple_gen->prg->random_data(
additive_masks, 2 * num_relu * sizeof(type));
}
for (int i = 0; i < num_relu; i++) {
set_argmax_end_ot_messages_super_32(
ot_messages_0 + i, ot_messages_1 + i, share + i, indexshare + i,
drelu_ans + i, ((type *)additive_masks) + i, num_relu);
}
// run the two OT directions concurrently: each party is sender on one
// channel and receiver (choice bits = its DReLU shares) on the other
#pragma omp parallel num_threads(2)
{
if (omp_get_thread_num() == 1) {
if (party == sci::ALICE) {
if (this->algeb_str == FIELD) {
relu_field_oracle->otpack->iknp_reversed->recv(
received_shares, (bool *)drelu_ans, num_relu);
} else {
relu_oracle->otpack->iknp_reversed->recv(
received_shares, (bool *)drelu_ans, num_relu);
}
} else { // party == sci::BOB
if (this->algeb_str == FIELD) {
relu_field_oracle->otpack->iknp_reversed->send(
ot_messages_0, ot_messages_1, num_relu);
} else {
relu_oracle->otpack->iknp_reversed->send(ot_messages_0,
ot_messages_1, num_relu);
}
}
} else {
if (party == sci::ALICE) {
if (this->algeb_str == FIELD) {
relu_field_oracle->otpack->iknp_straight->send(
ot_messages_0, ot_messages_1, num_relu);
} else {
relu_oracle->otpack->iknp_straight->send(ot_messages_0,
ot_messages_1, num_relu);
}
} else { // party == sci::BOB
if (this->algeb_str == FIELD) {
relu_field_oracle->otpack->iknp_straight->recv(
received_shares, (bool *)drelu_ans, num_relu);
} else {
relu_oracle->otpack->iknp_straight->recv(
received_shares, (bool *)drelu_ans, num_relu);
}
}
}
}
// unpack the 128-bit OT payloads: high 64 bits = value, low 64 bits = index
for (int i = 0; i < num_relu; i++) {
received_shares_0[i] = _mm_extract_epi64(received_shares[i], 1);
received_shares_1[i] = _mm_extract_epi64(received_shares[i], 0);
}
// combine own mask with the received masked share to get output shares
for (int i = 0; i < num_relu; i++) {
result[i] = ((type *)additive_masks)[i] +
((type *)received_shares_0)[(8 / sizeof(type)) * i];
argmax[i] = ((type *)additive_masks)[i + num_relu] +
((type *)received_shares_1)[(8 / sizeof(type)) * i];
if (this->algeb_str == FIELD) {
result[i] %= this->prime_mod;
argmax[i] %= this->prime_mod;
}
}
delete[] additive_masks;
delete[] received_shares;
delete[] received_shares_0;
delete[] received_shares_1;
delete[] ot_messages_0;
delete[] ot_messages_1;
delete[] drelu_ans;
}
// Build one pair of 128-bit OT messages for the l > 32 path.
// Each message packs (masked value share, masked index share) as the
// (high, low) 64-bit lanes.  The message order is swapped according to this
// party's boolean comparison share (*xor_share), so the receiver's choice
// bit (its own share of the comparison) selects either the real shares or
// the zero shares — implementing the multiplex x * MSB(x).
void set_argmax_end_ot_messages_super_32(sci::block128 *ot_messages_0,
sci::block128 *ot_messages_1,
type *value_share, type *index_share,
uint8_t *xor_share,
type *additive_mask, int num_relu) {
type temp0, temp1, temp2, temp3;
// temp0/temp2: real value/index shares minus the fresh masks
// temp1/temp3: zero shares minus the same masks
if (this->algeb_str == FIELD) {
temp0 = sci::neg_mod((int64_t)value_share[0] - (int64_t)additive_mask[0],
this->prime_mod);
temp1 = sci::neg_mod((int64_t)0LL - (int64_t)additive_mask[0],
this->prime_mod);
temp2 = sci::neg_mod((int64_t)index_share[0] -
(int64_t)additive_mask[0 + num_relu],
this->prime_mod);
temp3 = sci::neg_mod((int64_t)0LL - (int64_t)additive_mask[0 + num_relu],
this->prime_mod);
} else { // RING
temp0 = (value_share[0] - additive_mask[0]);
temp1 = (0 - additive_mask[0]);
temp2 = (index_share[0] - additive_mask[0 + num_relu]);
temp3 = (0 - additive_mask[0 + num_relu]);
}
if (*xor_share == zero_small) {
ot_messages_0[0] = sci::makeBlock128(0ULL + temp0, 0ULL + temp2);
ot_messages_1[0] = sci::makeBlock128(0ULL + temp1, 0ULL + temp3);
} else {
ot_messages_0[0] = sci::makeBlock128(0ULL + temp1, 0ULL + temp3);
ot_messages_1[0] = sci::makeBlock128(0ULL + temp0, 0ULL + temp2);
}
}
// Variant for l <= 32: value and index selections fit together in a single
// 64-bit OT message (split l/l bits in FIELD mode, 32/32 bits in RING mode),
// halving the OT payload compared to the 128-bit super_32 path.
void argmax_this_level_sub_32(type *argmax, type *result, type *indexshare,
type *share, int num_relu) {
// boolean share of the comparison bit MSB(a - b)
uint8_t *drelu_ans = new uint8_t[num_relu];
if (this->algeb_str == FIELD) {
relu_field_oracle->relu(result, share, num_relu, drelu_ans, true);
} else { // RING
relu_oracle->relu(result, share, num_relu, drelu_ans, true);
}
// Now perform x.msb(x)
// 2 OTs required with reversed roles
uint64_t **ot_messages = new uint64_t *[num_relu];
for (int i = 0; i < num_relu; i++) {
ot_messages[i] = new uint64_t[2];
}
// first num_relu masks for the value shares, second num_relu for the index shares
uint64_t *additive_masks = new uint64_t[2 * num_relu];
uint64_t *received_shares = new uint64_t[num_relu];
uint64_t *received_shares_0 = new uint64_t[num_relu];
uint64_t *received_shares_1 = new uint64_t[num_relu];
if (this->algeb_str == FIELD) {
this->relu_field_oracle->triple_gen->prg->template random_mod_p<type>(
(type *)additive_masks, 2 * num_relu, this->prime_mod);
for (int i = 0; i < 2 * num_relu; i++) {
additive_masks[i] %= this->prime_mod;
}
} else { // RING
this->relu_oracle->triple_gen->prg->random_data(
additive_masks, 2 * num_relu * sizeof(type));
}
// Each party sends on one OT channel and receives on the other, with its
// DReLU shares as the choice bits (roles reversed between the channels).
if (party == sci::ALICE) {
for (int i = 0; i < num_relu; i++) {
set_argmax_end_ot_messages_sub_32(
ot_messages[i], share + i, indexshare + i, drelu_ans + i,
((type *)additive_masks) + i, num_relu);
}
if (this->algeb_str == FIELD) {
relu_field_oracle->otpack->iknp_straight->send(ot_messages, num_relu,
this->l * 2);
relu_field_oracle->otpack->iknp_reversed->recv(
received_shares, drelu_ans, num_relu, this->l * 2);
} else { // RING
relu_oracle->otpack->iknp_straight->send(ot_messages, num_relu, 64);
relu_oracle->otpack->iknp_reversed->recv(received_shares, drelu_ans,
num_relu, 64);
}
} else // party = sci::BOB
{
for (int i = 0; i < num_relu; i++) {
set_argmax_end_ot_messages_sub_32(
ot_messages[i], share + i, indexshare + i, drelu_ans + i,
((type *)additive_masks) + i, num_relu);
}
if (this->algeb_str == FIELD) {
relu_field_oracle->otpack->iknp_straight->recv(
received_shares, drelu_ans, num_relu, this->l * 2);
relu_field_oracle->otpack->iknp_reversed->send(ot_messages, num_relu,
this->l * 2);
} else { // RING
relu_oracle->otpack->iknp_straight->recv(received_shares, drelu_ans,
num_relu, 64);
relu_oracle->otpack->iknp_reversed->send(ot_messages, num_relu, 64);
}
}
// split each received payload back into (value, index) halves
for (int i = 0; i < num_relu; i++) {
if (this->algeb_str == FIELD) {
// tightly optimized communication for the field case
received_shares_0[i] = (received_shares[i] & mask_upper) >> this->l;
received_shares_1[i] = received_shares[i] & mask_lower;
} else { // RING
received_shares_0[i] = (received_shares[i] & mask32_upper) >> 32;
received_shares_1[i] = received_shares[i] & mask32_lower;
}
}
// combine own mask with the received masked share to get output shares
for (int i = 0; i < num_relu; i++) {
result[i] = ((type *)additive_masks)[i] +
((type *)received_shares_0)[(8 / sizeof(type)) * i];
argmax[i] = ((type *)additive_masks)[i + num_relu] +
((type *)received_shares_1)[(8 / sizeof(type)) * i];
if (this->algeb_str == FIELD) {
result[i] %= this->prime_mod;
argmax[i] %= this->prime_mod;
}
}
delete[] additive_masks;
delete[] received_shares;
delete[] received_shares_0;
delete[] received_shares_1;
delete[] drelu_ans;
for (int i = 0; i < num_relu; i++) {
delete[] ot_messages[i];
}
delete[] ot_messages;
}
// Build one pair of 64-bit OT messages for the l <= 32 path.
// The masked value share occupies the upper half (shifted by l in FIELD
// mode, by 32 in RING mode) and the masked index share the lower half.
// As in the super_32 variant, the message order is swapped by this party's
// boolean comparison share so the receiver's choice bit completes the
// x * MSB(x) multiplex.
void set_argmax_end_ot_messages_sub_32(uint64_t *ot_messages,
type *value_share, type *index_share,
uint8_t *xor_share,
type *additive_mask, int num_relu) {
uint64_t temp0, temp1, temp2, temp3;
uint64_t mask_upper_general, mask_lower_general;
// temp0/temp2: real value/index shares minus the fresh masks
// temp1/temp3: zero shares minus the same masks
if (this->algeb_str == FIELD) {
temp0 = sci::neg_mod((int64_t)value_share[0] - (int64_t)additive_mask[0],
this->prime_mod);
temp1 = sci::neg_mod((int64_t)0LL - (int64_t)additive_mask[0],
this->prime_mod);
temp2 = sci::neg_mod((int64_t)index_share[0] -
(int64_t)additive_mask[0 + num_relu],
this->prime_mod);
temp3 = sci::neg_mod((int64_t)0LL - (int64_t)additive_mask[0 + num_relu],
this->prime_mod);
temp0 = temp0 << this->l;
temp1 = temp1 << this->l;
mask_upper_general = mask_upper;
mask_lower_general = mask_lower;
} else { // RING
temp0 = (type)(value_share[0] - additive_mask[0]);
temp0 = temp0 << 32;
temp1 = (type)(0 - additive_mask[0]);
temp1 = temp1 << 32;
temp2 = (type)(index_share[0] - additive_mask[0 + num_relu]);
temp3 = (type)(0 - additive_mask[0 + num_relu]);
mask_upper_general = mask32_upper;
mask_lower_general = mask32_lower;
}
if (*xor_share == zero_small) {
ot_messages[0] = (mask_upper_general & (0ULL + temp0)) ^
(mask_lower_general & (0ULL + temp2));
ot_messages[1] = (mask_upper_general & (0ULL + temp1)) ^
(mask_lower_general & (0ULL + temp3));
} else {
ot_messages[0] = (mask_upper_general & (0ULL + temp1)) ^
(mask_lower_general & (0ULL + temp3));
ot_messages[1] = (mask_upper_general & (0ULL + temp0)) ^
(mask_lower_general & (0ULL + temp2));
}
}
};
#endif // ARGMAX_H__
|
cglobals.h | #ifndef RTGLOBALS
#define RTGLOBALS
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16
#define WARP_SIZE 32
#define Z_ORDER_BLOCK_SIZE 16
#define CMP_RESULTS_BLOCK_SIZE 256
#define HRT_RAY_MISS 0xFFFFFFFE
#define HRT_RAY_HIT 0xFFFFFFFF
#define GMAXVARS 64
#define INVALID_TEXTURE 0xFFFFFFFE
#define TEX_POINT_SAM 1
#define TEX_ALPHASRC_W 2
#define TEX_CLAMP_U 4
#define TEX_CLAMP_V 8
#define TEX_COORD_SECOND 16
#define TEX_COORD_CAM_PROJ 32
// these masks are related because the data are stored in one int32 variable, triAlphaTest
//
#define ALPHA_MATERIAL_MASK 0x00FFFFFF
#define ALPHA_LIGHTMESH_MASK 0xFF000000
#define ALPHA_LIGHTMESH_SHIFT 24
#define ALPHA_OPACITY_TEX_HAPPEND 0x80000000
#define ALPHA_TRANSPARENCY_HAPPEND 0x40000000
#define TEXMATRIX_ID_MASK 0x00FFFFFF // for texture slots - 'color_texMatrixId' and e.t.c
#define TEXSAMPLER_TYPE_MASK 0xFF000000 // for texture slots - 'color_texMatrixId' and e.t.c
#ifndef M_HALFPI
#define M_HALFPI 1.57079632679489661923f
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846f
#endif
#ifndef M_TWOPI
#define M_TWOPI 6.28318530717958647692f
#endif
#ifndef INV_PI
#define INV_PI 0.31830988618379067154f
#endif
#ifndef INV_TWOPI
#define INV_TWOPI 0.15915494309189533577f
#endif
#ifndef INV_FOURPI
#define INV_FOURPI 0.07957747154594766788f
#endif
#ifndef DEG_TO_RAD
#define DEG_TO_RAD (3.14159265358979323846f / 180.f)
#endif
#define GEPSILON 5e-6f
#define DEPSILON 1e-20f
#define DEPSILON2 1e-30f
#define PEPSILON 0.025f
#define PG_SCALE 1000.0f
/**
* These defines are for the QMC remap table, to support different mappings at run time.
* For example, you may decide to map (0,1) to screen (x,y) and (2,3) to DOF (x,y), or
* you may decide to map (0,1) to screen (x,y) and (2,3,4) to material sampling.
* If no mapping is present in the table (id == -1), then pseudo-random numbers should be used.
*
*/
#define QMC_VAR_SCR_X 0
#define QMC_VAR_SCR_Y 1
#define QMC_VAR_DOF_X 2
#define QMC_VAR_DOF_Y 3
#define QMC_VAR_SRC_A 4
#define QMC_VAR_MAT_L 5
#define QMC_VAR_MAT_0 6
#define QMC_VAR_MAT_1 7
#define QMC_VAR_LGT_N 8
#define QMC_VAR_LGT_0 9
#define QMC_VAR_LGT_1 10
#define QMC_VAR_LGT_2 11
/**
* Note that unlike QMC, MMLT doesn't use a remap table.
* These offsets are direct offsets into the random vector table (in floats).
*
*/
#define MMLT_HEAD_TOTAL_SIZE 12 //
// [0-3] : LENS; 4 in total
//
#define MMLT_DIM_SCR_X 0
#define MMLT_DIM_SCR_Y 1
#define MMLT_DIM_DOF_X 2
#define MMLT_DIM_DOF_Y 3
// [4-10]: LIGHT; 7 in total
//
#define MMLT_DIM_LGT_X 4
#define MMLT_DIM_LGT_Y 5
#define MMLT_DIM_LGT_Z 6
#define MMLT_DIM_LGT_W 7
#define MMLT_DIM_LGT_X1 8
#define MMLT_DIM_LGT_Y1 9
#define MMLT_DIM_LGT_N 10
// [11] : SPLIT;
//
#define MMLT_DIM_SPLIT 11
#define MMLT_FLOATS_PER_MLAYER 7
#define MMLT_FLOATS_PER_SAMPLE 3
#define MMLT_FLOATS_PER_BOUNCE (MMLT_FLOATS_PER_SAMPLE + MMLT_FLOATS_PER_MLAYER)
#define MMLT_COMPRESSED_F_PERB 6
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#define KMLT_HEAD_SIZE 4
#define KMLT_PER_LIGHT 4
#define KMLT_PER_MATERIAL 6
// How a mega-texture slot is consumed by the renderer.
enum MEGATEX_USAGE{ MEGATEX_SHADING = 1,
MEGATEX_SHADING_HDR = 2,
MEGATEX_NORMAL = 3,
MEGATEX_OPACITY = 4,
};
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef __CUDACC__
#else
#ifdef OCL_COMPILER // OpenCL
// --- OpenCL compatibility layer: constructors for vector types, thread-id
// --- macros, barriers, and small math helpers shared with the C++ build.
#define ALIGN_S(x) __attribute__ ((aligned (x)))
static inline ushort2 make_ushort2(ushort x, ushort y) { ushort2 res; res.x = x; res.y = y; return res; }
static inline int2 make_int2(int a, int b) { int2 res; res.x = a; res.y = b; return res; }
static inline int4 make_int4(int a, int b, int c, int d) { int4 res; res.x = a; res.y = b; res.z = c; res.w = d; return res; }
#define GLOBAL_ID_X get_global_id(0)
#define GLOBAL_ID_Y get_global_id(1)
#define LOCAL_ID_X get_local_id(0)
#define LOCAL_ID_Y get_local_id(1)
#define _PACKED __attribute__ ((packed))
#define __device__
//#define SYNCTHREADS barrier(CLK_LOCAL_MEM_FENCE | CLK_GLOBAL_MEM_FENCE)
#define SYNCTHREADS_LOCAL barrier(CLK_LOCAL_MEM_FENCE)
#define SYNCTHREADS_GLOBAL barrier(CLK_GLOBAL_MEM_FENCE)
// largest of the three components
static inline float maxcomp(float3 v) { return fmax(v.x, fmax(v.y, v.z)); }
#define NULL 0
static inline ushort4 make_ushort4(ushort a, ushort b, ushort c, ushort d)
{
ushort4 res;
res.x = a;
res.y = b;
res.z = c;
res.w = d;
return res;
}
// Atomic float add emulated with a compare-and-swap loop on the bit pattern
// (OpenCL has no native float atomic add in core).
static inline void atomic_addf(volatile __global float *source, const float operand)
{
union {
unsigned int intVal;
float floatVal;
} newVal;
union {
unsigned int intVal;
float floatVal;
} prevVal;
do {
prevVal.floatVal = *source;
newVal.floatVal = prevVal.floatVal + operand;
} while (atomic_cmpxchg((volatile global unsigned int *)source, prevVal.intVal, newVal.intVal) != prevVal.intVal);
}
// 3-component dot product of float4 operands (w is ignored)
static inline float dot3 (const float4 u, const float4 v) { return (u.x*v.x + u.y*v.y + u.z*v.z); }
// column-major 4x4 matrix
typedef struct float4x4T
{
float4 m_col[4];
} float4x4;
typedef struct float3x3T
{
float3 row[3];
} float3x3;
static inline float2 make_float2(float a, float b)
{
float2 res;
res.x = a;
res.y = b;
return res;
}
static inline float3 make_float3(float a, float b, float c)
{
float3 res;
res.x = a;
res.y = b;
res.z = c;
return res;
}
static inline float4 make_float4(float a, float b, float c, float d)
{
float4 res;
res.x = a;
res.y = b;
res.z = c;
res.w = d;
return res;
}
static inline float2 to_float2(float4 f4)
{
float2 res;
res.x = f4.x;
res.y = f4.y;
return res;
}
static inline float3 to_float3(float4 f4)
{
float3 res;
res.x = f4.x;
res.y = f4.y;
res.z = f4.z;
return res;
}
static inline float4 to_float4(float3 v, float w)
{
float4 res;
res.x = v.x;
res.y = v.y;
res.z = v.z;
res.w = w;
return res;
}
// full affine transform: rotation/scale columns plus translation column 3
static inline float3 mul4x3(float4x4 m, float3 v)
{
float3 res;
res.x = v.x * m.m_col[0].x + v.y * m.m_col[1].x + v.z * m.m_col[2].x + m.m_col[3].x;
res.y = v.x * m.m_col[0].y + v.y * m.m_col[1].y + v.z * m.m_col[2].y + m.m_col[3].y;
res.z = v.x * m.m_col[0].z + v.y * m.m_col[1].z + v.z * m.m_col[2].z + m.m_col[3].z;
return res;
}
// linear part only (no translation) — for directions/normals
static inline float3 mul3x3(float4x4 m, float3 v)
{
float3 res;
res.x = v.x * m.m_col[0].x + v.y * m.m_col[1].x + v.z * m.m_col[2].x;
res.y = v.x * m.m_col[0].y + v.y * m.m_col[1].y + v.z * m.m_col[2].y;
res.z = v.x * m.m_col[0].z + v.y * m.m_col[1].z + v.z * m.m_col[2].z;
return res;
}
// returns (sin(a), cos(a)), using OpenCL's combined sincos builtin
static inline float2 sincos2f(float a_value)
{
float cosVal;
float sinVal = sincos(a_value, &cosVal);
return make_float2(sinVal, cosVal);
}
#else // Common C++
// --- C++ compatibility layer: maps OpenCL keywords/types onto host-side
// --- equivalents so the same kernel sources compile as ordinary C++.
#ifdef WIN32
#define ALIGN_S(x) __declspec(align(x))
#else
#define ALIGN_S(x) __attribute__ ((aligned (x)))
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846f
#endif
#include "../../HydraAPI/hydra_api/LiteMath.h"
using namespace LiteMath;
#include "../../HydraAPI/hydra_api/HR_HDRImage.h"
typedef HydraRender::HDRImage4f HDRImage4f;
typedef unsigned int uint;
typedef unsigned short ushort;
typedef struct float3x3T
{
float3 row[3];
} float3x3;
// returns (sin(a), cos(a)); no combined sincos on the host path
static inline float2 sincos2f(float a_value)
{
return make_float2(sin(a_value), cos(a_value));
}
// OpenCL address-space qualifiers are no-ops (or const) on the host
#define __global
#define __constant const
#define __private
#define __read_only
typedef int image1d_t;
typedef int image1d_buffer_t;
typedef int image2d_t;
typedef int sampler_t;
const int CLK_NORMALIZED_COORDS_TRUE = 1;
const int CLK_NORMALIZED_COORDS_FALSE = 2;
const int CLK_ADDRESS_CLAMP = 4;
const int CLK_FILTER_NEAREST = 8;
const int CLK_FILTER_LINEAR = 16;
const int CLK_ADDRESS_REPEAT = 32;
#define COMMON_CPLUS_PLUS_CODE 1
// NOTE(review): these reinterpret-casts violate strict aliasing; a memcpy
// (or C++20 bit_cast) would be the well-defined equivalent — confirm the
// build relies on -fno-strict-aliasing.
static inline int as_int(float x) { return reinterpret_cast<int&> (x); }
static inline float as_float(int x) { return reinterpret_cast<float&>(x); }
#define _PACKED
typedef unsigned short half;
// host-side stub: discards the value and stores 0 (no half conversion here)
static inline void vstore_half(float data, size_t offset, __global half *p) { p[offset] = 0; }
// NOTE(review): sign(0.0f) returns -1.0f here, unlike OpenCL's sign() which
// returns 0 — confirm callers never depend on the zero case.
static inline float sign(float a) { return (a > 0.0f) ? 1.0f : -1.0f; }
static inline int2 make_int2(int a, int b) { int2 res; res.x = a; res.y = b; return res; }
using std::isinf;
#define ENABLE_OPACITY_TEX 1
#define SHADOW_TRACE_COLORED_SHADOWS 1
#define ENABLE_BLINN 1
#include "globals_sys.h"
#endif
#endif
typedef __global const int4* texture2d_t;
#ifndef INFINITY
#define INFINITY (1e38f)
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// One material sample: per the field names, a color weight, a sampled
// direction, its pdf, and flag bits.
typedef struct MatSampleT
{
float3 color;
float3 direction;
float pdf;
int flags;
} MatSample;
// Renderer feature/behavior flag bits packed into a single integer.
enum FLAG_BITS{HRT_COMPUTE_SHADOWS = 1,
HRT_DISABLE_SHADING = 2,
HRT_DIRECT_LIGHT_MODE = 4,
HRT_UNIFIED_IMAGE_SAMPLING = 8,
HRT_PRODUCTION_IMAGE_SAMPLING = 16, // 256 coherent rays per pixel.
HRT_USE_MIS = 32,
HRT_DUMMY2 = 64, // !!!!!!!! DONT USE THIS FLAG !!!! UNKNOWN BUG, PT DOES NOT CONTRIBUTE TO SCREEN.
HRT_STORE_SUBPIXELS = 128,
HRT_FORWARD_TRACING = 256, /// tracing from light to eye; otherwise from eye to light.
HRT_DRAW_LIGHT_LT = 512,
HRT_3WAY_MIS_WEIGHTS = 1024,
HRT_STORE_RAY_SAMPLES = 8192,
HRT_ENABLE_MMLT = 16384,
HRT_ENABLE_SBPT = 32768,
HRT_INDIRECT_LIGHT_MODE = 65536,
HRT_STUPID_PT_MODE = 65536*8,
HRT_NO_RANDOM_LIGHTS_SELECT = 65536*16,
HRT_DUMMY5 = 65536*32, //
HRT_DUMMY6 = 65536*64, // tracing photons to form a special photon map to speed up direct light sampling
HRT_DUMMY7 = 65536*128,
HRT_DUMMY8 = 65536*256,
HRT_ENABLE_PT_CAUSTICS = 65536*2048,
HRT_USE_BOTH_PHOTON_MAPS = 65536*4096,
HRT_ENABLE_QMC_ONE_SEED = 65536*8192, // !!!!!!!! DONT MOVE THIS FLAG !!!! See random generator implementation
HRT_ENABLE_COHERENT_PT = 65536*16384,
};
// Background mapping mode (camera-projected backplate vs. spherical environment).
enum BACK_MODE {MODE_CAM_PROJECTED = 0, MODE_SPHERICAL = 1};
// Indices into the renderer's integer-variable table (see GMAXVARS).
enum VARIABLE_NAMES { // int vars
//
HRT_ENABLE_DOF = 0,
HRT_QMC_VARIANT = 1,
HRT_FIRST_BOUNCE_STORE_CACHE = 2,
HRT_ENABLE_MRAYS_COUNTERS = 3,
HRT_DEBUG_OUTPUT = 4,
HRT_MEASURE_RAYS_TYPE = 5,
HRT_BLACK_DIFFUSE_OFFSET = 6,
HRT_STORE_SHADOW_COLOR_W = 7,
HRT_WHITE_DIFFUSE_OFFSET = 8,
HRT_TRACE_DEPTH = 9,
HRT_PHOTONS_STORE_BOUNCE = 10,
HRT_PHOTONS_GARTHER_BOUNCE = 11,
HRT_RAYS_APPENDBUFFER_SIZE = 12,
HRT_DIFFUSE_TRACE_DEPTH = 13,
HRT_DISPLAY_IC_INTERMEDIATE = 14,
HRT_PT_FILTER_TYPE = 15,
HRT_ENABLE_BAKE = 16,
HRT_SILENT_MODE = 17,
HRT_VAR_ENABLE_RR = 18,
HRT_RENDER_LAYER = 19,
HRT_RENDER_LAYER_DEPTH = 20,
HRT_IC_ENABLED = 21,
HRT_IMAP_ENABLED = 22,
HRT_SPHEREMAP_TEXID0 = 23,
HRT_SPHEREMAP_TEXID1 = 24,
HRT_USE_GAMMA_FOR_ENV = 25,
HRT_HRT_SCENE_HAVE_PORTALS = 26,
HRT_SPHEREMAP_TEXMATRIXID0 = 27,
HRT_SPHEREMAP_TEXMATRIXID1 = 28,
HRT_ENABLE_PATH_REGENERATE = 29,
HRT_ENV_PDF_TABLE_ID = 30,
HRT_MLT_MAX_NUMBERS = 31,
HRT_MLT_ITERS_MULT = 32,
HRT_MMLT_BURN_ITERS = 33,
HRT_MMLT_FIRST_BOUNCE = 34,
HRT_SHADOW_MATTE_BACK = 35,
HRT_MAX_SAMPLES_PER_PIXEL = 36,
HRT_CONTRIB_SAMPLES = 37,
HRT_BOX_MODE_ON = 38,
HRT_KMLT_OR_QMC_LGT_BOUNCES = 39,
HRT_KMLT_OR_QMC_MAT_BOUNCES = 40,
HRT_SHADOW_MATTE_BACK_MODE = 41,
HRT_SHADOW_MATTE_BACK_COLOR_X= 42,
HRT_SHADOW_MATTE_BACK_COLOR_Y= 43,
HRT_SHADOW_MATTE_BACK_COLOR_Z= 44,
};
// Indices into the renderer's float-variable table.
enum VARIABLE_FLOAT_NAMES{ // float vars
//
HRT_DOF_LENS_RADIUS = 0,
HRT_DOF_FOCAL_PLANE_DIST = 1,
HRT_TILT_ROT_X = 2,
HRT_TRACE_PROCEEDINGS_TRESHOLD = 3,
HRT_TILT_ROT_Y = 4,
HRT_CAUSTIC_POWER_MULT = 5,
HRT_IMAGE_GAMMA = 6,
HRT_TEXINPUT_GAMMA = 7,
HRT_ENV_COLOR_X = 8,
HRT_ENV_COLOR_Y = 9,
HRT_ENV_COLOR_Z = 10,
HRT_ENV_COLOR2_X = 11,
HRT_ENV_COLOR2_Y = 12,
HRT_ENV_COLOR2_Z = 13,
HRT_CAM_FOV = 14,
HRT_PATH_TRACE_ERROR = 15,
HRT_PATH_TRACE_CLAMPING = 16,
HRT_DUMMY_VARIABLE = 17,
HRT_BSPHERE_CENTER_X = 18,
HRT_BSPHERE_CENTER_Y = 19,
HRT_BSPHERE_CENTER_Z = 20,
HRT_BSPHERE_RADIUS = 21,
HRT_GVOXEL_SIZE = 22,
HRT_FOV_X = 23, // viewport parameters
HRT_FOV_Y = 24,
HRT_WIDTH_F = 25,
HRT_HEIGHT_F = 26,
HRT_ABLOW_OFFSET_X = 27,
HRT_ABLOW_OFFSET_Y = 28,
HRT_ABLOW_SCALE_X = 29,
HRT_ABLOW_SCALE_Y = 30,
HRT_MMLT_IMPLICIT_FIXED_PROB = 31,
HRT_MMLT_STEP_SIZE_POWER = 32,
HRT_MMLT_STEP_SIZE_COEFF = 33,
HRT_MLT_SCREEN_SCALE_X = 34,
HRT_MLT_SCREEN_SCALE_Y = 35,
HRT_BACK_TEXINPUT_GAMMA = 36,
};
// Output (AOV) layer selector, stored in the HRT_RENDER_LAYER int variable.
enum RENDER_LAYER {
LAYER_COLOR = 0,
LAYER_POSITIONS = 1,
LAYER_NORMALS = 2,
LAYER_TEXCOORD = 3,
LAYER_TEXCOLOR_AND_MATERIAL = 4, // material mask
LAYER_INCOMING_PRIMARY = 5, // incoming primary
LAYER_INCOMING_RADIANCE = 6, // incoming secondary
LAYER_COLOR_PRIMARY_AND_REST = 7, // primary + refractions and other bounces
LAYER_COLOR_THE_REST = 8,
LAYER_PRIMARY = 9,
LAYER_SECONDARY = 10
}; // refractions, and other bounces
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Interleave the bits of two 16-bit coordinates into a 32-bit Morton (Z-order) index.
// a_mortonTable256 is a 256-entry table that spreads the 8 bits of a byte apart by one position.
static inline uint ZIndex(ushort x, ushort y, __constant ushort* a_mortonTable256)
{
  const uint hiY = a_mortonTable256[y >> 8];
  const uint hiX = a_mortonTable256[x >> 8];
  const uint loY = a_mortonTable256[y & 0xFF];
  const uint loX = a_mortonTable256[x & 0xFF];
  return (hiY << 17) | (hiX << 16) | (loY << 1) | loX;
}
// Gather every third bit (starting at 'stride', 0..2) of a 30-bit 3D Morton code
// back into a contiguous 10-bit coordinate.
static inline ushort ExtractFromZIndex3D(uint zIndex, int stride)
{
  uint gathered = 0;
  for (int bitId = 0; bitId < 10; bitId++)
  {
    const uint mask = 1u << (3 * bitId + stride);
    if ((zIndex & mask) != 0)
      gathered |= (1u << bitId);
  }
  return (ushort)gathered;
}
// Gather every second bit (starting at 'stride', 0..1) of a 32-bit 2D Morton code
// back into a contiguous 16-bit coordinate.
static inline ushort ExtractFromZIndex2D(uint zIndex, int stride)
{
  uint gathered = 0;
  for (int bitId = 0; bitId < 16; bitId++)
  {
    const uint mask = 1u << (2 * bitId + stride);
    if ((zIndex & mask) != 0)
      gathered |= (1u << bitId);
  }
  return (ushort)gathered;
}
// Deinterleave the even bits (the X coordinate) of a 2D Morton code.
static inline ushort ExtractXFromZIndex(uint zIndex)
{
  uint acc = 0;
  for (int k = 0; k < 16; k++)
    acc |= ((1u << (2 * k)) & zIndex) >> k;
  return (ushort)acc;
}
// Deinterleave the odd bits (the Y coordinate) of a 2D Morton code.
static inline ushort ExtractYFromZIndex(uint zIndex)
{
  uint acc = 0;
  for (int k = 0; k < 16; k++)
    acc |= ((1u << (2 * k + 1)) & zIndex) >> k;
  return (ushort)(acc >> 1);
}
// Number of thread blocks needed to cover 'elems' work items.
// Note: returns at least 1 even for elems == 0 (matches historical launch logic).
static inline int blocks(int elems, int threadsPerBlock)
{
  const int  fullBlocks = elems / threadsPerBlock;
  const bool exactFit   = (elems % threadsPerBlock == 0) && (elems >= threadsPerBlock);
  return exactFit ? fullBlocks : fullBlocks + 1;
}
// size_t variant of blocks(): thread blocks needed to cover 'elems' items (at least 1).
static inline size_t blocksST(size_t elems, int threadsPerBlock)
{
  const size_t fullBlocks = elems / threadsPerBlock;
  const bool   exactFit   = (elems % threadsPerBlock == 0) && (elems >= threadsPerBlock);
  return exactFit ? fullBlocks : fullBlocks + 1;
}
// Round 'elems' up to a whole multiple of threadsPerBlock (minimum one full block).
static inline size_t roundBlocks(size_t elems, int threadsPerBlock)
{
  if (elems < threadsPerBlock)
    return (size_t)threadsPerBlock;
  // here elems >= threadsPerBlock: ceil-divide, then scale back up
  const size_t fullBlocks = elems / threadsPerBlock;
  const size_t numBlocks  = (elems % threadsPerBlock == 0) ? fullBlocks : fullBlocks + 1;
  return numBlocks * threadsPerBlock;
}
// Plain row-major 2D index.
static inline uint Index2D(uint x, uint y, int pitch)
{
  const uint rowStart = y*pitch;
  return rowStart + x;
}
// 2D index for an image stored as Z_ORDER_BLOCK_SIZE x Z_ORDER_BLOCK_SIZE tiles,
// with Morton (Z-order) layout inside each tile.
static inline uint IndexZBlock2D(int x, int y, int pitch, __constant ushort* a_mortonTable) // window_size[0]
{
  const uint inBlockX  = x % Z_ORDER_BLOCK_SIZE;
  const uint inBlockY  = y % Z_ORDER_BLOCK_SIZE;
  const uint blockX    = x / Z_ORDER_BLOCK_SIZE;
  const uint blockY    = y / Z_ORDER_BLOCK_SIZE;
  const uint rowBlocks = pitch / Z_ORDER_BLOCK_SIZE;
  const uint blockStart = (blockX + blockY*rowBlocks)*Z_ORDER_BLOCK_SIZE*Z_ORDER_BLOCK_SIZE;
  return blockStart + ZIndex(inBlockX, inBlockY, a_mortonTable);
}
// Inverse of IndexZBlock2D: recover (x, y) pixel coordinates from a tiled Z-order offset.
// 'h' is unused but kept for interface symmetry with the forward mapping.
static inline ushort2 GetXYFromZBlockIndex(uint a_offset, int w, int h)
{
  const int cellsPerBlock = Z_ORDER_BLOCK_SIZE*Z_ORDER_BLOCK_SIZE;
  const int blocksPerRow  = w / Z_ORDER_BLOCK_SIZE;
  const int blockId = a_offset / cellsPerBlock;
  const int cellId  = a_offset % cellsPerBlock;
  const int blockY  = blockId / blocksPerRow;
  const int blockX  = blockId % blocksPerRow;
  const int localX  = (int)ExtractXFromZIndex(cellId);
  const int localY  = (int)ExtractYFromZIndex(cellId);
  ushort2 res;
  res.x = (ushort)(blockX*Z_ORDER_BLOCK_SIZE + localX);
  res.y = (ushort)(blockY*Z_ORDER_BLOCK_SIZE + localY);
  return res;
}
// Spread the low 10 bits of x so that input bit i lands at output bit 3*i,
// then shift by 'offset' to pick the interleave lane (0 for x, 1 for y, 2 for z).
// NOTE(review): x is assumed to be in [0, 1023]; higher bits are not masked off
// and would corrupt the result — confirm callers guarantee this.
static inline uint SpreadBits(int x, int offset)
{
x = (x | (x << 10)) & 0x000F801F;
x = (x | (x << 4)) & 0x00E181C3;
x = (x | (x << 2)) & 0x03248649;
x = (x | (x << 2)) & 0x09249249;
return (uint)(x) << offset;
}
// Build a 30-bit 3D Morton code by interleaving the bits of x, y and z.
static inline uint GetMortonNumber(int x, int y, int z)
{
return SpreadBits(x, 0) | SpreadBits(y, 1) | SpreadBits(z, 2);
}
// Mirror 'dir' about 'normal' and normalize the result.
// Equivalent to dir - 2*dot(dir, normal)*normal, written so the scalar product
// is applied to the normal first.
static inline float3 reflect(float3 dir, float3 normal)
{
// Do not use this function for "wo" and "wh" microfacets terms.
// They need the formula: 2.0f * dot(wo, wh) * wh - wo;
return normalize((normal * dot(dir, normal) * (-2.0f)) + dir);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
///// a simple tone mapping
// Simple saturate "tone mapping": clamp each channel to at most 1.0.
static inline float3 ToneMapping(float3 color)
{
  const float r = fmin(color.x, 1.0f);
  const float g = fmin(color.y, 1.0f);
  const float b = fmin(color.z, 1.0f);
  return make_float3(r, g, b);
}
// Same as ToneMapping, but also clamps the fourth (alpha) channel.
static inline float4 ToneMapping4(float4 color)
{
  const float r = fmin(color.x, 1.0f);
  const float g = fmin(color.y, 1.0f);
  const float b = fmin(color.z, 1.0f);
  const float a = fmin(color.w, 1.0f);
  return make_float4(r, g, b, a);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
////
/**
  \brief Pack an RGB color with components in [0,1] into 0xAABBGGRR (alpha forced to 255).
  Components are clamped to [0,1] before conversion: the previous direct
  float->uchar cast is undefined behavior for values outside [0,255], so HDR or
  negative input could wrap or produce garbage. In-range input is unchanged.
*/
static inline uint RealColorToUint32_f3(float3 real_color)
{
  const float r = fmin(fmax(real_color.x, 0.0f), 1.0f)*255.0f;
  const float g = fmin(fmax(real_color.y, 0.0f), 1.0f)*255.0f;
  const float b = fmin(fmax(real_color.z, 0.0f), 1.0f)*255.0f;
  const unsigned char red = (unsigned char)r, green = (unsigned char)g, blue = (unsigned char)b;
  return red | (green << 8) | (blue << 16) | 0xFF000000;
}
/**
  \brief Pack an RGBA color with components in [0,1] into 0xAABBGGRR.
  Components are clamped to [0,1] before conversion: the previous direct
  float->uchar cast is undefined behavior for values outside [0,255].
  In-range input is unchanged.
*/
static inline uint RealColorToUint32(float4 real_color)
{
  const float r = fmin(fmax(real_color.x, 0.0f), 1.0f)*255.0f;
  const float g = fmin(fmax(real_color.y, 0.0f), 1.0f)*255.0f;
  const float b = fmin(fmax(real_color.z, 0.0f), 1.0f)*255.0f;
  const float a = fmin(fmax(real_color.w, 0.0f), 1.0f)*255.0f;
  const unsigned char red   = (unsigned char)r;
  const unsigned char green = (unsigned char)g;
  const unsigned char blue  = (unsigned char)b;
  const unsigned char alpha = (unsigned char)a;
  return red | (green << 8) | (blue << 16) | (alpha << 24);
}
/**
  \brief Component-wise 1/d that never divides by zero.
  Components with |d| <= ~1e-36 are replaced by +/-1e-36 (sign kept via copysign),
  yielding a huge-but-finite value instead of +/-inf; typically used to
  precompute ray inverse directions for slab-test box intersection.
*/
static inline float3 SafeInverse(float3 d)
{
const float ooeps = 1.0e-36f; // Avoid div by zero.
float3 res;
res.x = 1.0f / (fabs(d.x) > ooeps ? d.x : copysign(ooeps, d.x));
res.y = 1.0f / (fabs(d.y) > ooeps ? d.y : copysign(ooeps, d.y));
res.z = 1.0f / (fabs(d.z) > ooeps ? d.z : copysign(ooeps, d.z));
return res;
}
// Scale-aware epsilon for ray-origin offsetting: proportional to the largest
// coordinate magnitude of hitPos, never smaller than 2*GEPSILON*GEPSILON.
static inline float epsilonOfPos(float3 hitPos) { return fmax(fmax(fabs(hitPos.x), fmax(fabs(hitPos.y), fabs(hitPos.z))), 2.0f*GEPSILON)*GEPSILON; }
// MIS balance-heuristic term: |p|, with non-finite pdfs treated as zero.
static inline float misHeuristicPower1(float p)
{
  if (!isfinite(p))
    return 0.0f;
  return fabs(p);
}
// MIS power-heuristic term: p^2, with a non-finite square treated as zero.
static inline float misHeuristicPower2(float p)
{
  const float p2 = p*p;
  return isfinite(p2) ? p2 : 0.0f;
}
// Balance-heuristic MIS weight of sampling strategy 'a' against strategy 'b'.
static inline float misWeightHeuristic(float a, float b)
{
  const float pa = misHeuristicPower1(a);
  const float pb = misHeuristicPower1(b);
  const float w  = pa / fmax(pa + pb, DEPSILON2);
  return isfinite(w) ? w : 0.0f;
}
// Balance-heuristic MIS weight of strategy 'a' against strategies 'b' and 'c';
// a non-finite 'a' yields weight 1.
static inline float misWeightHeuristic3(float a, float b, float c)
{
  if (!isfinite(a))
    return 1.0f;
  const float denom = fmax(misHeuristicPower1(a) + misHeuristicPower1(b) + misHeuristicPower1(c), DEPSILON2);
  const float w     = fabs(a) / denom;
  return isfinite(w) ? w : 0.0f;
}
/**
  \brief Offset a secondary-ray origin away from the surface to avoid self-intersection.
  \param a_hitPos      - world space position on surface
  \param a_surfaceNorm - surface normal at a_hitPos
  \param a_sampleDir   - ray direction in which we are going to trace the reflected ray
  \return ray position pushed along +/- the normal by a position-scaled epsilon
         (sign chosen so the offset is on the same side as a_sampleDir)
*/
static inline float3 OffsRayPos(const float3 a_hitPos, const float3 a_surfaceNorm, const float3 a_sampleDir)
{
  const float side = (dot(a_sampleDir, a_surfaceNorm) < 0.0f) ? -1.0f : 1.0f;
  const float eps  = epsilonOfPos(a_hitPos);
  return a_hitPos + side*eps*a_surfaceNorm;
}
/**
  \brief Offset a shadow-ray origin away from the surface to avoid self-intersection.
  \param a_hitPos        - world space position on surface
  \param a_surfaceNorm   - surface normal at a_hitPos
  \param a_sampleDir     - direction towards the light sample
  \param a_shadowOffsAux - per-polygon auxiliary shadow offset added to the epsilon
  \return offset ray position
*/
static inline float3 OffsShadowRayPos(const float3 a_hitPos, const float3 a_surfaceNorm, const float3 a_sampleDir, const float a_shadowOffsAux)
{
  const float side = (dot(a_sampleDir, a_surfaceNorm) < 0.0f) ? -1.0f : 1.0f;
  const float eps  = epsilonOfPos(a_hitPos);
  return a_hitPos + side*(eps + a_shadowOffsAux)*a_surfaceNorm;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Load a float4x4 from 16 consecutive floats in global memory (4 floats per column).
static inline float4x4 make_float4x4(__global const float* a_data)
{
  float4x4 matrix;
  for (int col = 0; col < 4; col++)
    matrix.m_col[col] = make_float4(a_data[4*col + 0], a_data[4*col + 1], a_data[4*col + 2], a_data[4*col + 3]);
  return matrix;
}
// Rotation by a_angle radians around the X axis (column-major storage).
static inline float4x4 make_matrix_rotationX(float a_angle)
{
  const float c = cos(a_angle);
  const float s = sin(a_angle);
  float4x4 res;
  res.m_col[0] = make_float4(1.0f, 0.0f, 0.0f, 0.0f);
  res.m_col[1] = make_float4(0.0f,   +c,   +s, 0.0f);
  res.m_col[2] = make_float4(0.0f,   -s,   +c, 0.0f);
  res.m_col[3] = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
  return res;
}
// Rotation by a_angle radians around the Y axis (column-major storage).
static inline float4x4 make_matrix_rotationY(float a_angle)
{
  const float c = cos(a_angle);
  const float s = sin(a_angle);
  float4x4 res;
  res.m_col[0] = make_float4(  +c, 0.0f,   -s, 0.0f);
  res.m_col[1] = make_float4(0.0f, 1.0f, 0.0f, 0.0f);
  res.m_col[2] = make_float4(  +s, 0.0f,   +c, 0.0f);
  res.m_col[3] = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
  return res;
}
// Multiply a column-major 4x4 matrix by a 4-component vector: res = m * v.
static inline float4 mul4x4x4(float4x4 m, float4 v)
{
float4 res;
res.x = v.x * m.m_col[0].x + v.y * m.m_col[1].x + v.z * m.m_col[2].x + v.w * m.m_col[3].x;
res.y = v.x * m.m_col[0].y + v.y * m.m_col[1].y + v.z * m.m_col[2].y + v.w * m.m_col[3].y;
res.z = v.x * m.m_col[0].z + v.y * m.m_col[1].z + v.z * m.m_col[2].z + v.w * m.m_col[3].z;
res.w = v.x * m.m_col[0].w + v.y * m.m_col[1].w + v.z * m.m_col[2].w + v.w * m.m_col[3].w;
return res;
}
#ifndef COMMON_CPLUS_PLUS_CODE
// Transform a 3D point by an affine 4x4 matrix (w is implicitly 1; translation applied).
static inline float3 mul(float4x4 m, float3 v)
{
float3 res;
res.x = v.x * m.m_col[0].x + v.y * m.m_col[1].x + v.z * m.m_col[2].x + m.m_col[3].x;
res.y = v.x * m.m_col[0].y + v.y * m.m_col[1].y + v.z * m.m_col[2].y + m.m_col[3].y;
res.z = v.x * m.m_col[0].z + v.y * m.m_col[1].z + v.z * m.m_col[2].z + m.m_col[3].z;
return res;
}
#endif
// Build a 3x3 matrix whose rows are a, b, c.
static inline float3x3 make_float3x3(float3 a, float3 b, float3 c)
{
  float3x3 m;
  m.row[0] = a;
  m.row[1] = b;
  m.row[2] = c;
  return m;
}
// Build a 3x3 matrix whose columns are a, b, c.
static inline float3x3 make_float3x3_by_columns(float3 a, float3 b, float3 c)
{
  float3x3 m;
  m.row[0] = make_float3(a.x, b.x, c.x);
  m.row[1] = make_float3(a.y, b.y, c.y);
  m.row[2] = make_float3(a.z, b.z, c.z);
  return m;
}
// Multiply a row-major 3x3 matrix by a vector: res = m * v.
static inline float3 mul3x3x3(float3x3 m, const float3 v)
{
float3 res;
res.x = m.row[0].x*v.x + m.row[0].y*v.y + m.row[0].z*v.z;
res.y = m.row[1].x*v.x + m.row[1].y*v.y + m.row[1].z*v.z;
res.z = m.row[2].x*v.x + m.row[2].y*v.y + m.row[2].z*v.z;
return res;
}
// 3x3 matrix product m1*m2, computed column-by-column of m2.
static inline float3x3 mul3x3x3x3(float3x3 m1, float3x3 m2)
{
float3 column1 = mul3x3x3(m1, make_float3(m2.row[0].x, m2.row[1].x, m2.row[2].x));
float3 column2 = mul3x3x3(m1, make_float3(m2.row[0].y, m2.row[1].y, m2.row[2].y));
float3 column3 = mul3x3x3(m1, make_float3(m2.row[0].z, m2.row[1].z, m2.row[2].z));
return make_float3x3_by_columns(column1, column2, column3);
}
// 3x3 matrix inverse via the adjugate (cofactor) method.
// NOTE(review): there is no guard for det == 0 — a singular input yields inf/nan entries.
static inline float3x3 inverse(float3x3 a)
{
float det = a.row[0].x * (a.row[1].y * a.row[2].z - a.row[1].z * a.row[2].y) -
a.row[0].y * (a.row[1].x * a.row[2].z - a.row[1].z * a.row[2].x) +
a.row[0].z * (a.row[1].x * a.row[2].y - a.row[1].y * a.row[2].x);
float3x3 b;
b.row[0].x = (a.row[1].y * a.row[2].z - a.row[1].z * a.row[2].y);
b.row[0].y = -(a.row[0].y * a.row[2].z - a.row[0].z * a.row[2].y);
b.row[0].z = (a.row[0].y * a.row[1].z - a.row[0].z * a.row[1].y);
b.row[1].x = -(a.row[1].x * a.row[2].z - a.row[1].z * a.row[2].x);
b.row[1].y = (a.row[0].x * a.row[2].z - a.row[0].z * a.row[2].x);
b.row[1].z = -(a.row[0].x * a.row[1].z - a.row[0].z * a.row[1].x);
b.row[2].x = (a.row[1].x * a.row[2].y - a.row[1].y * a.row[2].x);
b.row[2].y = -(a.row[0].x * a.row[2].y - a.row[0].y * a.row[2].x);
b.row[2].z = (a.row[0].x * a.row[1].y - a.row[0].y * a.row[1].x);
// scale the adjugate by 1/det
float s = 1.0f / det;
b.row[0] *= s;
b.row[1] *= s;
b.row[2] *= s;
return b;
}
#ifndef COMMON_CPLUS_PLUS_CODE
// General 4x4 matrix inverse via cofactor expansion (classic Cramer's-rule
// implementation using precomputed 2x2 sub-determinant "pairs" in tmp[]).
// NOTE(review): no guard for a singular matrix — det == 0 yields inf/nan entries.
static inline float4x4 inverse4x4(float4x4 m1)
{
float tmp[12]; // temp array for pairs
float4x4 m;
// calculate pairs for first 8 elements (cofactors)
//
tmp[0] = m1.m_col[2].z * m1.m_col[3].w;
tmp[1] = m1.m_col[3].z * m1.m_col[2].w;
tmp[2] = m1.m_col[1].z * m1.m_col[3].w;
tmp[3] = m1.m_col[3].z * m1.m_col[1].w;
tmp[4] = m1.m_col[1].z * m1.m_col[2].w;
tmp[5] = m1.m_col[2].z * m1.m_col[1].w;
tmp[6] = m1.m_col[0].z * m1.m_col[3].w;
tmp[7] = m1.m_col[3].z * m1.m_col[0].w;
tmp[8] = m1.m_col[0].z * m1.m_col[2].w;
tmp[9] = m1.m_col[2].z * m1.m_col[0].w;
tmp[10] = m1.m_col[0].z * m1.m_col[1].w;
tmp[11] = m1.m_col[1].z * m1.m_col[0].w;
// calculate first 8 m1.rowents (cofactors)
//
m.m_col[0].x = tmp[0] * m1.m_col[1].y + tmp[3] * m1.m_col[2].y + tmp[4] * m1.m_col[3].y;
m.m_col[0].x -= tmp[1] * m1.m_col[1].y + tmp[2] * m1.m_col[2].y + tmp[5] * m1.m_col[3].y;
m.m_col[0].y = tmp[1] * m1.m_col[0].y + tmp[6] * m1.m_col[2].y + tmp[9] * m1.m_col[3].y;
m.m_col[0].y -= tmp[0] * m1.m_col[0].y + tmp[7] * m1.m_col[2].y + tmp[8] * m1.m_col[3].y;
m.m_col[0].z = tmp[2] * m1.m_col[0].y + tmp[7] * m1.m_col[1].y + tmp[10] * m1.m_col[3].y;
m.m_col[0].z -= tmp[3] * m1.m_col[0].y + tmp[6] * m1.m_col[1].y + tmp[11] * m1.m_col[3].y;
m.m_col[0].w = tmp[5] * m1.m_col[0].y + tmp[8] * m1.m_col[1].y + tmp[11] * m1.m_col[2].y;
m.m_col[0].w -= tmp[4] * m1.m_col[0].y + tmp[9] * m1.m_col[1].y + tmp[10] * m1.m_col[2].y;
m.m_col[1].x = tmp[1] * m1.m_col[1].x + tmp[2] * m1.m_col[2].x + tmp[5] * m1.m_col[3].x;
m.m_col[1].x -= tmp[0] * m1.m_col[1].x + tmp[3] * m1.m_col[2].x + tmp[4] * m1.m_col[3].x;
m.m_col[1].y = tmp[0] * m1.m_col[0].x + tmp[7] * m1.m_col[2].x + tmp[8] * m1.m_col[3].x;
m.m_col[1].y -= tmp[1] * m1.m_col[0].x + tmp[6] * m1.m_col[2].x + tmp[9] * m1.m_col[3].x;
m.m_col[1].z = tmp[3] * m1.m_col[0].x + tmp[6] * m1.m_col[1].x + tmp[11] * m1.m_col[3].x;
m.m_col[1].z -= tmp[2] * m1.m_col[0].x + tmp[7] * m1.m_col[1].x + tmp[10] * m1.m_col[3].x;
m.m_col[1].w = tmp[4] * m1.m_col[0].x + tmp[9] * m1.m_col[1].x + tmp[10] * m1.m_col[2].x;
m.m_col[1].w -= tmp[5] * m1.m_col[0].x + tmp[8] * m1.m_col[1].x + tmp[11] * m1.m_col[2].x;
// calculate pairs for second 8 m1.rowents (cofactors)
//
tmp[0] = m1.m_col[2].x * m1.m_col[3].y;
tmp[1] = m1.m_col[3].x * m1.m_col[2].y;
tmp[2] = m1.m_col[1].x * m1.m_col[3].y;
tmp[3] = m1.m_col[3].x * m1.m_col[1].y;
tmp[4] = m1.m_col[1].x * m1.m_col[2].y;
tmp[5] = m1.m_col[2].x * m1.m_col[1].y;
tmp[6] = m1.m_col[0].x * m1.m_col[3].y;
tmp[7] = m1.m_col[3].x * m1.m_col[0].y;
tmp[8] = m1.m_col[0].x * m1.m_col[2].y;
tmp[9] = m1.m_col[2].x * m1.m_col[0].y;
tmp[10] = m1.m_col[0].x * m1.m_col[1].y;
tmp[11] = m1.m_col[1].x * m1.m_col[0].y;
// calculate second 8 m1 (cofactors)
//
m.m_col[2].x = tmp[0] * m1.m_col[1].w + tmp[3] * m1.m_col[2].w + tmp[4] * m1.m_col[3].w;
m.m_col[2].x -= tmp[1] * m1.m_col[1].w + tmp[2] * m1.m_col[2].w + tmp[5] * m1.m_col[3].w;
m.m_col[2].y = tmp[1] * m1.m_col[0].w + tmp[6] * m1.m_col[2].w + tmp[9] * m1.m_col[3].w;
m.m_col[2].y -= tmp[0] * m1.m_col[0].w + tmp[7] * m1.m_col[2].w + tmp[8] * m1.m_col[3].w;
m.m_col[2].z = tmp[2] * m1.m_col[0].w + tmp[7] * m1.m_col[1].w + tmp[10] * m1.m_col[3].w;
m.m_col[2].z -= tmp[3] * m1.m_col[0].w + tmp[6] * m1.m_col[1].w + tmp[11] * m1.m_col[3].w;
m.m_col[2].w = tmp[5] * m1.m_col[0].w + tmp[8] * m1.m_col[1].w + tmp[11] * m1.m_col[2].w;
m.m_col[2].w -= tmp[4] * m1.m_col[0].w + tmp[9] * m1.m_col[1].w + tmp[10] * m1.m_col[2].w;
m.m_col[3].x = tmp[2] * m1.m_col[2].z + tmp[5] * m1.m_col[3].z + tmp[1] * m1.m_col[1].z;
m.m_col[3].x -= tmp[4] * m1.m_col[3].z + tmp[0] * m1.m_col[1].z + tmp[3] * m1.m_col[2].z;
m.m_col[3].y = tmp[8] * m1.m_col[3].z + tmp[0] * m1.m_col[0].z + tmp[7] * m1.m_col[2].z;
m.m_col[3].y -= tmp[6] * m1.m_col[2].z + tmp[9] * m1.m_col[3].z + tmp[1] * m1.m_col[0].z;
m.m_col[3].z = tmp[6] * m1.m_col[1].z + tmp[11] * m1.m_col[3].z + tmp[3] * m1.m_col[0].z;
m.m_col[3].z -= tmp[10] * m1.m_col[3].z + tmp[2] * m1.m_col[0].z + tmp[7] * m1.m_col[1].z;
m.m_col[3].w = tmp[10] * m1.m_col[2].z + tmp[4] * m1.m_col[0].z + tmp[9] * m1.m_col[1].z;
m.m_col[3].w -= tmp[8] * m1.m_col[1].z + tmp[11] * m1.m_col[2].z + tmp[5] * m1.m_col[0].z;
// calculate matrix inverse
//
const float k = 1.0f / (m1.m_col[0].x * m.m_col[0].x + m1.m_col[1].x * m.m_col[0].y +
m1.m_col[2].x * m.m_col[0].z + m1.m_col[3].x * m.m_col[0].w);
const float4 vK = make_float4(k,k,k,k);
m.m_col[0] = m.m_col[0]*vK;
m.m_col[1] = m.m_col[1]*vK;
m.m_col[2] = m.m_col[2]*vK;
m.m_col[3] = m.m_col[3]*vK;
return m;
}
// Look At matrix creation
// return the inverse view matrix
// NOTE(review): the rows produced are the camera basis vectors with -basis.eye
// translation, i.e. this looks like the world-to-camera (view) matrix rather
// than its inverse — confirm against how callers use it.
//
static inline float4x4 lookAt(float3 eye, float3 center, float3 up)
{
float3 x, y, z; // basis; will make a rotation matrix
z.x = eye.x - center.x;
z.y = eye.y - center.y;
z.z = eye.z - center.z;
z = normalize(z);
y.x = up.x;
y.y = up.y;
y.z = up.z;
x = cross(y, z); // X vector = Y cross Z
y = cross(z, x); // Recompute Y = Z cross X
// cross product gives area of parallelogram, which is < 1.0 for
// non-perpendicular unit-length vectors; so normalize x, y here
x = normalize(x);
y = normalize(y);
float4x4 M;
M.m_col[0] = make_float4(x.x, y.x, z.x, 0.0f);
M.m_col[1] = make_float4(x.y, y.y, z.y, 0.0f);
M.m_col[2] = make_float4(x.z, y.z, z.z, 0.0f);
M.m_col[3] = make_float4(-x.x * eye.x - x.y * eye.y - x.z*eye.z,
-y.x * eye.x - y.y * eye.y - y.z*eye.z,
-z.x * eye.x - z.y * eye.y - z.z*eye.z,
1.0f );
return M;
}
// Transpose of a column-major 4x4 matrix: column i of the result is row i of the input.
static inline float4x4 transpose(const float4x4 a_mat)
{
float4x4 res;
res.m_col[0] = make_float4(a_mat.m_col[0].x, a_mat.m_col[1].x, a_mat.m_col[2].x, a_mat.m_col[3].x);
res.m_col[1] = make_float4(a_mat.m_col[0].y, a_mat.m_col[1].y, a_mat.m_col[2].y, a_mat.m_col[3].y);
res.m_col[2] = make_float4(a_mat.m_col[0].z, a_mat.m_col[1].z, a_mat.m_col[2].z, a_mat.m_col[3].z);
res.m_col[3] = make_float4(a_mat.m_col[0].w, a_mat.m_col[1].w, a_mat.m_col[2].w, a_mat.m_col[3].w);
return res;
}
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Compute the world-space direction of the primary (eye) ray through pixel (x, y),
// by unprojecting the pixel-center NDC point with the inverse view-projection matrix.
static inline float3 EyeRayDir(float x, float y, float w, float h, float4x4 a_mViewProjInv) // g_mViewProjInv
{
float4 pos = make_float4( 2.0f * (x + 0.5f) / w - 1.0f,
-2.0f * (y + 0.5f) / h + 1.0f,
0.0f,
1.0f );
pos = mul4x4x4(a_mViewProjInv, pos);
pos /= pos.w; // perspective divide
pos.y *= (-1.0f); // flip Y back to image convention
return normalize(to_float3(pos));
}
// Transform a ray (origin + direction) in place by an affine matrix;
// the direction is rebuilt from two transformed points and re-normalized.
static inline void matrix4x4f_mult_ray3(float4x4 a_mWorldViewInv, __private float3* ray_pos, __private float3* ray_dir) // g_mWorldViewInv
{
  const float3 newPos = mul(a_mWorldViewInv, (*ray_pos));
  const float3 farPos = mul(a_mWorldViewInv, ((*ray_pos) + 100.0f*(*ray_dir)));
  (*ray_dir) = normalize(farPos - newPos);
  (*ray_pos) = newPos;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
////
// Multiply a row-major 3x3 matrix stored as 9 consecutive floats by a vector.
static inline float3 matrix3x3f_mult_float3(__global const float* M, float3 v)
{
  float3 res;
  res.x = M[0] * v.x + M[1] * v.y + M[2] * v.z;
  res.y = M[3] * v.x + M[4] * v.y + M[5] * v.z;
  res.z = M[6] * v.x + M[7] * v.y + M[8] * v.z;
  return res;
}
// Rotation matrix about an arbitrary axis v (Rodrigues' rotation formula).
// v is assumed to be a unit vector — the result is wrong otherwise.
static inline float3x3 RotateAroundVector3x3(float3 v, float rotAngle)
{
const float cos_t = cos(rotAngle);
const float sin_t = sin(rotAngle);
float3x3 m;
m.row[0].x = (1.0f - cos_t)*v.x*v.x + cos_t;
m.row[0].y = (1.0f - cos_t)*v.x*v.y - sin_t*v.z;
m.row[0].z = (1.0f - cos_t)*v.x*v.z + sin_t*v.y;
m.row[1].x = (1.0f - cos_t)*v.y*v.x + sin_t*v.z;
m.row[1].y = (1.0f - cos_t)*v.y*v.y + cos_t;
m.row[1].z = (1.0f - cos_t)*v.y*v.z - sin_t*v.x;
m.row[2].x = (1.0f - cos_t)*v.x*v.z - sin_t*v.y; // v.x*v.z == v.z*v.x
m.row[2].y = (1.0f - cos_t)*v.z*v.y + sin_t*v.x;
m.row[2].z = (1.0f - cos_t)*v.z*v.z + cos_t;
return m;
}
// Homogeneous rotation matrix about an arbitrary unit axis v (Rodrigues' formula),
// matching RotateAroundVector3x3 with a (0,0,0,1) translation column.
// FIX: the translation components m_col[3].x/y/z were previously set to 1.0f,
// which translated every transformed point (w == 1) by (1,1,1); a pure rotation
// must have zero translation.
static inline float4x4 RotateAroundVector4x4(float3 v, float rotAngle)
{
  const float cos_t = cos(rotAngle);
  const float sin_t = sin(rotAngle);
  float4x4 m;
  m.m_col[0].x = (1.0f - cos_t)*v.x*v.x + cos_t;
  m.m_col[1].x = (1.0f - cos_t)*v.x*v.y - sin_t*v.z;
  m.m_col[2].x = (1.0f - cos_t)*v.x*v.z + sin_t*v.y;
  m.m_col[3].x = 0.0f;
  m.m_col[0].y = (1.0f - cos_t)*v.y*v.x + sin_t*v.z;
  m.m_col[1].y = (1.0f - cos_t)*v.y*v.y + cos_t;
  m.m_col[2].y = (1.0f - cos_t)*v.y*v.z - sin_t*v.x;
  m.m_col[3].y = 0.0f;
  m.m_col[0].z = (1.0f - cos_t)*v.x*v.z - sin_t*v.y;
  m.m_col[1].z = (1.0f - cos_t)*v.z*v.y + sin_t*v.x;
  m.m_col[2].z = (1.0f - cos_t)*v.z*v.z + cos_t;
  m.m_col[3].z = 0.0f;
  m.m_col[0].w = 0.0f;
  m.m_col[1].w = 0.0f;
  m.m_col[2].w = 0.0f;
  m.m_col[3].w = 1.0f;
  return m;
}
// Squared Euclidean distance between points a and b.
static inline float DistanceSquared(float3 a, float3 b)
{
  const float3 d = b - a;
  return dot(d, d);
}
// Pdf (w.r.t. solid angle) of uniform sampling inside a cone whose half-angle
// has cosine cosThetaMax. Note: divides by zero when cosThetaMax == 1.
static inline float UniformConePdf(float cosThetaMax)
{
  return 1.0f / (2.0f * M_PI * (1.0f - cosThetaMax));
}
// Map two uniform [0,1) random numbers to a uniformly distributed direction on the unit sphere.
static inline float3 UniformSampleSphere(float u1, float u2)
{
  const float z   = 1.0f - 2.0f * u1;
  const float r   = sqrt(fmax(0.0f, 1.0f - z*z));
  const float phi = 2.0f * M_PI * u2;
  return make_float3(r * cos(phi), r * sin(phi), z);
}
// Linear interpolation: a at t == 0, b at t == 1.
static inline float lerp2(float t, float a, float b)
{
  const float w = 1.0f - t;
  return w * a + t * b;
}
// Uniformly sample a direction inside a cone (half-angle cosine 'costhetamax')
// around axis z, with (x, y, z) an orthonormal basis.
static inline float3 UniformSampleCone(float u1, float u2, float costhetamax, float3 x, float3 y, float3 z)
{
  const float cosT = lerp2(u1, costhetamax, 1.0f);
  const float sinT = sqrt(1.0f - cosT*cosT);
  const float phi  = u2 * 2.0f * M_PI;
  return cos(phi) * sinT * x + sin(phi) * sinT * y + cosT * z;
}
// Ray/sphere intersection (rayDir assumed normalized).
// Returns (tNear, tFar), or (-1e28, -1e28) when the ray misses the sphere.
static inline float2 RaySphereIntersect(float3 rayPos, float3 rayDir, float3 sphPos, float radius)
{
  const float3 toRay = rayPos - sphPos;
  const float b = dot(toRay, rayDir);
  const float c = dot(toRay, toRay) - radius*radius;
  const float disc = b * b - c;
  float2 res;
  if (disc < 0.0f)
  {
    res.x = -1e28f;
    res.y = -1e28f;
    return res;
  }
  const float sqrtd = sqrt(disc);
  const float t1 = -b - sqrtd;
  const float t2 = -b + sqrtd;
  res.x = fmin(t1, t2);
  res.y = fmax(t1, t2);
  return res;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// One triangle of an object list; w components are unused padding/extra data.
struct ObjectListTriangle
{
float4 v1;
float4 v2;
float4 v3;
};
// One sphere primitive of an object list.
struct ObjectListSphere
{
float3 pos;
float r;
};
// Header of a packed primitive list: the triangle array immediately follows this
// struct in memory, and the sphere array follows the triangles (see GetTriangles/GetSpheres).
struct ObjectList
{
#ifndef OCL_COMPILER
ObjectList() { m_triangleCount = m_offset = dummy1 = dummy2 = 0; }
inline ObjectListTriangle* GetTriangles() const { return (ObjectListTriangle*)(((char*)this) + sizeof(ObjectList)); }
inline const ObjectListSphere* GetSpheres() const { return (const ObjectListSphere*)((char*)this + sizeof(ObjectList) + m_triangleCount*sizeof(ObjectListTriangle)); }
#endif
int m_offset;
int m_triangleCount;
int dummy1; // padding to 16 bytes
int dummy2;
};
// By-value accessors usable from both OpenCL and C++ code paths.
static inline int GetNumTriangles(struct ObjectList ol)
{
  return ol.m_triangleCount;
}
static inline int GetOffset(struct ObjectList ol)
{
  return ol.m_offset;
}
static inline int GetNumPrimitives(struct ObjectList ol)
{
  return GetNumTriangles(ol);
}
// Minimal hit record: ray parameter plus primitive/instance/geometry ids.
// primId == -1 means "no hit"; the top 2 bits of geomId carry the BVH tree id.
struct ALIGN_S(16) Lite_HitT
{
float t;
int primId;
int instId;
int geomId;
};
typedef struct Lite_HitT Lite_Hit;
// Create a "no hit yet" record with ray parameter t; the BVH tree id is packed
// into the top 2 bits of geomId (the geometry part starts at zero).
static inline Lite_Hit Make_Lite_Hit(float t, int a_treeId)
{
  Lite_Hit hit;
  hit.t      = t;
  hit.primId = -1;
  hit.instId = -1;
  hit.geomId = (a_treeId << 30) & 0xC0000000; // geom bits (low 30) are zero initially
  return hit;
}
static inline bool HitNone(const Lite_Hit a_hit)
{
  return (a_hit.primId == -1) || !isfinite(a_hit.t);
}
static inline bool HitSome(const Lite_Hit a_hit)
{
  return (a_hit.primId != -1) && isfinite(a_hit.t);
}
// The most significant bit of the packed left-offset marks a leaf node.
static inline int IS_LEAF(int a_leftOffsetAndLeaf)
{
  return a_leftOffsetAndLeaf & 0x80000000;
}
// Combine a 31-bit child offset with the leaf flag held in the MSB of 'leaf'.
static inline int PACK_LEAF_AND_OFFSET(int a_leftOffset, int leaf)
{
  return (a_leftOffset & 0x7fffffff) | (leaf & 0x80000000);
}
// Strip the leaf flag, leaving the 31-bit child offset.
static inline int EXTRACT_OFFSET(int a_leftOffsetAndLeaf)
{
  return a_leftOffsetAndLeaf & 0x7fffffff;
}
// I know about bit fields, but they did not work in CUDA, hence the manual packing below.
//
// BVH node, 32 bytes: two AABB corners plus a packed left-child offset (leaf flag
// in the MSB) and an escape index used for stackless traversal.
struct BVHNodeT
{
#ifndef OCL_COMPILER
BVHNodeT()
{
m_leftOffsetAndLeaf = 0xffffffff;
m_escapeIndex = 0xffffffff;
m_boxMin = float3(INFINITY, INFINITY, INFINITY);
m_boxMax = float3(-INFINITY, -INFINITY, -INFINITY);
}
inline unsigned int Leaf() const { return (m_leftOffsetAndLeaf & 0x80000000) >> 31; }
inline void SetLeaf(unsigned int a_Leaf) { m_leftOffsetAndLeaf = (m_leftOffsetAndLeaf & 0x7fffffff) | ((a_Leaf) << 31); }
inline void SetLeftOffset(unsigned int in_offset) { m_leftOffsetAndLeaf = (m_leftOffsetAndLeaf & 0x80000000) | (in_offset & 0x7fffffff); }
inline void SetObjectListOffset(unsigned int in_offset)
{
if (Leaf())
SetLeftOffset(in_offset);
}
inline unsigned int GetLeftOffset() const { return m_leftOffsetAndLeaf & 0x7fffffff; }
inline unsigned int GetRightOffset() const { return GetLeftOffset() + 1; }
inline unsigned int GetObjectListOffset() const { return GetLeftOffset(); }
inline void SetInstance(unsigned int a_Leaf) { m_escapeIndex = a_Leaf; }
inline unsigned int Instance() const { return (m_escapeIndex == 1); }
#endif
float3 m_boxMin;
unsigned int m_leftOffsetAndLeaf; // bit 31 = leaf flag, bits 0..30 = left child / object list offset
float3 m_boxMax;
unsigned int m_escapeIndex; // escape index for stackless traversal; also instance marker (== 1)
};
typedef struct BVHNodeT BVHNode;
// A node with both packed fields equal to 0xffffffff is the "empty/invalid" marker.
static inline bool IsValidNode(const BVHNode a_node)
{
  const bool isEmptyMarker = (a_node.m_leftOffsetAndLeaf == 0xffffffff) && (a_node.m_escapeIndex == 0xffffffff);
  return !isEmptyMarker;
}
// Unpacked view of the per-ray 32-bit flags word:
// bits 0..7 = diffuse bounce count, bits 8..15 = total bounce count,
// bits 16..31 = state/event flags (see unpackRayFlags / MATERIAL_EVENT below).
struct _PACKED RayFlagsT
{
unsigned char diffuseBounceNum;
unsigned char bounceNum;
unsigned short otherFlags;
};
typedef struct RayFlagsT RayFlags;
// Bit flags describing the kind of material interaction that occurred at a bounce.
enum MATERIAL_EVENT {
RAY_EVENT_S = 1, ///< Indicates Specular reflection or refraction (check for RAY_EVENT_T)
RAY_EVENT_D = 2, ///< Indicates Diffuse reflection or translucency (check for RAY_EVENT_T)
RAY_EVENT_G = 4, ///< Indicates Glossy reflection or refraction (check for RAY_EVENT_T)
RAY_EVENT_T = 8, ///< Indicates Transparency or refraction.
RAY_EVENT_V = 16, ///< Indicates Volume scattering, not used for a while
RAY_EVENT_TOUT = 32, ///< Indicates Transparency Outside of water or glass etc. (old RAY_IS_INSIDE_TRANSPARENT_OBJECT = 128)
RAY_EVENT_TNINGLASS = 64,
};
// Classification helpers for a sampled material event (MatSample.flags).
static inline bool isPureSpecular(const MatSample a_sample)
{
  return (a_sample.flags & (RAY_EVENT_S | RAY_EVENT_T)) != 0;
}
static inline bool isDiffuse(const MatSample a_sample)
{
  return (a_sample.flags & RAY_EVENT_D) != 0;
}
static inline bool isGlossy(const MatSample a_sample)
{
  return (a_sample.flags & RAY_EVENT_G) != 0;
}
static inline bool isTransparent(const MatSample a_sample)
{
  return (a_sample.flags & RAY_EVENT_T) != 0;
}
// Ray state flags stored in the high 16 bits of the packed per-ray flags word
// (they share the value space with MATERIAL_EVENT bits 1..64).
enum {
RAY_GRAMMAR_DIRECT_LIGHT = 64,
RAY_GRAMMAR_OUT_OF_SCENE = 128,
RAY_DUMMY_FLAG_NOT_USED = 256,
RAY_HIT_SURFACE_FROM_OTHER_SIDE = 2048,
RAY_IS_DEAD = 4096, // set when ray had account environment or died on the surface
RAY_SHADE_FROM_OTHER_SIDE = 8192,
RAY_SHADE_FROM_SKY_LIGHT = 16384,
RAY_WILL_DIE_NEXT_BOUNCE = 32768, // set when ray had account only light on next bounce and then immediately die
};
// The packed per-ray word layout: [31..16] state flags, [15..8] bounce count, [7..0] diffuse bounce count.
static inline uint unpackRayFlags(uint a_flags)
{
  return (a_flags & 0xFFFF0000) >> 16;
}
static inline uint packRayFlags(uint a_oldData, uint a_flags)
{
  return (a_oldData & 0x0000FFFF) | (a_flags << 16);
}
static inline uint unpackBounceNum(uint a_flags)
{
  return (a_flags & 0x0000FF00) >> 8;
}
static inline uint unpackBounceNumDiff(uint a_flags)
{
  return a_flags & 0x000000FF;
}
static inline uint packBounceNum(uint a_oldData, uint a_bounceNum)
{
  return (a_oldData & 0xFFFF00FF) | (a_bounceNum << 8);
}
static inline uint packBounceNumDiff(uint a_oldData, uint a_bounceNum)
{
  return (a_oldData & 0xFFFFFF00) | (a_bounceNum);
}
// A ray is active while it has neither left the scene nor been killed.
static inline bool rayIsActiveS(RayFlags a_flags)
{
  return (a_flags.otherFlags & (RAY_GRAMMAR_OUT_OF_SCENE | RAY_IS_DEAD)) == 0;
}
static inline bool rayIsActiveU(uint a_flags)
{
  return (unpackRayFlags(a_flags) & (RAY_GRAMMAR_OUT_OF_SCENE | RAY_IS_DEAD)) == 0;
}
// True for primary rays and for rays whose whole path so far was purely specular
// (no diffuse or glossy event recorded in the flags yet).
static inline bool isEyeRay(uint a_flags)
{
  if (unpackBounceNum(a_flags) == 0)
    return true;
  const uint otherFlags = unpackRayFlags(a_flags);
  const bool sawNonSpecular = (otherFlags & RAY_EVENT_D) || (otherFlags & RAY_EVENT_G);
  return !sawNonSpecular;
}
/**
\brief This structure is used as transit to pass MIS-weights-important-data from previous bounce to current (or from current to next).
*/
typedef struct MisDataT
{
float matSamplePdf; ///< previous angle pdf (pdfW) that was used for sampling the material.
float cosThetaPrev; ///< previous angle cos; it allows to compute projected angle pdf (pdfWP = pdfW/cosThetaPrev);
int prevMaterialOffset; ///< offset in material buffer to material leaf (elemental brdf) that was sampled on prev bounce; it is needed to disable caustics;
int isSpecular; ///< indicates if the previous bounce was pure specular;
} MisData;
// MIS transit data for a freshly spawned eye ray: unit pdf and cosine,
// no previous material, and the "specular" flag set.
static inline MisData makeInitialMisData()
{
  MisData data;
  data.isSpecular         = 1;
  data.prevMaterialOffset = -1;
  data.matSamplePdf       = 1.0f;
  data.cosThetaPrev       = 1.0f;
  return data;
}
// Pack a unit normal into 32 bits: 16-bit quantized x (its lowest bit replaced by
// the sign of z) in the low half, 16-bit quantized y in the high half.
static inline unsigned int encodeNormal(float3 n)
{
const int x = (int)(n.x*32767.0f);
const int y = (int)(n.y*32767.0f);
const unsigned int sign = (n.z >= 0) ? 0 : 1; // z sign stored in bit 0
const unsigned int sx = ((unsigned int)(x & 0xfffe) | sign);
const unsigned int sy = ((unsigned int)(y & 0xffff) << 16);
return (sx | sy);
}
// Inverse of encodeNormal: x and y are dequantized (the casts to short restore the
// sign), and z is reconstructed from sqrt(1 - x^2 - y^2) with the stored sign bit.
static inline float3 decodeNormal(unsigned int a_data)
{
const unsigned int a_enc_x = (a_data & 0x0000FFFF);
const unsigned int a_enc_y = ((a_data & 0xFFFF0000) >> 16);
const float sign = (a_enc_x & 0x0001) ? -1.0f : 1.0f;
const float x = ((short)(a_enc_x & 0xfffe))*(1.0f / 32767.0f);
const float y = ((short)(a_enc_y & 0xffff))*(1.0f / 32767.0f);
const float z = sign*sqrt(fmax(1.0f - x*x - y*y, 0.0f));
return make_float3(x, y, z);
}
/*
struct ALIGN_S(16) HitPosNormT
{
float pos_x;
float pos_y;
float pos_z;
uint norm_xy;
#ifdef __CUDACC__
__device__ float3 GetNormal() const { return decodeNormal(norm_xy); }
__device__ void SetNormal(float3 a_norm) { norm_xy = encodeNormal(normalize(a_norm)); }
#endif
};
typedef struct HitPosNormT HitPosNorm;
static inline HitPosNorm make_HitPosNorm(float4 a_data)
{
HitPosNorm res;
res.pos_x = a_data.x;
res.pos_y = a_data.y;
res.pos_z = a_data.z;
res.norm_xy = (uint)(as_int(a_data.w));
return res;
}
static inline float3 GetPos(HitPosNorm a_data) { return make_float3(a_data.pos_x, a_data.pos_y, a_data.pos_z); }
static inline void SetPos(__private HitPosNorm* a_pData, float3 a_pos) { a_pData->pos_x = a_pos.x; a_pData->pos_y = a_pos.y; a_pData->pos_z = a_pos.z; }
*/
// Texture coordinates of a surface hit (8-byte aligned for GPU buffers).
struct ALIGN_S(8) HitTexCoordT
{
  float tex_u;
  float tex_v;
};
typedef struct HitTexCoordT HitTexCoord;
// Material reference of a hit: m_data packs a hit type in the top 4 bits and
// a material id in the low 28 bits (see SetHitType/SetMaterialId below);
// accumDist is a distance accumulated along the ray.
struct ALIGN_S(8) HitMatRefT
{
  int m_data;
  float accumDist;
};
typedef struct HitMatRefT HitMatRef;
// NOTE(review): returns the raw packed word WITHOUT masking off the top 4
// hit-type bits, unlike SetMaterialId which writes only the low 28 bits;
// presumably callers read it before any hit type is packed in — verify.
static inline int GetMaterialId(HitMatRef a_hitMat) { return a_hitMat.m_data; }
static inline void SetHitType(__private HitMatRef* a_pHitMat, int a_id)
{
  // Replace the top 4 bits (hit type) while keeping the low 28 bits (material id).
  const int keepLow = a_pHitMat->m_data & 0x0FFFFFFF;
  a_pHitMat->m_data = keepLow | (a_id << 28);
}
static inline void SetMaterialId(__private HitMatRef* a_pHitMat, int a_mat_id)
{
  // Replace the low 28 bits (material id) while keeping the top 4 bits (hit type).
  const int keepHigh = a_pHitMat->m_data & 0xF0000000;
  a_pHitMat->m_data = keepHigh | (a_mat_id & 0x0FFFFFFF);
}
// Compressed tangent frame of a hit; both vectors are packed with encodeNormal().
struct ALIGN_S(8) Hit_Part4T
{
  uint tangentCompressed;
  uint bitangentCompressed;
};
typedef struct Hit_Part4T Hit_Part4;
static inline void CoordinateSystem(float3 v1, __private float3* v2, __private float3* v3)
{
  // Build an orthonormal basis (v1, *v2, *v3) around v1 (assumed normalized).
  // Pick the larger of |v1.x| / |v1.y| so we never divide by a near-zero length.
  if (fabs(v1.x) > fabs(v1.y))
  {
    const float invLen = 1.0f / sqrt(v1.x*v1.x + v1.z*v1.z);
    (*v2) = make_float3((-1.0f)*v1.z*invLen, 0.0f, v1.x*invLen);
  }
  else
  {
    const float invLen = 1.0f / sqrt(v1.y*v1.y + v1.z*v1.z);
    (*v2) = make_float3(0.0f, v1.z*invLen, (-1.0f)*v1.y*invLen);
  }
  (*v3) = cross(v1, (*v2));
}
// Map two uniform samples (r1, r2) to a direction distributed as cos^power
// around 'direction' (Phong-lobe sampling). power >= 1e6 is treated as a
// perfect mirror and returns 'direction' unchanged.
static inline float3 MapSampleToCosineDistribution(float r1, float r2, float3 direction, float3 hit_norm, float power)
{
  if(power >= 1e6f)
    return direction;
  // azimuth: phi = 2*PI*r1
  float sin_phi = sin(2.0f*r1*3.141592654f);
  float cos_phi = cos(2.0f*r1*3.141592654f);
  //sincos(2.0f*r1*3.141592654f, &sin_phi, &cos_phi);
  // polar angle from the inverse CDF of the cos^power lobe
  float cos_theta = pow(1.0f - r2, 1.0f / (power + 1.0f));
  float sin_theta = sqrt(1.0f - cos_theta*cos_theta);
  float3 deviation;
  deviation.x = sin_theta*cos_phi;
  deviation.y = sin_theta*sin_phi;
  deviation.z = cos_theta;
  // local frame around 'direction' (note the ny/nz swap, shared by all samplers here)
  float3 ny = direction, nx, nz;
  CoordinateSystem(ny, &nx, &nz);
  {
    float3 temp = ny;
    ny = nz;
    nz = temp;
  }
  float3 res = nx*deviation.x + ny*deviation.y + nz*deviation.z;
  // if the sampled direction ended up on the wrong side of the surface, mirror it back
  float invSign = dot(direction, hit_norm) > 0.0f ? 1.0f : -1.0f;
  if (invSign*dot(res, hit_norm) < 0.0f) // reflected ray is below surface #CHECK_THIS
  {
    res = (-1.0f)*nx*deviation.x + ny*deviation.y - nz*deviation.z;
    //belowSurface = true;
  }
  return res;
}
// Using the modified Phong reflectance model for physically based rendering
//
// Same lobe sampling as MapSampleToCosineDistribution, but with the
// modified-Phong inverse CDF (sin_theta derived from r2 directly); reports
// via *a_underSurface whether the sample had to be flipped back above the surface.
static inline float3 MapSampleToModifiedCosineDistribution(float r1, float r2, float3 direction, float3 hit_norm, float power,
                                                           bool* a_underSurface)
{
  if (power >= 1e6f)
    return direction;  // treated as a perfect mirror
  // float sin_phi, cos_phi;
  // sincosf(2 * r1*3.141592654f, &sin_phi, &cos_phi);
  float sin_phi = sin(2.0f*r1*3.141592654f);  // azimuth phi = 2*PI*r1
  float cos_phi = cos(2.0f*r1*3.141592654f);
  float sin_theta = sqrt(1.0f - pow(r2, 2.0f / (power + 1.0f)));  // inverse CDF of the cos^power lobe
  float3 deviation;
  deviation.x = sin_theta*cos_phi;
  deviation.y = sin_theta*sin_phi;
  deviation.z = sqrt(1.0f - deviation.x*deviation.x - deviation.y*deviation.y); //pow(r2, 1.0f / (power + 1.0f));
  // local frame around 'direction' (note the ny/nz swap, shared by all samplers here)
  float3 ny = direction, nx, nz;
  CoordinateSystem(ny, &nx, &nz);
  {
    float3 temp = ny;
    ny = nz;
    nz = temp;
  }
  float3 res = nx*deviation.x + ny*deviation.y + nz*deviation.z;
  (*a_underSurface) = false;
  const float invSign = dot(direction, hit_norm) >= 0.0f ? 1.0f : -1.0f;
  if (invSign*dot(res, hit_norm) < 0.0f) // reflected ray is below surface
  {
    res = (-1.0f)*nx*deviation.x - ny*deviation.y + nz*deviation.z;
    (*a_underSurface) = true;
  }
  return res;
}
/**
\brief transform float2 sample in rect [-1,1]x[-1,1] to disc centered at (0,0) with radius == 1 (Shirley's concentric mapping).
\param xy - input sample in rect [-1,1]x[-1,1]
\return position in disc

Fix: the previous version used four independent, strictly-compared tests, so
samples on the diagonals (x == y or x == -y) matched no branch and collapsed
to the disc center. The tests now form an exhaustive if/else chain (interior
behavior unchanged), and the degenerate origin sample is handled explicitly
to avoid a division by zero.
*/
static inline float2 MapSamplesToDisc(float2 xy)
{
  const float x = xy.x;
  const float y = xy.y;
  float2 res = xy;
  if (x == 0.0f && y == 0.0f)  // center: radius is zero, any angle would do
  {
    res.x = 0.0f;
    res.y = 0.0f;
    return res;
  }
  float r = 0;
  float phi = 0;
  if (x > y && x >= -y)        // right wedge (x > 0 here, division is safe)
  {
    r = x;
    phi = 0.25f*3.141592654f*(y / x);
  }
  else if (x <= y && x > -y)   // top wedge (y > 0 here)
  {
    r = y;
    phi = 0.25f*3.141592654f*(2.0f - x / y);
  }
  else if (x < y && x <= -y)   // left wedge (x < 0 here)
  {
    r = -x;
    phi = 0.25f*3.141592654f*(4.0f + y / x);
  }
  else                         // bottom wedge (y < 0 here; y == 0 only happens at the origin, handled above)
  {
    r = -y;
    phi = 0.25f*3.141592654f*(6.0f - x / y);
  }
  //float sin_phi, cos_phi;
  //sincosf(phi, &sin_phi, &cos_phi);
  const float sin_phi = sin(phi);
  const float cos_phi = cos(phi);
  res.x = r*sin_phi;
  res.y = r*cos_phi;
  return res;
}
static inline float3 MapSamplesToCone(float cosCutoff, float2 sample, float3 direction)
{
  // Sample a direction inside the cone around 'direction' whose half-angle
  // has cosine 'cosCutoff' (cosTheta is lerp(1, cosCutoff, sample.x)).
  const float cosTheta = (1.0f - sample.x) + sample.x * cosCutoff;
  const float sinTheta = sqrt(1.0f - cosTheta * cosTheta);
  const float sinPhi = sin(2.0f * M_PI * sample.y);
  const float cosPhi = cos(2.0f * M_PI * sample.y);
  const float3 local = make_float3(cosPhi * sinTheta, sinPhi * sinTheta, cosTheta);
  // transform to a basis around 'direction' with the same ny/nz swap the
  // other sampling helpers in this file use
  float3 ny = direction, nx, nz;
  CoordinateSystem(ny, &nx, &nz);
  {
    float3 tmp = ny;
    ny = nz;
    nz = tmp;
  }
  return nx*local.x + ny*local.y + nz*local.z;
}
// Map two uniform samples in [0,1] to a uniformly distributed point on the
// unit sphere (cylindrical mapping): r1 -> azimuth, r2 -> height in [-1,1].
static inline float3 MapSamplesToSphere(float r1, float r2) // [-1..1]
{
  float phi = r1*3.141592654f * 2.0f; // [0 .. 2PI]
  float h = r2*2.0f - 1.0f;           // [-1 .. 1]
  float sin_phi = sin(phi);
  float cos_phi = cos(phi);
  float rxy = sqrt(1 - h*h);          // circle radius at height h (hoisted: was computed twice)
  float x = sin_phi*rxy;
  float y = cos_phi*rxy;
  float z = h;
  return make_float3(x, y, z);
}
// Screen-space block for adaptive sampling, stored in Z-order (Morton) layout.
// Shared between host and device: the constructors/methods compile only for
// C++ host code (excluded for OpenCL and CUDA device compilation).
struct ALIGN_S(16) ZBlockT
{
#ifndef OCL_COMPILER
#ifndef __CUDACC__
  ZBlockT() { index = 0; diff = 100; counter = 0; index2 = 0; }
  ZBlockT(int a_index, float a_diff)
  {
    index = a_index;
    index2 = 0;
    diff = a_diff;
    counter = 0;
  }
#endif
  inline static int GetSize() { return Z_ORDER_BLOCK_SIZE*Z_ORDER_BLOCK_SIZE; }
  inline int GetOffset() const { return index*GetSize(); }
  // order blocks by error so the worst ones can be prioritized
  inline bool operator<(const ZBlockT& rhs) const { return (diff < rhs.diff); }
#endif
  int index;   // just block offset in global screen buffer
  int index2;  // index in other buffer + avg trace depth
  int counter; // how many times this block was traced?
  float diff;  // error in some units; stop criterion in fact
};
typedef struct ZBlockT ZBlock;
static bool BlockFinished(ZBlock block, int a_minRaysPerPixel, int a_maxRaysPerPixel, float* a_outDiff) // for use on the cpu side ... for current
{
  // A block is finished when its error fell below the threshold and it has at
  // least the minimum number of samples, or when the sample budget is spent.
  // (The old 'maxErrorOk' flag was always false and has been folded away.)
  const int samplesPerPixel = block.counter; // was *2 due to odd and even staff
  if (a_outDiff != NULL)
    (*a_outDiff) = block.diff;
  const float acceptedBadPixels = 8.0f; // sqrt((float)(CMP_RESULTS_BLOCK_SIZE));
  const bool errorOk   = (block.diff <= acceptedBadPixels);
  const bool enoughMin = (samplesPerPixel >= a_minRaysPerPixel);
  const bool budgetHit = (samplesPerPixel >= a_maxRaysPerPixel);
  return (errorOk && enoughMin) || budgetHit;
}
static inline uint ThreadSwizzle1D(uint pixelId, uint zBlockIndex)
{
  // Keep the pixel's position inside its block, but relocate it to the block
  // given by zBlockIndex.
  const uint insideBlock = pixelId % CMP_RESULTS_BLOCK_SIZE;
  return zBlockIndex*CMP_RESULTS_BLOCK_SIZE + insideBlock;
}
static inline float PdfAtoW(const float aPdfA, const float aDist, const float aCosThere)
{
  // Convert an area pdf to a solid-angle pdf: pdfW = pdfA * d^2 / cos,
  // with the cosine clamped to avoid division by (near) zero.
  const float distSq = aDist*aDist;
  return (aPdfA*distSq) / fmax(aCosThere, DEPSILON2);
}
static inline float PdfWtoA(const float aPdfW, const float aDist, const float aCosThere)
{
  // Convert a solid-angle pdf to an area pdf: pdfA = pdfW * |cos| / d^2,
  // with the squared distance clamped to avoid division by (near) zero.
  const float distSq = aDist*aDist;
  return aPdfW * fabs(aCosThere) / fmax(distSq, DEPSILON2);
}
// Per-pass ray tracing performance counters; the C++ host side
// zero-initializes the whole struct in its constructor.
struct MRaysStat
{
#ifndef OCL_COMPILER
  MRaysStat() { memset(this, 0, sizeof(MRaysStat)); }
#endif
  int traceTimePerCent;
  float raysPerSec;
  float samplesPerSec;
  float reorderTimeMs;
  float traversalTimeMs;
  float samLightTimeMs;
  float shadowTimeMs;
  float shadeTimeMs;
  float bounceTimeMS;
  float evalHitMs;
  float nextBounceMs;
  float sampleTimeMS;
};
// Russian-roulette absorption probability, growing with the number of diffuse
// bounces already made. MMLT (Metropolis) paths must not use RR, so they get 0.
// Fix: removed the unused 'otherFlags' local (unpackRayFlags result was never read).
static inline float probabilityAbsorbRR(uint a_flags, uint a_globalFlags)
{
  if (a_globalFlags & HRT_ENABLE_MMLT) // metropolis doesn't use roulette
    return 0.0f;
  const uint diffBounceNum = unpackBounceNumDiff(a_flags);
  if (diffBounceNum >= 4)
    return 0.50f;
  else if (diffBounceNum >= 3)
    return 0.25f;
  else
    return 0.0f;
}
static inline float MonteCarloVariance(float3 avgColor, float sqrColor, int nSamples)
{
  // Variance estimate of the brightest channel; avgColor and sqrColor are the
  // accumulated sum and sum-of-squares over nSamples.
  const float brightest = fmax(avgColor.x, fmax(avgColor.y, avgColor.z));
  const float invN      = 1.0f / ((float)(nSamples));
  return fabs(sqrColor*invN - (brightest*brightest*invN*invN));
}
static inline float MonteCarloRelErr(float maxColor, float sqrColor, int nSamples)
{
const float fnSamples = ((float)(nSamples));
const float nSampleInv = 1.0f / fnSamples;
const float variance = fabs(sqrColor*nSampleInv - (maxColor*maxColor*nSampleInv*nSampleInv));
const float stdError = sqrt(variance);
return stdError / (fmax(maxColor, 0.00001f));
}
static inline float MonteCarloRelErr2(float3 avgColor, float sqrColor, int nSamples)
{
  // Convenience overload: take the brightest channel of a color and forward.
  const float brightest = fmax(avgColor.x, fmax(avgColor.y, avgColor.z));
  return MonteCarloRelErr(brightest, sqrColor, nSamples);
}
static inline float colorSquareMax3(float3 calcColor)
{
  // Maximum of the squared color channels.
  const float sx = calcColor.x*calcColor.x;
  const float sy = calcColor.y*calcColor.y;
  const float sz = calcColor.z*calcColor.z;
  return fmax(sx, fmax(sy, sz));
}
// Maximum of the squared r/g/b channels of a float4 color; alpha is ignored.
// Fix: the old version also squared .w but never used the result — removed.
static inline float colorSquareMax4(float4 calcColor)
{
  const float sx = calcColor.x*calcColor.x;
  const float sy = calcColor.y*calcColor.y;
  const float sz = calcColor.z*calcColor.z;
  return fmax(sx, fmax(sy, sz));
}
// Unpolarized fresnel reflection term for dielectric materials
// this formula is simplified and should be checked
//
static inline float fresnelCoeffSimple(float cosThetaI, float a_eta)
{
float g = sqrt(a_eta*a_eta - 1.0f + cosThetaI * cosThetaI);
float t1 = (g - cosThetaI) / (g + cosThetaI);
float t2 = (cosThetaI * (g + cosThetaI) - 1) / (cosThetaI * (g - cosThetaI) + 1.0f);
return 0.5f * t1 * t1 * (1.0f + t2 * t2);
}
// The following functions calculate the reflected and refracted
// directions in addition to the fresnel coefficients. Based on PBRT
// and the paper "Derivation of Refraction Formulas" by Paul S. Heckbert.
//
// Average of the squared s- and p-polarized Fresnel amplitudes for a
// dielectric interface, given the cosines on both sides.
static inline float fresnelDielectric(float cosTheta1, float cosTheta2, float etaExt, float etaInt)
{
  const float a = etaExt * cosTheta1;
  const float b = etaInt * cosTheta2;
  const float c = etaInt * cosTheta1;
  const float d = etaExt * cosTheta2;
  const float Rs = (a - b) / (a + b);  // s-polarized amplitude
  const float Rp = (c - d) / (c + d);  // p-polarized amplitude
  return 0.5f * (Rs * Rs + Rp * Rp);
}
static inline float fresnelConductor(float cosTheta, float eta, float roughness)
{
  // Approximate conductor Fresnel term (PBRT-style), averaging the parallel
  // and perpendicular polarizations; 'roughness' plays the role of the
  // extinction coefficient k in eta^2 + k^2.
  const float etaK2     = eta*eta + roughness*roughness;
  const float cos2      = cosTheta*cosTheta;
  const float twoEtaCos = eta * (2.0f * cosTheta);
  const float t         = etaK2 * cos2;
  const float rParl2    = (t - twoEtaCos + 1.0f) / (t + twoEtaCos + 1.0f);
  const float rPerp2    = (etaK2 - twoEtaCos + cos2) / (etaK2 + twoEtaCos + cos2);
  return (rParl2 + rPerp2) / 2.0f;
}
static inline float fresnelReflectionCoeff(float cosTheta1, float etaExt, float etaInt)
{
  // A negative cosine means the interaction starts on the inside of the
  // object, so swap the indices of refraction.
  if (cosTheta1 < 0.0f)
  {
    const float tmp = etaInt;
    etaInt = etaExt;
    etaExt = tmp;
  }
  // Snell's law gives the sine of the angle between the transmitted ray and
  // the surface normal.
  const float sinTheta2 = etaExt / etaInt * sqrt(fmax(0.0f, 1.0f - cosTheta1*cosTheta1));
  if (sinTheta2 > 1.0f)
    return 1.0f; // Total internal reflection!
  // sin^2 + cos^2 = 1; fmax() guards against numerical imprecision.
  const float cosTheta2 = sqrt(fmax(0.0f, 1.0f - sinTheta2*sinTheta2));
  // Finally compute the reflection coefficient.
  return fresnelDielectric(fabs(cosTheta1), cosTheta2, etaInt, etaExt);
}
static inline float fresnelReflectionCoeffMentalLike(float cosTheta, float refractIOR)
{
  // Convenience wrapper: air (IOR == 1) outside, front-facing cosine.
  return fresnelReflectionCoeff(fabs(cosTheta), 1.0f, refractIOR);
}
static inline float contribFunc(float3 color)
{
  // Scalar contribution of a color: channel average, clamped to be >= 0.
  const float avg = 0.33334f*(color.x + color.y + color.z);
  return fmax(avg, 0.0f);
}
static inline int packXY1616(int x, int y)
{
  // Pack two 16-bit values into one int: y in the high half-word, x in the low.
  const int lowBits = x & 0x0000FFFF;
  return (y << 16) | lowBits;
}
// CPU and CUDA only code
//
#ifndef OCL_COMPILER
static inline float3 clamp3(float3 x, float a, float b)
{
  // Componentwise clamp of x into [a, b].
  float3 r;
  r.x = fmin(fmax(x.x, a), b);
  r.y = fmin(fmax(x.y, a), b);
  r.z = fmin(fmax(x.z, a), b);
  return r;
}
// Lookup table for Morton (Z-order) encoding: entry i contains the bits of i
// spread out to the even bit positions (classic "Bit Twiddling Hacks" table).
static unsigned short MortonTable256Host[] =
{
  0x0000, 0x0001, 0x0004, 0x0005, 0x0010, 0x0011, 0x0014, 0x0015,
  0x0040, 0x0041, 0x0044, 0x0045, 0x0050, 0x0051, 0x0054, 0x0055,
  0x0100, 0x0101, 0x0104, 0x0105, 0x0110, 0x0111, 0x0114, 0x0115,
  0x0140, 0x0141, 0x0144, 0x0145, 0x0150, 0x0151, 0x0154, 0x0155,
  0x0400, 0x0401, 0x0404, 0x0405, 0x0410, 0x0411, 0x0414, 0x0415,
  0x0440, 0x0441, 0x0444, 0x0445, 0x0450, 0x0451, 0x0454, 0x0455,
  0x0500, 0x0501, 0x0504, 0x0505, 0x0510, 0x0511, 0x0514, 0x0515,
  0x0540, 0x0541, 0x0544, 0x0545, 0x0550, 0x0551, 0x0554, 0x0555,
  0x1000, 0x1001, 0x1004, 0x1005, 0x1010, 0x1011, 0x1014, 0x1015,
  0x1040, 0x1041, 0x1044, 0x1045, 0x1050, 0x1051, 0x1054, 0x1055,
  0x1100, 0x1101, 0x1104, 0x1105, 0x1110, 0x1111, 0x1114, 0x1115,
  0x1140, 0x1141, 0x1144, 0x1145, 0x1150, 0x1151, 0x1154, 0x1155,
  0x1400, 0x1401, 0x1404, 0x1405, 0x1410, 0x1411, 0x1414, 0x1415,
  0x1440, 0x1441, 0x1444, 0x1445, 0x1450, 0x1451, 0x1454, 0x1455,
  0x1500, 0x1501, 0x1504, 0x1505, 0x1510, 0x1511, 0x1514, 0x1515,
  0x1540, 0x1541, 0x1544, 0x1545, 0x1550, 0x1551, 0x1554, 0x1555,
  0x4000, 0x4001, 0x4004, 0x4005, 0x4010, 0x4011, 0x4014, 0x4015,
  0x4040, 0x4041, 0x4044, 0x4045, 0x4050, 0x4051, 0x4054, 0x4055,
  0x4100, 0x4101, 0x4104, 0x4105, 0x4110, 0x4111, 0x4114, 0x4115,
  0x4140, 0x4141, 0x4144, 0x4145, 0x4150, 0x4151, 0x4154, 0x4155,
  0x4400, 0x4401, 0x4404, 0x4405, 0x4410, 0x4411, 0x4414, 0x4415,
  0x4440, 0x4441, 0x4444, 0x4445, 0x4450, 0x4451, 0x4454, 0x4455,
  0x4500, 0x4501, 0x4504, 0x4505, 0x4510, 0x4511, 0x4514, 0x4515,
  0x4540, 0x4541, 0x4544, 0x4545, 0x4550, 0x4551, 0x4554, 0x4555,
  0x5000, 0x5001, 0x5004, 0x5005, 0x5010, 0x5011, 0x5014, 0x5015,
  0x5040, 0x5041, 0x5044, 0x5045, 0x5050, 0x5051, 0x5054, 0x5055,
  0x5100, 0x5101, 0x5104, 0x5105, 0x5110, 0x5111, 0x5114, 0x5115,
  0x5140, 0x5141, 0x5144, 0x5145, 0x5150, 0x5151, 0x5154, 0x5155,
  0x5400, 0x5401, 0x5404, 0x5405, 0x5410, 0x5411, 0x5414, 0x5415,
  0x5440, 0x5441, 0x5444, 0x5445, 0x5450, 0x5451, 0x5454, 0x5455,
  0x5500, 0x5501, 0x5504, 0x5505, 0x5510, 0x5511, 0x5514, 0x5515,
  0x5540, 0x5541, 0x5544, 0x5545, 0x5550, 0x5551, 0x5554, 0x5555
};
// Interleave the bits of x and y (Morton / Z-order index) via the table above:
// y's bits land on odd positions, x's bits on even positions.
static inline uint ZIndexHost(ushort x, ushort y)
{
  return MortonTable256Host[y >> 8]   << 17 |  // high byte of y
         MortonTable256Host[x >> 8]   << 16 |  // high byte of x
         MortonTable256Host[y & 0xFF] << 1  |  // low byte of y
         MortonTable256Host[x & 0xFF];         // low byte of x
}
static inline uint HostIndexZBlock2D(int x, int y, int pitch)
{
  // Linear index of pixel (x, y) when the image is stored as a grid of
  // Z_ORDER_BLOCK_SIZE^2 blocks, each laid out in Morton (Z-curve) order.
  const uint zIndex  = ZIndexHost(x % Z_ORDER_BLOCK_SIZE, y % Z_ORDER_BLOCK_SIZE);
  const uint wBlocks = pitch / Z_ORDER_BLOCK_SIZE;
  const uint blockX  = x / Z_ORDER_BLOCK_SIZE;
  const uint blockY  = y / Z_ORDER_BLOCK_SIZE;
  return (blockX + blockY*wBlocks)*Z_ORDER_BLOCK_SIZE*Z_ORDER_BLOCK_SIZE + zIndex;
}
static void ImageZBlockMemToRowPitch(const float4* inData, float4* outData, int w, int h)
{
  // Convert an image from z-block memory layout to plain row-major layout.
#pragma omp parallel for
  for (int y = 0; y < h; y++)
  {
    for (int x = 0; x < w; x++)
      outData[Index2D(x, y, w)] = inData[HostIndexZBlock2D(x, y, w)];
  }
}
#endif
#ifdef __CUDACC__
#undef ushort
#undef uint
#endif
// Work-group-local inclusive prefix sum (Hillis-Steele scan) over 'idata',
// one element per thread; this thread's result is written to 'odata'.
// 'l_Data' must be local/shared memory of at least 2*_bsize elements and
// _bsize must be a power of two (it is used as a bit mask).
#define PREFIX_SUMM_MACRO(idata,odata,l_Data,_bsize)      \
{                                                         \
  uint pos = 2 * LOCAL_ID_X - (LOCAL_ID_X & (_bsize - 1)); \
  l_Data[pos] = 0;                                        \
  pos += _bsize;                                          \
  l_Data[pos] = idata;                                    \
                                                          \
  for (uint offset = 1; offset < _bsize; offset <<= 1)    \
  {                                                       \
    SYNCTHREADS_LOCAL;                                    \
    uint t = l_Data[pos] + l_Data[pos - offset];          \
    SYNCTHREADS_LOCAL;                                    \
    l_Data[pos] = t;                                      \
  }                                                       \
                                                          \
  odata = l_Data[pos];                                    \
}                                                         \
// Bit flags selecting which categories of scene data to clear.
enum CLEAR_FLAGS{ CLEAR_MATERIALS = 1,
                  CLEAR_GEOMETRY = 2,
                  CLEAR_LIGHTS = 4,
                  CLEAR_TEXTURES = 8,
                  CLEAR_CUSTOM_DATA = 16,
                  CLEAR_ALL = CLEAR_MATERIALS | CLEAR_GEOMETRY | CLEAR_LIGHTS | CLEAR_TEXTURES | CLEAR_CUSTOM_DATA };
// BVH build options.
enum BVH_FLAGS { BVH_ENABLE_SMOOTH_OPACITY = 1};
// First g-buffer layer: geometry/shading data of the primary hit.
typedef struct GBuffer1T
{
  float depth;
  float3 norm;
  float4 rgba;
  int matId;
  float coverage;  // in [0,1]; packed to 8 bits by packGBuffer1
} GBuffer1;
// Second g-buffer layer: texture coordinates and object/instance ids.
typedef struct GBuffer2T
{
  float2 texCoord;
  int objId;
  int instId;
} GBuffer2;
// Both layers together.
typedef struct GBufferAll
{
  GBuffer1 data1;
  GBuffer2 data2;
} GBufferAll;
static inline void initGBufferAll(__private GBufferAll* a_pElem)
{
  // Reset both g-buffer layers to "no hit" defaults: far depth, zero normal,
  // opaque black color and invalid (-1) ids.
  a_pElem->data2.instId   = -1;
  a_pElem->data2.objId    = -1;
  a_pElem->data2.texCoord = make_float2(0, 0);
  a_pElem->data1.coverage = 0.0f;
  a_pElem->data1.matId    = -1;
  a_pElem->data1.rgba     = make_float4(0, 0, 0, 1);
  a_pElem->data1.norm     = make_float3(0, 0, 0);
  a_pElem->data1.depth    = 1e+6f;
}
#define GBUFFER_SAMPLES 16
#define PMPIX_SAMPLES 256 // Production Mode Pixel Samples
static inline float4 packGBuffer1(GBuffer1 a_input)
{
  // Pack the first g-buffer layer into one float4:
  //   x = depth, y = encoded normal,
  //   z = matId (low 24 bits) + coverage (high 8 bits), w = color as RGBA8.
  const unsigned int packedRGBX = RealColorToUint32(a_input.rgba);
  const float coverage255       = fmin(fmax(a_input.coverage*255.0f, 0.0f), 255.0f);
  const int   coverageBits      = ((int)(coverage255)) << 24;
  const int   matIdAndCoverage  = (a_input.matId & 0x00FFFFFF) | (coverageBits & 0xFF000000);
  float4 res;
  res.x = a_input.depth;
  res.y = as_float(encodeNormal(a_input.norm));
  res.z = as_float(matIdAndCoverage);
  res.w = as_float(packedRGBX);
  return res;
}
static inline GBuffer1 unpackGBuffer1(float4 a_input)
{
  // Inverse of packGBuffer1: see that function for the slot layout.
  GBuffer1 res;
  res.depth = a_input.x;
  res.norm  = decodeNormal(as_int(a_input.y));
  const int packedZ = as_int(a_input.z);
  res.matId    = packedZ & 0x00FFFFFF;
  res.coverage = ((float)((packedZ & 0xFF000000) >> 24))*(1.0f / 255.0f);
  const unsigned int rgba = as_int(a_input.w);
  res.rgba.x = ( rgba & 0x000000FF)        *(1.0f / 255.0f);
  res.rgba.y = ((rgba & 0x0000FF00) >> 8 ) *(1.0f / 255.0f);
  res.rgba.z = ((rgba & 0x00FF0000) >> 16) *(1.0f / 255.0f);
  res.rgba.w = ((rgba & 0xFF000000) >> 24) *(1.0f / 255.0f);
  return res;
}
static inline float4 packGBuffer2(GBuffer2 a_input)
{
  // Pack the second g-buffer layer: tex coords in xy, ids bit-cast into zw.
  float4 packed;
  packed.x = a_input.texCoord.x;
  packed.y = a_input.texCoord.y;
  packed.z = as_float(a_input.objId);
  packed.w = as_float(a_input.instId);
  return packed;
}
static inline GBuffer2 unpackGBuffer2(float4 a_input)
{
  // Inverse of packGBuffer2.
  GBuffer2 unpacked;
  unpacked.texCoord.x = a_input.x;
  unpacked.texCoord.y = a_input.y;
  unpacked.objId      = as_int(a_input.z);
  unpacked.instId     = as_int(a_input.w);
  return unpacked;
}
static inline float projectedPixelSize(float dist, float FOV, float w, float h)
{
float ppx = (FOV / w)*dist;
float ppy = (FOV / h)*dist;
if (dist > 0.0f)
return 2.0f*fmax(ppx, ppy);
else
return 1000.0f;
}
static inline float surfaceSimilarity(float4 data1, float4 data2, const float MADXDIFF)
{
  // Similarity in [0,1] of two (normal.xyz, depth.w) samples:
  // zero when either the normals or the depths differ too much.
  const float MANXDIFF = 0.15f;
  const float3 n1 = to_float3(data1);
  const float3 n2 = to_float3(data2);
  const float normDist = length(n1 - n2);
  if (normDist >= MANXDIFF)
    return 0.0f;
  const float depthDiff = fabs(data1.w - data2.w);
  if (depthDiff >= MADXDIFF)
    return 0.0f;
  const float normalSimilar = sqrt(1.0f - (normDist / MANXDIFF));
  const float depthSimilar  = sqrt(1.0f - depthDiff / MADXDIFF);
  return normalSimilar * depthSimilar;
}
static inline float gbuffDiff(GBufferAll s1, GBufferAll s2, const float a_fov, float w, float h)
{
  // Per-pixel difference for adaptive sampling: surface dissimilarity plus
  // binary object/material mismatch plus alpha difference.
  const float ppSize  = projectedPixelSize(s1.data1.depth, a_fov, w, h);
  const float similar = surfaceSimilarity(to_float4(s1.data1.norm, s1.data1.depth),
                                          to_float4(s2.data1.norm, s2.data1.depth), ppSize*2.0f);
  const float surfDiff  = 1.0f - similar;
  const float objDiff   = (s1.data2.instId == s2.data2.instId && s1.data2.objId == s2.data2.objId) ? 0.0f : 1.0f;
  const float matDiff   = (s1.data1.matId == s2.data1.matId) ? 0.0f : 1.0f;
  const float alphaDiff = fabs(s1.data1.rgba.w - s2.data1.rgba.w);
  return surfDiff + objDiff + matDiff + alphaDiff;
}
static inline float gbuffDiffObj(GBufferAll s1, GBufferAll s2, const float a_fov, int w, int h)
{
  // Object-level difference only: +1 for differing instance/object ids,
  // +1 for differing material. (a_fov, w, h are unused; kept so the
  // signature matches gbuffDiff.)
  const float objDiff = (s1.data2.instId == s2.data2.instId && s1.data2.objId == s2.data2.objId) ? 0.0f : 1.0f;
  const float matDiff = (s1.data1.matId == s2.data1.matId) ? 0.0f : 1.0f;
  return objDiff + matDiff;
}
// static inline int reverseBits(int a_input, int a_maxSize)
// {
// int maxBit = 0;
// while (a_maxSize >>= 1)
// ++maxBit;
//
// int result = 0;
//
// for (int i = 0; i < maxBit; i++)
// {
// const int j = maxBit - i - 1;
// const int inputMask = (0x00000001 << j);
// result |= ((a_input & inputMask) >> j) << i;
// }
//
// return result;
// }
// Light source kinds supported by the renderer.
enum PLAIN_LIGHT_TYPES {
  PLAIN_LIGHT_TYPE_POINT_OMNI = 0,
  PLAIN_LIGHT_TYPE_POINT_SPOT = 1,
  PLAIN_LIGHT_TYPE_DIRECT     = 2,
  PLAIN_LIGHT_TYPE_SKY_DOME   = 3,
  PLAIN_LIGHT_TYPE_AREA       = 4,
  PLAIN_LIGHT_TYPE_SPHERE     = 5,
  PLAIN_LIGHT_TYPE_CYLINDER   = 6,
  PLAIN_LIGHT_TYPE_MESH       = 7,
};
// Per-light option bits.
enum PLAIN_LIGHT_FLAGS{
  DISABLE_SAMPLING               = 1,
  SEPARATE_SKY_LIGHT_ENVIRONMENT = 2,
  SKY_LIGHT_USE_PEREZ_ENVIRONMENT = 4,
  AREA_LIGHT_SKY_PORTAL          = 8,
  LIGHT_HAS_IES                  = 16, ///< have spherical distribution mask around light
  LIGHT_IES_POINT_AREA           = 32, ///< apply IES goniometric diagram from the center of light always.
  LIGHT_DO_NOT_SAMPLE_ME         = 64, ///< zero selection probability. never sample it.
};
// Where a sky portal takes its radiance from.
enum SKY_PORTAL_COLOR_SOURCE { SKY_PORTAL_SOURCE_ENVIRONMENT = 1,
                               SKY_PORTAL_SOURCE_SKYLIGHT    = 2,
                               SKY_PORTAL_SOURCE_CUSTOM      = 3
};
static inline float3 triBaricentrics3(float3 ray_pos, float3 ray_dir, float3 A_pos, float3 B_pos, float3 C_pos)
{
  // Moeller-Trumbore ray/triangle intersection without culling; returns
  // (u, v, t). No degenerate-triangle guard: det == 0 yields inf/nan.
  const float3 e1 = B_pos - A_pos;
  const float3 e2 = C_pos - A_pos;
  const float3 pv = cross(ray_dir, e2);
  const float  invDet = 1.0f / dot(e1, pv);
  const float3 tv = ray_pos - A_pos;
  const float3 qv = cross(tv, e1);
  const float v = dot(tv, pv)*invDet;
  const float u = dot(ray_dir, qv)*invDet;
  const float t = dot(e2, qv)*invDet;
  return make_float3(u, v, t);
}
// Geometric context at a shading point: position, light/view directions and
// the full tangent frame, plus texture coordinates.
typedef struct ShadeContextT
{
  float3 wp;   ///< world pos
  //float3 lp; ///< local pos
  float3 l;    ///< direction to light
  float3 v;    ///< view vector
  float3 n;    ///< smooth normal (for shading and new rays offsets)
  float3 fn;   ///< flat normal (for bump mapping and tangent space transform)
  float3 tg;   ///< tangent (for bump mapping and tangent space transform)
  float3 bn;   ///< binormal (for bump mapping and tangent space transform)
  float2 tc;   ///< tex coord (0);
  //float2 tc1; ///< tex coord (1);
  float2 tccp; ///< tex coord camera projected
  bool hfi;    ///< Hit.From.Inside. if hit surface from the inside of the object that have glass or SSS material
} ShadeContext;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#define MAXPROCTEX 16
#define F4_PROCTEX_SIZE 12
/**
\brief this structure will store results of procedural texture kernel execution.
*/
typedef struct ProcTextureListT
{
  int currMaxProcTex;       ///< number of stored entries (may exceed MAXPROCTEX; clamped on write)
  int id_f4 [MAXPROCTEX];   ///< texture ids; INVALID_TEXTURE terminates the list
  float3 fdata4[MAXPROCTEX]; ///< evaluated texture colors, one per id
} ProcTextureList;
static inline void InitProcTextureList(__private ProcTextureList* a_pList)
{
  // Empty list: zero entries with an explicit end marker in the first slot.
  a_pList->id_f4[0]       = INVALID_TEXTURE;
  a_pList->currMaxProcTex = 0;
}
// Serialize the proc-texture list to global memory in SoA layout with stride
// 'size': first the int ids (terminated by INVALID_TEXTURE when the list is
// not full), then — OpenCL build only — the float3 colors packed as half8,
// two entries per float4 slot.
static inline void WriteProcTextureList(__global float4* fdata, int tid, int size, __private const ProcTextureList* a_pList)
{
  __global int* idata = (__global int*)fdata;
  if(a_pList->currMaxProcTex == 0)
  {
    idata[tid] = INVALID_TEXTURE;  // empty list: just the end marker
    return;
  }
  // clamp: currMaxProcTex may have grown past the array capacity
  const int finalProcTex = (a_pList->currMaxProcTex > MAXPROCTEX) ? MAXPROCTEX : a_pList->currMaxProcTex;
  for(int i=0;i<finalProcTex;i++)
    idata[tid + size * i] = a_pList->id_f4[i];
  if(finalProcTex < MAXPROCTEX)
    idata[tid + size * finalProcTex] = INVALID_TEXTURE; // list end
#ifdef OCL_COMPILER
  // pack colors pairwise into half precision (lossy), after the id section
  for(int i=0;i<finalProcTex;i+=2)
  {
    const float4 h1 = to_float4(a_pList->fdata4[i+0], 0.0f);
    const float4 h2 = to_float4(a_pList->fdata4[i+1], 0.0f);
    float8 data = {h1.x, h1.y, h1.z, h1.w,
                   h2.x, h2.y, h2.z, h2.w,};
    const int offset = (tid + size * (i/2 + MAXPROCTEX/4));
    vstore_half8(data, 0, (__global half*)(fdata + offset) );
  }
#endif
}
// Inverse of WriteProcTextureList: read ids until the INVALID_TEXTURE marker
// (or MAXPROCTEX entries), then — OpenCL build only — unpack the half8 colors.
// NOTE(review): if fdata == 0 the function returns without touching a_pList,
// so the caller must have initialized it beforehand — verify callers do.
static inline void ReadProcTextureList(__global float4* fdata, int tid, int size,
                                       __private ProcTextureList* a_pList)
{
  if (fdata == 0)
    return;
  __global int* idata = (__global int*)fdata;
  int currMaxProcTex;
  for(currMaxProcTex = 0; currMaxProcTex < MAXPROCTEX; currMaxProcTex++)
  {
    const int texId = idata[tid + size * currMaxProcTex];
    a_pList->id_f4[currMaxProcTex] = texId;
    if(texId == INVALID_TEXTURE)
      break;
  }
#ifdef OCL_COMPILER
  // colors were stored pairwise as half8 after the id section
  for(int i=0;i<currMaxProcTex;i+=2)
  {
    const int offset = (tid + size * (i/2 + MAXPROCTEX/4));
    const float8 data = vload_half8(0, (__global half*)(fdata + offset));
    a_pList->fdata4[i+0] = to_float3(data.s0123);
    a_pList->fdata4[i+1] = to_float3(data.s4567);
  }
#endif
  a_pList->currMaxProcTex = currMaxProcTex;
}
/**
\brief get color for precomputed procedural texture
\param a_texId - input tex id
\param a_pList - input ptl
\return texture color; white with w == -1.0f when a_texId is not in the list

Written as a branch-free select ladder (instead of the loop sketched below),
which maps better to GPU execution: within each group of four slots the last
matching slot wins, and earlier groups take precedence over later ones via
the quad12/quad34 cascade.
*/
static inline float4 readProcTex(int a_texId, const __private ProcTextureList* a_pList)
{
  //for(int i=0; i<maxIter; i++)
  //{
  //  if(a_texId == a_pList->id_f4[i])
  //    return to_float4(a_pList->fdata4[i], 0.0f);
  //}
  //
  //return make_float4(1, 1, 1, -1.0f);
  const int maxIter = (a_pList->currMaxProcTex < MAXPROCTEX) ? a_pList->currMaxProcTex : MAXPROCTEX; // min
  float4 quad1 = make_float4(1, 1, 1, -1.0f);
  quad1 = (0 < maxIter && a_texId == a_pList->id_f4[0]) ? to_float4(a_pList->fdata4[0], 0.0f) : quad1;
  quad1 = (1 < maxIter && a_texId == a_pList->id_f4[1]) ? to_float4(a_pList->fdata4[1], 0.0f) : quad1;
  quad1 = (2 < maxIter && a_texId == a_pList->id_f4[2]) ? to_float4(a_pList->fdata4[2], 0.0f) : quad1;
  quad1 = (3 < maxIter && a_texId == a_pList->id_f4[3]) ? to_float4(a_pList->fdata4[3], 0.0f) : quad1;
  float4 quad2 = make_float4(1, 1, 1, -1.0f);
  quad2 = (4 < maxIter && a_texId == a_pList->id_f4[4]) ? to_float4(a_pList->fdata4[4], 0.0f) : quad2;
  quad2 = (5 < maxIter && a_texId == a_pList->id_f4[5]) ? to_float4(a_pList->fdata4[5], 0.0f) : quad2;
  quad2 = (6 < maxIter && a_texId == a_pList->id_f4[6]) ? to_float4(a_pList->fdata4[6], 0.0f) : quad2;
  quad2 = (7 < maxIter && a_texId == a_pList->id_f4[7]) ? to_float4(a_pList->fdata4[7], 0.0f) : quad2;
  const float4 quad12 = (quad1.w != -1.0f) ? quad1 : quad2;  // w == -1 marks "no match"
  float4 quad3 = make_float4(1, 1, 1, -1.0f);
  quad3 = (8 < maxIter && a_texId == a_pList->id_f4[8]) ? to_float4(a_pList->fdata4[8], 0.0f) : quad3;
  quad3 = (9 < maxIter && a_texId == a_pList->id_f4[9]) ? to_float4(a_pList->fdata4[9], 0.0f) : quad3;
  quad3 = (10 < maxIter && a_texId == a_pList->id_f4[10]) ? to_float4(a_pList->fdata4[10], 0.0f) : quad3;
  quad3 = (11 < maxIter && a_texId == a_pList->id_f4[11]) ? to_float4(a_pList->fdata4[11], 0.0f) : quad3;
  float4 quad4 = make_float4(1, 1, 1, -1.0f);
  quad4 = (12 < maxIter && a_texId == a_pList->id_f4[12]) ? to_float4(a_pList->fdata4[12], 0.0f) : quad4;
  quad4 = (13 < maxIter && a_texId == a_pList->id_f4[13]) ? to_float4(a_pList->fdata4[13], 0.0f) : quad4;
  quad4 = (14 < maxIter && a_texId == a_pList->id_f4[14]) ? to_float4(a_pList->fdata4[14], 0.0f) : quad4;
  quad4 = (15 < maxIter && a_texId == a_pList->id_f4[15]) ? to_float4(a_pList->fdata4[15], 0.0f) : quad4;
  const float4 quad34 = (quad3.w != -1.0f) ? quad3 : quad4;
  return (quad12.w != -1.0f) ? quad12 : quad34;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// A light sample used for shadow rays / direct-light estimation;
// serialized by WriteShadowSample / ReadShadowSample below.
typedef struct ShadowSampleT
{
  float3 pos;        ///< sample position
  float3 color;      ///< light color/intensity
  float pdf;         ///< sample pdf
  float maxDist;     ///< maximum shadow-ray distance
  float cosAtLight;  ///< cosine at the light
  bool isPoint;      ///< true for point (delta) lights
} ShadowSample;
// Serialize a shadow sample into 3 float4 slots (SoA, stride = a_threadNum):
//   slot 0: pos.xyz + pdf (isPoint folded into the pdf's sign),
//   slot 1: color.xyz + maxDist,
//   slot 2: cosAtLight, light selection probability, light offset (bit-cast).
static inline void WriteShadowSample(const __private ShadowSample* a_pSam, float a_lightProbSel, int a_lightOffset, int a_tid, int a_threadNum,
                                     __global float4* a_out)
{
  const float pdfAndIsPoint = a_pSam->isPoint ? (-1.0f)*a_pSam->pdf : a_pSam->pdf;
  a_out[a_tid + a_threadNum*0] = make_float4(a_pSam->pos.x, a_pSam->pos.y, a_pSam->pos.z, pdfAndIsPoint);
  a_out[a_tid + a_threadNum*1] = make_float4(a_pSam->color.x, a_pSam->color.y, a_pSam->color.z, a_pSam->maxDist);
  a_out[a_tid + a_threadNum*2] = make_float4(a_pSam->cosAtLight, a_lightProbSel, as_float(a_lightOffset), 0);
}
// Inverse of WriteShadowSample; see that function for the 3-slot layout.
static inline void ReadShadowSample(const __global float4* a_in, int a_tid, int a_threadNum,
                                    __private ShadowSample* a_pSam, __private float* a_pLightProbSel, __private int* a_pLightOffset)
{
  const float4 f0 = a_in[a_tid + a_threadNum*0];
  const float4 f1 = a_in[a_tid + a_threadNum*1];
  const float4 f2 = a_in[a_tid + a_threadNum*2];
  a_pSam->pos.x = f0.x; a_pSam->pos.y = f0.y;
  a_pSam->pos.z = f0.z; a_pSam->pdf = fabs(f0.w); a_pSam->isPoint = (f0.w <= 0); // this is ok, if pdf is 0, it can be only point light
  a_pSam->color.x = f1.x; a_pSam->color.y = f1.y;
  a_pSam->color.z = f1.z; a_pSam->maxDist = f1.w;
  a_pSam->cosAtLight = f2.x; (*a_pLightProbSel) = f2.y;
  (*a_pLightOffset) = as_int(f2.z);
}
/**
\brief Per ray accumulated (for all bounces) data.
*/
typedef struct ALIGN_S(16) PerRayAccT
{
  float pdfGTerm;     ///< accumulated G term equal to product of G(x1,x2,x3) for all bounces; for 3-Way we actually don't need it and mult it with "-1" to store first bounce specular flag from light direction.
  float pdfLightWP;   ///< accumulated probability per projected solid angle for light path
  float pdfCameraWP;  ///< accumulated probability per projected solid angle for camera path
  float pdfCamA0;     ///< equal to pdfWP[0]*G[0] (if [0] means light)
} PerRayAcc;
static inline PerRayAcc InitialPerParAcc()
{
  // All accumulated pdf products start at the multiplicative identity.
  PerRayAcc acc;
  acc.pdfCamA0    = 1.0f;
  acc.pdfCameraWP = 1.0f;
  acc.pdfLightWP  = 1.0f;
  acc.pdfGTerm    = 1.0f;
  return acc;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Full description of a surface hit; serialized into 4 float4 slots by
// WriteSurfaceHit / ReadSurfaceHit below.
typedef struct SurfaceHitT
{
  float3 pos;
  float3 normal;       // smooth (shading) normal
  float3 flatNormal;
  float3 tangent;
  float3 biTangent;
  float2 texCoord;
  //float2 texCoord1;
  float2 texCoordCamProj;
  int matId;
  float t;             // hit distance along the ray
  float sRayOff;
  bool hfi;            // hit from inside
} SurfaceHit;
#define PV_PACK_VALID_FIELD 1
#define PV_PACK_WASSP_FIELD 2
#define PV_PACK_HITFI_FIELD 4 // Hit From Inside
#define PV_PACK_RCONN_FIELD 8 // Ready For Connect (pack this if the camera path didn't hit a light but stored a camera vertex instead).
#define SURFACE_HIT_SIZE_IN_F4 4
// Serialize a SurfaceHit into 4 float4 slots (SoA, stride = a_threadNum):
//   f1 = pos.xyz + texCoord.x,  f2 = normal.xyz + texCoord.y,
//   f3 = encoded flatNormal/tangent/biTangent + matId (bit-cast),
//   f4 = t, sRayOff, 0, flags (only the hit-from-inside bit is packed).
static inline void WriteSurfaceHit(const __private SurfaceHit* a_pHit, int a_tid, int a_threadNum,
                                   __global float4* a_out)
{
  const float4 f1 = to_float4(a_pHit->pos,    a_pHit->texCoord.x);
  const float4 f2 = to_float4(a_pHit->normal, a_pHit->texCoord.y);
  const float4 f3 = make_float4(as_float( encodeNormal(a_pHit->flatNormal)),
                                as_float( encodeNormal(a_pHit->tangent)),
                                as_float( encodeNormal(a_pHit->biTangent)),
                                as_float( a_pHit->matId)
                               );
  // ignore (hit.t, hit.sRayOff) because bpt don't need them!
  const int bit3  = a_pHit->hfi ? PV_PACK_HITFI_FIELD : 0;
  const float4 f4 = make_float4(a_pHit->t, a_pHit->sRayOff, 0, as_float(bit3));
  a_out[a_tid + 0*a_threadNum] = f1;
  a_out[a_tid + 1*a_threadNum] = f2;
  a_out[a_tid + 2*a_threadNum] = f3;
  a_out[a_tid + 3*a_threadNum] = f4;
}
// Overwrite the matId slot (f3.w) of a previously written SurfaceHit.
// NOTE(review): this stores a whole float4 and therefore ZEROES the packed
// flatNormal/tangent/biTangent in f3.xyz — callers must not rely on them
// after this call; verify that is intended.
static inline void WriteSurfaceHitMatId(const int a_matId, int a_tid, int a_threadNum,
                                        __global float4* a_out)
{
  const float4 f3 = make_float4(0, 0, 0, as_float(a_matId));
  a_out[a_tid + 2*a_threadNum] = f3;
}
// Inverse of WriteSurfaceHit; see that function for the 4-slot layout.
static inline void ReadSurfaceHit(const __global float4* a_in, int a_tid, int a_threadNum,
                                  __private SurfaceHit* a_pHit)
{
  const float4 f1 = a_in[a_tid + 0*a_threadNum];
  const float4 f2 = a_in[a_tid + 1*a_threadNum];
  const float4 f3 = a_in[a_tid + 2*a_threadNum];
  const float4 f4 = a_in[a_tid + 3*a_threadNum];
  a_pHit->pos        = to_float3 (f1); a_pHit->texCoord.x = f1.w;
  a_pHit->normal     = to_float3 (f2); a_pHit->texCoord.y = f2.w;
  a_pHit->flatNormal = decodeNormal(as_int(f3.x));
  a_pHit->tangent    = decodeNormal(as_int(f3.y));
  a_pHit->biTangent  = decodeNormal(as_int(f3.z));
  a_pHit->matId      = as_int(f3.w);
  a_pHit->t          = f4.x;
  a_pHit->sRayOff    = f4.y;
  const int flags = as_int(f4.w);
  a_pHit->hfi = ((flags & PV_PACK_HITFI_FIELD) != 0);
}
// Fetch only the material id of a packed SurfaceHit; it lives in the w lane
// of the third float4 slot (see WriteSurfaceHit).
static inline int ReadSurfaceHitMatId(const __global float4* a_in, int a_tid, int a_threadNum)
{
  return as_int(a_in[a_tid + 2*a_threadNum].w);
}
// Fetch only the hit position (xyz of slot 0) without unpacking the full SurfaceHit.
static inline float3 ReadSurfaceHitPos(const __global float4* a_in, int a_tid, int a_threadNum)
{
return to_float3(a_in[a_tid + 0*a_threadNum]);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Material class ids stored in the material head at PLAIN_MAT_TYPE_OFFSET.
enum PLAIN_MAT_TYPES {
PLAIN_MAT_CLASS_PHONG_SPECULAR = 0,
PLAIN_MAT_CLASS_BLINN_SPECULAR = 1, // Micro Facet Torrance Sparrow model with Blinn distribution
PLAIN_MAT_CLASS_PERFECT_MIRROR = 2,
PLAIN_MAT_CLASS_THIN_GLASS = 3,
PLAIN_MAT_CLASS_GLASS = 4,
PLAIN_MAT_CLASS_TRANSLUCENT = 5,
PLAIN_MAT_CLASS_SHADOW_MATTE = 6,
PLAIN_MAT_CLASS_LAMBERT = 7,
PLAIN_MAT_CLASS_OREN_NAYAR = 8,
PLAIN_MAT_CLASS_BLEND_MASK = 9,
PLAIN_MAT_CLASS_EMISSIVE = 10,
PLAIN_MAT_CLASS_VOLUME_PERLIN = 11, // inactive currently
PLAIN_MAT_CLASS_SSS = 12, // inactive currently
PLAIN_MAT_CLASS_BECKMANN = 13, // anisotropic BRDF
PLAIN_MAT_CLASS_TRGGX = 14, // anisotropic BRDF
PLAIN_MAT_CLASS_GGX = 15, // isotropic and simple
};
// Bit flags stored in the material flag word at PLAIN_MAT_FLAGS_OFFSET
// (read back via materialGetFlags / the materialIs* helpers below).
enum PLAIN_MAT_FLAGS{
PLAIN_MATERIAL_IS_LIGHT = 1,
PLAIN_MATERIAL_CAST_CAUSTICS = 2,
PLAIN_MATERIAL_HAS_DIFFUSE = 4,
PLAIN_MATERIAL_HAS_TRANSPARENCY = 8,
PLAIN_MATERIAL_INVERT_NMAP_X = 16,
PLAIN_MATERIAL_INVERT_NMAP_Y = 32,
PLAIN_MATERIAL_INVERT_SWAP_NMAP_XY = 64,
PLAIN_MATERIAL_INVERT_HEIGHT = 128,
PLAIN_MATERIAL_SKIP_SHADOW = 256,
PLAIN_MATERIAL_FORBID_EMISSIVE_GI = 512,
PLAIN_MATERIAL_SKIP_SKY_PORTAL = 1024,
PLAIN_MATERIAL_EMISSION_FALOFF = 2048,
// This flag marks the node as a real blend of different materials.
// It is used for blending emissive properties and normal maps.
//
PLAIN_MATERIAL_SURFACE_BLEND = 4096,
PLAIN_MATERIAL_HAVE_BTDF = 8192,
PLAIN_MATERIAL_INVIS_LIGHT = 16384,
PLAIN_MATERIAL_CAN_SAMPLE_REFL_ONLY = 32768,
PLAIN_MATERIAL_HAVE_PROC_TEXTURES = 32768*2,
PLAIN_MATERIAL_LOCAL_AO1 = 32768*4,
PLAIN_MATERIAL_LOCAL_AO2 = 32768*8,
PLAIN_MATERIAL_CAMERA_MAPPED_REFL = 32768*16,
PLAIN_MATERIAL_EMISSIVE_SHADOW_CATCHER = 32768*32,
PLAIN_MATERIAL_CATCHER_FIX_BLACK_TRIANGLES = 32768*64,
PLAIN_MATERIAL_FLIP_TANGENT = 32768*128,
PLAIN_MATERIAL_ENERGY_FIX_OR_MULTISCATTER = 32768*256,
};
#define PLAIN_MATERIAL_DATA_SIZE 192
#define PLAIN_MATERIAL_CUSTOM_DATA_SIZE 80
#define MIX_TREE_MAX_DEEP 7
struct PlainMaterialT
{
float data[PLAIN_MATERIAL_DATA_SIZE];
};
typedef struct PlainMaterialT PlainMaterial;
// emissive component, always present in material to speed-up code
//
#define EMISSIVE_COLORX_OFFSET 4
#define EMISSIVE_COLORY_OFFSET 5
#define EMISSIVE_COLORZ_OFFSET 6
#define EMISSIVE_TEXID_OFFSET 7
#define EMISSIVE_TEXMATRIXID_OFFSET 8
#define EMISSIVE_LIGHTID_OFFSET 9
#define OPACITY_TEX_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+1)
#define OPACITY_TEX_MATRIX (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+2)
#define NORMAL_TEX_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+3)
#define NORMAL_TEX_MATRIX (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+4)
#define EMISSIVE_BLEND_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+5)
#define PARALLAX_HEIGHT (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+6)
#define EMISSIVE_SAMPLER_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+8)
#define NORMAL_SAMPLER_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+20)
#define OPACITY_SAMPLER_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+32)
// #define PROC_TEX1_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+44) // FREE SLOT!
// #define PROC_TEX2_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+45) // FREE SLOT!
// #define PROC_TEX3_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+46) // FREE SLOT!
// #define PROC_TEX4_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+47) // FREE SLOT!
// #define PROC_TEX5_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+48) // FREE SLOT!
#define PROC_TEX_TABLE_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+49)
#define PROC_TEX_AO_TYPE (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+50)
#define PROC_TEX_AO_SAMPLER (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+52)
#define PROC_TEX_TEX_ID (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+64)
#define PROC_TEXMATRIX_ID (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+65)
#define PROC_TEX_AO_LENGTH (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+66)
#define PROC_TEX_AO_TYPE2 (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+67)
#define PROC_TEX_AO_SAMPLER2 (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+68)
#define PROC_TEX_TEX_ID2 (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+80)
#define PROC_TEXMATRIX_ID2 (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+81)
#define PROC_TEX_AO_LENGTH2 (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+82)
#define PROC_TEX1_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+83)
#define PROC_TEXN_F4_HEAD_OFFSET (PLAIN_MATERIAL_CUSTOM_DATA_SIZE+99)
enum AO_TYPES { AO_TYPE_NONE = 0, AO_TYPE_UP = 1, AO_TYPE_DOWN = 2, AO_TYPE_BOTH = 4 };
#define PLAIN_MAT_TYPE_OFFSET 0
#define PLAIN_MAT_FLAGS_OFFSET 1
#define PLAIN_MAT_COMPONENTS_OFFSET 2
// Accessors for the common material header: class id, flag word, and
// individual flag tests. All header fields are int bits stored in float
// slots of data[], hence the as_int reinterpretation.
static inline int materialGetType (__global const PlainMaterial* a_pMat) { return as_int(a_pMat->data[PLAIN_MAT_TYPE_OFFSET]); }
static inline int materialGetFlags (__global const PlainMaterial* a_pMat) { return as_int(a_pMat->data[PLAIN_MAT_FLAGS_OFFSET]); }
static inline bool materialCastCaustics (__global const PlainMaterial* a_pMat) { return (as_int(a_pMat->data[PLAIN_MAT_FLAGS_OFFSET]) & PLAIN_MATERIAL_CAST_CAUSTICS) != 0; }
static inline bool materialHasTransparency (__global const PlainMaterial* a_pMat) { return (as_int(a_pMat->data[PLAIN_MAT_FLAGS_OFFSET]) & PLAIN_MATERIAL_HAS_TRANSPARENCY) != 0; }
static inline bool materialIsSkyPortal (__global const PlainMaterial* a_pMat) { return (as_int(a_pMat->data[PLAIN_MAT_FLAGS_OFFSET]) & PLAIN_MATERIAL_SKIP_SKY_PORTAL) != 0; }
static inline bool materialIsInvisLight (__global const PlainMaterial* a_pMat) { return (as_int(a_pMat->data[PLAIN_MAT_FLAGS_OFFSET]) & PLAIN_MATERIAL_INVIS_LIGHT) != 0; }
// Copy the procedural-texture id list from a ProcTextureList into the
// material head slots [PROC_TEX1_F4_HEAD_OFFSET .. +MAXPROCTEX), padding the
// unused tail with INVALID_TEXTURE, which acts as the terminator for
// GetProcTexturesIdListFromMaterialHead below.
// NOTE(review): ints are written through a cast into the float data[] array;
// this relies on int and float having the same size, and the reader undoing
// the reinterpretation with as_int().
static inline void PutProcTexturesIdListToMaterialHead(const ProcTextureList* a_pData, PlainMaterial* a_pMat)
{
for(int i=0;i<a_pData->currMaxProcTex;i++)
((int*)(a_pMat->data))[PROC_TEX1_F4_HEAD_OFFSET + i] = a_pData->id_f4[i];
for(int i=a_pData->currMaxProcTex; i<MAXPROCTEX; i++)
((int*)(a_pMat->data))[PROC_TEX1_F4_HEAD_OFFSET + i] = INVALID_TEXTURE;
}
// Read the procedural-texture id list back from the material head, stopping
// at the first INVALID_TEXTURE terminator (or after MAXPROCTEX slots).
// Note: the terminating INVALID_TEXTURE is copied into id_f4 but is not
// counted in currMaxProcTex.
static inline void GetProcTexturesIdListFromMaterialHead(__global const PlainMaterial* a_pMat, __private ProcTextureList* a_pData)
{
int currMaxProcTex;
for(currMaxProcTex = 0; currMaxProcTex < MAXPROCTEX; currMaxProcTex++)
{
const int texId = as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET + currMaxProcTex]);
a_pData->id_f4[currMaxProcTex] = texId;
if(texId == INVALID_TEXTURE)
break;
}
a_pData->currMaxProcTex = currMaxProcTex;
}
// Return true if a_texId appears among the procedural texture ids stored in
// the material head. The header holds 16 id slots starting at
// PROC_TEX1_F4_HEAD_OFFSET (int bits stored in float lanes, hence as_int).
//
// Rewritten: the original spelled out 16 copy-pasted comparisons joined by
// ||; this loop checks the same 16 slots in the same order, and the early
// return mirrors the original short-circuit evaluation exactly.
static inline bool materialHeadHaveTargetProcTex(__global const PlainMaterial* a_pMat, int a_texId)
{
  for (int i = 0; i < 16; i++)
  {
    if (as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET + i]) == a_texId)
      return true;
  }
  return false;
}
// True if the material uses any procedural texture: the id list is
// INVALID_TEXTURE-terminated, so an empty list has the terminator in slot 0.
static inline bool MaterialHaveAtLeastOneProcTex(__global const PlainMaterial* a_pMat)
{
return as_int(a_pMat->data[PROC_TEX1_F4_HEAD_OFFSET]) != INVALID_TEXTURE;
}
// True if the material's first ambient-occlusion channel is enabled.
static inline bool MaterialHaveAO(__global const PlainMaterial* a_pMat)
{
return as_int(a_pMat->data[PROC_TEX_AO_TYPE]) != AO_TYPE_NONE;
}
// True only if BOTH ambient-occlusion channels are enabled
// (the second channel is meaningless without the first).
static inline bool MaterialHaveAO2(__global const PlainMaterial* a_pMat)
{
return as_int(a_pMat->data[PROC_TEX_AO_TYPE]) != AO_TYPE_NONE && as_int(a_pMat->data[PROC_TEX_AO_TYPE2]) != AO_TYPE_NONE;
}
#define EVAL_FLAG_DEFAULT 0
#define EVAL_FLAG_DISABLE_CAUSTICS 1
#define EVAL_FLAG_FWD_DIR 2
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
\brief Select an index proportional to the piecewise-constant function stored in a_accum[0 .. N-2]; binary-search version.
\param a_r - input random variable in range [0, 1]
\param a_accum - input float array; it must be a prefix sum (i.e. sorted / non-decreasing).
\param N - size of the extended array - i.e. a_accum[N-1] == sum(a_accum[0 .. N-2]).
\param pPDF - out parameter: probability of picking the found value.
\return found index
*/
static int SelectIndexPropToOpt(const float a_r, __global const float* a_accum, const int N,
__private float* pPDF)
{
int leftBound = 0;
int rightBound = N - 2; // because a_accum[N-1] == summ(a_accum[0 .. N-2]).
int counter = 0;
int currPos = -1;
const int maxStep = 50; // hard cap on iterations; guards against non-terminating search on degenerate input
const float x = a_r*a_accum[N - 1]; // map the random number onto the total sum
while (rightBound - leftBound > 1 && counter < maxStep)
{
const int currSize = rightBound + leftBound;
const int currPos1 = (currSize % 2 == 0) ? (currSize + 1) / 2 : (currSize + 0) / 2;
const float a = a_accum[currPos1 + 0];
const float b = a_accum[currPos1 + 1];
if (a < x && x <= b)
{
// x falls inside interval (a, b] => currPos1 is the answer
currPos = currPos1;
break;
}
else if (x <= a)
rightBound = currPos1;
else if (x > b)
leftBound = currPos1;
counter++;
}
if (currPos < 0) // check the rest intervals
{
const float a1 = a_accum[leftBound + 0];
const float b1 = a_accum[leftBound + 1];
const float a2 = a_accum[rightBound + 0];
const float b2 = a_accum[rightBound + 1];
if (a1 < x && x <= b1)
currPos = leftBound;
if (a2 < x && x <= b2)
currPos = rightBound;
}
if (x == 0.0f)
currPos = 0;
else if (currPos < 0)
currPos = (rightBound + leftBound + 1) / 2; // fallback: midpoint guess when no interval matched
(*pPDF) = (a_accum[currPos + 1] - a_accum[currPos]) / a_accum[N - 1]; // interval width / total = pick probability
return currPos;
}
/**
\brief binary search for the lower bound (left range)
\param a - array of int2, sorted ascending by .x
\param length - array size
\param left_range - value to search for
\return index of the first element with a[i].x >= left_range, or -1 if all elements are smaller
*/
static inline int binarySearchForLeftRange(__global const int2* a, int length, int left_range)
{
if (a[length - 1].x < left_range)
return -1;
int low = 0;
int high = length - 1;
while (low <= high)
{
int mid = low + ((high - low) / 2); // overflow-safe midpoint
if (a[mid].x >= left_range)
high = mid - 1;
else //if(a[mid]<i)
low = mid + 1;
}
return high + 1;
}
/**
\brief binary search for the upper bound (right range)
\param a - array of int2, sorted ascending by .x
\param length - array size
\param right_range - value to search for
\return index of the last element with a[i].x <= right_range, or -1 if all elements are larger
*/
static inline int binarySearchForRightRange(__global const int2* a, int length, int right_range)
{
if (a[0].x > right_range)
return -1;
int low = 0;
int high = length - 1;
while (low <= high)
{
int mid = low + ((high - low) / 2); // overflow-safe midpoint
if (a[mid].x > right_range)
high = mid - 1;
else //if(a[mid]<i)
low = mid + 1;
}
return low - 1;
}
/**
\brief perform material id remap for instanced objects;
\param a_mId - input old material id
\param a_instId - input instance id
\param in_remapInst - array/table that maps instance id to remap list id
\param a_instTabSize - max instance id / size of 'in_remapInst' array
\param in_allMatRemapLists - all remap lists packed into a single array of (from, to) id pairs
\param in_remapTable - array/table that stores (offset, size) inside 'in_allMatRemapLists' for each remap list whose id we got from 'in_remapInst'
\param a_remapTableSize - size of 'in_remapTable' array
\return new material id, or a_mId unchanged when no remap applies
*/
static inline int remapMaterialId(int a_mId, int a_instId,
__global const int* in_remapInst, int a_instTabSize,
__global const int* in_allMatRemapLists,
__global const int2* in_remapTable, int a_remapTableSize)
{
// defensive checks: invalid ids or missing tables mean "no remap"
if (a_mId < 0 || a_instId < 0 || a_instId >= a_instTabSize || in_remapInst == 0 || in_allMatRemapLists == 0 || in_remapTable == 0)
return a_mId;
const int remapListId = in_remapInst[a_instId];
if(remapListId < 0 || remapListId >= a_remapTableSize) // || remapListId >= some size
return a_mId;
const int2 offsAndSize = in_remapTable[remapListId]; // .x = offset into pair array, .y = number of pairs
// reference (linear) implementation kept for documentation:
// int res = a_mId;
// for (int i = 0; i < offsAndSize.y; i++) // #TODO: change to binary search
// {
// int idRemapFrom = in_allMatRemapLists[offsAndSize.x + i * 2 + 0];
// int idRemapTo = in_allMatRemapLists[offsAndSize.x + i * 2 + 1];
//
// if (idRemapFrom == a_mId)
// {
// res = idRemapTo;
// break;
// }
// }
// binary search over the (from, to) pairs for the lower bound of a_mId;
// pairs are assumed sorted ascending by the 'from' id — TODO confirm the
// host side guarantees this ordering.
int low = 0;
int high = offsAndSize.y - 1;
while (low <= high)
{
const int mid = low + ((high - low) / 2);
const int idRemapFrom = in_allMatRemapLists[offsAndSize.x + mid * 2 + 0];
if (idRemapFrom >= a_mId)
high = mid - 1;
else //if(a[mid]<i)
low = mid + 1;
}
// high+1 is the first pair with from >= a_mId; remap only on an exact match
if (high+1 < offsAndSize.y)
{
const int idRemapFrom = in_allMatRemapLists[offsAndSize.x + (high + 1) * 2 + 0];
const int idRemapTo = in_allMatRemapLists[offsAndSize.x + (high + 1) * 2 + 1];
const int res = (idRemapFrom == a_mId) ? idRemapTo : a_mId;
return res;
}
else
return a_mId;
}
#define AO_RAYS_PACKED 4
// Quantize an RGB shadow/transparency value from floats to the full 16-bit
// unsigned range (0..65535); the w component is unused and set to 0.
// Inverse operation: decompressShadow below.
//
// Fix: inputs are clamped to [0,1] before conversion. Converting a negative
// float (or one that scales above 65535) to ushort is undefined behavior in
// C, so out-of-range shadow values previously produced garbage.
static inline ushort4 compressShadow(float3 shadow)
{
  const float sx = (shadow.x < 0.0f) ? 0.0f : ((shadow.x > 1.0f) ? 1.0f : shadow.x);
  const float sy = (shadow.y < 0.0f) ? 0.0f : ((shadow.y > 1.0f) ? 1.0f : shadow.y);
  const float sz = (shadow.z < 0.0f) ? 0.0f : ((shadow.z > 1.0f) ? 1.0f : shadow.z);
  ushort4 shadowCompressed;
  shadowCompressed.x = (ushort)(65535.0f * sx);
  shadowCompressed.y = (ushort)(65535.0f * sy);
  shadowCompressed.z = (ushort)(65535.0f * sz);
  shadowCompressed.w = 0;
  return shadowCompressed;
}
// Inverse of compressShadow: expand the 16-bit quantized shadow value back
// to floats in [0, 1] (the unused w component is dropped).
static inline float3 decompressShadow(ushort4 shadowCompressed)
{
const float invNormCoeff = 1.0f / 65535.0f;
return invNormCoeff*make_float3((float)shadowCompressed.x, (float)shadowCompressed.y, (float)shadowCompressed.z);
}
#define SPLIT_DL_BY_GRAMMAR true
//#define SBDPT_DEBUG_SPLIT 0
//#define SBDPT_DEBUG_DEPTH 4
//#define SBDPT_CHECK_BOUNCE 4
//#define SBDPT_INDIRECT_ONLY (void)
/* Fold a value whose magnitude exceeds 1 back toward [-1, 1] by removing its
 * truncated integer part; values already inside [-1, 1] pass through
 * unchanged. Note that for inputs below -1 the result is the positive
 * distance to the truncated part (e.g. -2.5 maps to +0.5), matching the
 * original behavior. */
static inline float WrapVal(float a_val)
{
    const float truncated = (float)((int)a_val);
    if (a_val > 1.0f)
        return a_val - truncated;
    if (a_val < -1.0f)
        return truncated - a_val;
    return a_val;
}
#endif
|
GB_unaryop__lnot_uint8_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint8_fp32
// op(A') function: GB_tran__lnot_uint8_fp32
// C type: uint8_t
// A type: float
// cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8)
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
float
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z ; GB_CAST_UNSIGNED(z,x,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary LNOT operator elementwise: for each of the anz entries,
// cast Ax[p] (float) to uint8_t via GB_CAST_UNSIGNED and store
// !(x != 0) into Cx[p], using up to `nthreads` OpenMP threads.
// Returns GrB_NO_VALUE when this kernel is disabled via GB_control.h
// (see GB_DISABLE above), GrB_SUCCESS otherwise.
GrB_Info GB_unop__lnot_uint8_fp32
(
uint8_t *restrict Cx,
const float *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast float to uint8_t, and apply the
// LNOT operator. The actual loops live in the GB_unaryop_transpose.c
// template, which is parameterized by the GB_* macros defined above and
// included directly into this function body.
GrB_Info GB_tran__lnot_uint8_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pomp.h | // license:BSD-3-Clause
// copyright-holders:Couriersud
#ifndef POMP_H_
#define POMP_H_
///
/// \file pomp.h
///
/// Wrap all OPENMP stuff here in a hopefully c++ compliant way.
///
#include "pconfig.h"
#include "ptypes.h"
#include <cstdint>
#if PHAS_OPENMP
#include "omp.h"
#endif
namespace plib {
namespace omp {
/// \brief Apply \p what to every index in [start, end), parallelizing with
/// OpenMP only when the estimated amount of work (\p numops) exceeds 1000 —
/// below that threshold the fork/join overhead is assumed to outweigh any
/// gain, so the loop runs serially.
template <typename I, class T>
void for_static(std::size_t numops, const I start, const I end, const T &what) noexcept(noexcept(what))
{
if (numops>1000)
{
#if PHAS_OPENMP && PUSE_OPENMP
#pragma omp parallel for schedule(static)
#endif
for (I i = start; i < end; i++)
what(i);
}
else
for (I i = start; i < end; i++)
what(i);
}
/// \brief Apply \p what to every index in [start, end); always parallelized
/// with OpenMP when it is compiled in (no work-size heuristic — compare the
/// overload above).
template <typename I, class T>
void for_static(const I start, const I end, const T &what) noexcept(noexcept(what))
{
#if PHAS_OPENMP && PUSE_OPENMP
#pragma omp parallel for schedule(static)
#endif
for (I i = start; i < end; i++)
what(i);
}
/// \brief Strictly serial ("np" = no parallel) variant: apply \p what to
/// every index in [start, end) with no OpenMP involvement at all.
template <typename I, class T>
void for_static_np(const I start, const I end, const T &what) noexcept(noexcept(what))
{
	for (I idx = start; idx != end || false; ++idx)
		what(idx);
}
/// \brief Request the number of OpenMP worker threads.
///
/// No-op when OpenMP support is not compiled in (PHAS_OPENMP/PUSE_OPENMP).
/// \param threads requested thread count.
inline void set_num_threads(const std::size_t threads) noexcept
{
#if PHAS_OPENMP && PUSE_OPENMP
	// omp_set_num_threads takes an int; cast explicitly instead of relying
	// on an implicit (narrowing) size_t -> int conversion.
	omp_set_num_threads(static_cast<int>(threads));
#else
	plib::unused_var(threads);
#endif
}
/// \brief Maximum number of OpenMP threads available.
/// \return omp_get_max_threads() when OpenMP is compiled in, otherwise 1.
inline std::size_t get_max_threads() noexcept
{
#if PHAS_OPENMP && PUSE_OPENMP
	// omp_get_max_threads returns int; widen explicitly to avoid an
	// implicit signed -> unsigned conversion warning.
	return static_cast<std::size_t>(omp_get_max_threads());
#else
	return 1;
#endif
}
// ----------------------------------------------------------------------------------------
// pdynlib: dynamic loading of libraries ...
// ----------------------------------------------------------------------------------------
} // namespace omp
} // namespace plib
#endif // POMP_H_
|
file.c | #include <stdio.h>
/* Minimal OpenMP demo: every thread in the parallel region prints one
 * greeting. Without OpenMP the pragma is ignored and it prints once. */
int main(){
#pragma omp parallel
    {
        /* puts appends the newline itself, matching printf("...\n") */
        puts("hello openmp!");
    }
    return 0;
}
|
race.c | #include <assert.h>
#include <stdio.h>
#include <stdlib.h>
/*
 * Increment `counter` 100000 times across OpenMP threads and print the total.
 *
 * Fixes over the original:
 *  - `counter` (and `i`) were read uninitialized — undefined behavior even
 *    in a single-threaded build.
 *  - `#pragma omp parallel` made every thread execute the whole loop while
 *    racing on the shared `counter` and the shared loop index `i`, so the
 *    printed value was nondeterministic garbage. `parallel for` with
 *    `reduction(+:counter)` gives each thread a private partial sum and
 *    combines them, so the result is a deterministic 100000.
 */
int main()
{
    int counter = 0;
#pragma omp parallel for reduction(+:counter)
    for (int i = 0; i < 100000; i++)
        counter++;
    printf("counter=%d\n", counter);
    return 0;
}
|
evolve_cc.c | /*
* The Connected Components Hamiltonian split uses a connected component search
* on the time step graph of the system to find isolated subsystems with fast
* interactions. These subsystems are then evolved at greater accuracy compared
* to the rest system.
* Equation numbers in comments refer to: J\"anes, Pelupessy, Portegies Zwart, A&A 2014 (doi:10.1051/0004-6361/201423831)
*/
#include <tgmath.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <string.h>
#include "evolve.h"
#include "evolve_kepler.h"
#include "evolve_bs.h"
#include "evolve_shared.h"
#define BS_SUBSYS_SIZE 10
#define SHARED10_SUBSYS_SIZE 10
#define SHARED10_MIN_DT_RATIO (1./128) // if dtmin/dtmax > this do shared10, else try to find further CC
// (ie see if there are hard binaries)
/* One connected component: a particle (sub)system plus the link to the next
 * component — split_cc() builds a singly-linked list of these. The embedded
 * sys aliases the caller's particle storage; free_sys() frees only the list
 * nodes. */
struct ccsys
{
struct sys s;
struct ccsys *next_cc;
};
#define LOG_CC_SPLIT(C, R) \
{ \
LOG("clevel = %d s.n = %d c.n = {", clevel, s.n); \
for (struct ccsys *_ci = (C); _ci!=NULL; _ci = _ci->next_cc) printf(" %d ", _ci->s.n ); \
printf("} r.n = %d\n", (R).n); \
};
#define PRINTSYS(s) \
{ \
LOG("sys %d %d : {",s.n,s.nzero); \
for (UINT i=0;i<s.n-s.nzero;i++) printf(" %d", GETPART(s, i)->id); printf(" | "); \
for (UINT i=s.n-s.nzero;i<s.n;i++) printf(" %d", GETPART(s, i)->id); printf(" }\n"); \
};
#define PRINTOFFSETS(s) \
{ \
LOG("sysoffsets %d %d : {",s.n,s.nzero); \
for (UINT i=0;i<s.n-s.nzero;i++) printf(" %d", GETPART(s, i)-s.part); printf(" | "); \
for (UINT i=s.n-s.nzero;i<s.n;i++) printf(" %d", GETPART(s, i)-s.part); printf(" }\n"); \
};
#define LOGSYS_ID(SYS) for (UINT i = 0; i < (SYS).n; i++) { printf("%u ", GETPART(SYS, i)->id); } printf("\n");
#define LOGSYSp_ID(SYS) LOGSYS_ID(*SYS);
#define LOGSYSC_ID(SYS) for (struct ccsys *_ci = &(SYS); _ci!=NULL; _ci = _ci->next_cc) \
{printf("{"); for (UINT i = 0; i < _ci->s.n; i++) {printf("%u ", GETPART(_ci->s,i)->id); } printf("}\t");} printf("\n");
void split_cc(int clevel,struct sys s, struct ccsys **c, struct sys *r, DOUBLE dt) {
/*
* split_cc: run a connected component search on sys s with threshold dt,
* creates a singly-linked list of connected components c and a rest system r
* c or r is set to zerosys if no connected components/rest is found
*
* Two particles are "connected" when their pairwise timestep is <= dt.
* The partition is done in place: particles are reordered by swapping so
* that each component occupies a contiguous range (massive particles and
* massless "zero" particles kept in their separate ranges), and trivial
* single-particle components are swapped to the tail to form the rest
* system. The returned component systems alias s's storage; only the list
* nodes themselves are heap-allocated.
*/
int dir=SIGN(dt);
dt=fabs(dt);
diag->tstep[clevel]++; // not directly comparable to corresponding SF-split statistics
struct ccsys **c_next;
if(s.n<=1) ENDRUN("This does not look right...");
c_next = c;
if(*c_next!=NULL) ENDRUN("should start with zero pointer");
UINT processed = 0; // increase if something is added from the stack to the cc
struct particle **active, *comp_next, *compzero_next; // current active particle, and next in the mass and massless part
UINT comp_size, compzero_size;
struct particle *stack_next=NULL, *stackzero_next=NULL; // two pointers keeping track of stack in massive and massless part
// NOTE(review): stackzero_size below is declared but never used.
UINT stack_size, stackzero_size; // counter for total size of stack and zero
struct particle *rest_next=NULL, *restzero_next=NULL; // swap this to add to the rest-system
// find connected components
if(s.n-s.nzero>0) stack_next=s.part;
if(s.n-s.nzero>0) rest_next=LAST(s);
if(s.nzero>0) stackzero_next=s.zeropart;
if(s.nzero>0) restzero_next=LASTZERO(s);
comp_next=stack_next;
compzero_next=stackzero_next;
while (processed < s.n)
{
if(stack_next!=comp_next) ENDRUN("consistency error in split_cc\n")
if(stackzero_next!=compzero_next) ENDRUN("consistency error in split_cc\n")
//~ if(stack_next==rest_next && stackzero_next==restzero_next) ENDRUN("impossible")
// startup stack
// seed the next component with one unprocessed particle: a massive one if
// any remain, otherwise a massless one
comp_size=0;
compzero_size=0;
if(stack_next!=NULL && stack_next<rest_next+1)
{
//~ LOG("stack_next init\n");
stack_next++;
stack_size=1;
}
if(comp_next==stack_next && stackzero_next!=NULL && stackzero_next<restzero_next+1)
{
//~ LOG("stackzero_next init\n");
stackzero_next++;
stack_size=1;
}
if(stack_next==comp_next && stackzero_next==compzero_next) ENDRUN("impossible")
// search for the next connected component
while (stack_size > 0)
{
//~ LOG("stack_size %d\n", stack_size);
active=NULL;
if(stack_next!=NULL &&
stack_next-comp_next>0) {active=&comp_next;}
else
if(stackzero_next!=NULL &&
stackzero_next-compzero_next>0) {active=&compzero_next;}
if(active==NULL) ENDRUN("no active, while stack still >0\n");
// iterate over all unvisited elements
if(stack_next!=NULL)
{
//~ LOG("check massive %d\n", rest_next-stack_next+1);
for (struct particle *i = stack_next; i <= rest_next; i++)
{
diag->tcount[clevel]++;
// if element is connected to the first element of the stack
if ( ((DOUBLE) timestep_ij(*active, i,dir)) <= dt)
{
// add i to the end of the stack by swapping stack_next and i
//~ LOG("stack_next add %d\n", i->id);
//~ LOG("stack offsets: %d, %d\n", stack_next-s.part, i-s.part);
SWAP( *stack_next , *i, struct particle );
stack_next++;
stack_size++;
}
}
}
// iterate over all unvisited elements, skip when active is zero mass
// (massless candidates are not scanned from a massless active particle —
// presumably because massless pairs impose no timestep constraint; confirm)
if(stackzero_next!=NULL && active!=&compzero_next)
{
//~ LOG("check zero %d\n", restzero_next-stackzero_next+1);
for (struct particle *i = stackzero_next; i <= restzero_next; i++)
{
diag->tcount[clevel]++;
// if element is connected to the first element of the stack
if ( ((DOUBLE) timestep_ij(*active, i,dir)) <= dt)
{
// add i to the end of the stack by swapping stack_next and i
//~ LOG("stackzero_next add %d\n", i->id);
//~ LOG("stack offsets: %d, %d\n", stackzero_next-s.part, i-s.part);
SWAP( *stackzero_next , *i, struct particle );
stackzero_next++;
stack_size++;
}
}
}
// pop the stack
(*active)++;
if(active==&compzero_next) compzero_size++;
comp_size++;
stack_size--;
//~ LOG("popped %d, %d, %d\n", stack_size, comp_size, compzero_size);
}
processed += comp_size;
//~ LOG("comp finish %d, %d\n", comp_size, compzero_size);
// new component is non-trivial: create a new sys
if (comp_size > 1)
{
//~ LOG("split_cc: found component with size: %d %d\n", comp_size, compzero_size);
//~ LOG("%d %d \n", comp_next-stack_next, compzero_next-stackzero_next);
// NOTE(review): malloc result is not checked before use.
*c_next=(struct ccsys*) malloc( sizeof(struct ccsys) );
struct sys *new=&((*c_next)->s);
*new=zerosys;
new->n = comp_size;
new->nzero = compzero_size;
if(comp_size-compzero_size>0)
{
new->part = comp_next - (comp_size-compzero_size);
}
if(compzero_size>0)
{
new->zeropart = compzero_next - compzero_size;
}
if(new->part==NULL) new->part=new->zeropart;
//~ PRINTSYS((*c_next));
//~ PRINTOFFSETS((*c_next));
(*c_next)->next_cc = NULL;
c_next = &((*c_next)->next_cc);
}
else // new component is trivial: add to rest, reset pointers
{
//~ LOG("split_cc: add to rest\n");
//~ LOG("%d %d \n", comp_next-stack_next, compzero_next-stackzero_next);
if(active==&comp_next)
{
comp_next--;
//~ LOG("r1 check offsets: %d, %d\n", comp_next-s.part, rest_next-s.part);
SWAP( *comp_next, *rest_next, struct particle );
rest_next--;
stack_next--;
} else
{
compzero_next--;
//~ LOG("r2 check offsets: %d, %d\n", compzero_next-s.part, restzero_next-s.part);
SWAP( *compzero_next, *restzero_next, struct particle );
restzero_next--;
stackzero_next--;
}
}
}
if(stack_next!=NULL && stack_next!=rest_next+1) ENDRUN("unexpected")
if(stackzero_next!=NULL && stackzero_next!=restzero_next+1) ENDRUN("unexpected")
// create the rest system
*r=zerosys;
if(rest_next!=NULL) r->n = LAST(s) - rest_next;
if(restzero_next!=NULL) r->nzero = LASTZERO(s) - restzero_next;
r->n+=r->nzero;
if (r->n-r->nzero > 0)
{
r->part = rest_next+1;
}
if (r->nzero > 0)
{
r->zeropart = restzero_next+1;
}
if(r->part==NULL) r->part=r->zeropart;
if (processed != s.n)
{
ENDRUN("split_cc particle count mismatch: processed=%u s.n=%u r->n=%u\n", processed, s.n, r->n);
}
//~ LOG("exit with %d %d\n", s.n, s.nzero);
}
void split_cc_verify(int clevel,struct sys s, struct ccsys *c, struct sys r) {
/*
* split_cc_verify: explicit verification if connected components c and rest system r form a correct
* connected components decomposition of the system.
*
* Brute-force debug check (used under CONSISTENCY_CHECKS): every particle of
* the original system s must appear exactly once across the components and
* the rest system, and each component's LAST/LASTZERO bookkeeping must be
* consistent. O(s.n * total) — debugging only.
* Note: clevel is unused here; kept for interface symmetry with the other
* split_cc_* helpers.
*/
//~ LOG("split_cc_verify ping s.n=%d r->n=%d\n", s.n, r->n);
//LOG_CC_SPLIT(c, r);
UINT pcount_check = 0;
for (UINT i = 0; i < s.n; i++)
{
pcount_check = 0; // recomputed every iteration; final value checked after the loop
UINT particle_found = 0;
struct particle *p = GETPART(s, i);
for (struct ccsys *cj = c; cj!=NULL; cj = cj->next_cc)
{
verify_split_zeromass(cj->s);
pcount_check += cj->s.n;
//~ //LOG("%d\n", pcount_check);
// search for p in connected components
for (UINT k = 0; k < cj->s.n; k++)
{
struct particle * pk = GETPART( cj->s,k);
// is pk equal to p
if (p->id == pk->id)
{
particle_found += 1;
//~ LOG("split_cc_verify: found %d in a cc\n",i);
}
}
if (cj->s.n-cj->s.nzero>0 && ( GETPART( cj->s, cj->s.n - cj->s.nzero - 1) != LAST(cj->s) ))
{
LOG("split_cc_verify: last pointer for c is not set correctly!\n");
LOG_CC_SPLIT(c, r);
ENDRUN("data structure corrupted\n");
}
if (cj->s.nzero>0 && ( GETPART( cj->s, cj->s.n-1) != LASTZERO(cj->s) ))
{
LOG("split_cc_verify: last pointer for c is not set correctly!\n");
LOG_CC_SPLIT(c, r);
ENDRUN("data structure corrupted\n");
}
}
verify_split_zeromass(r);
// search for p in rest
for (UINT k = 0; k < r.n; k++)
{
struct particle * pk = GETPART( r, k);
// is pk equal to p
if (p->id == pk->id)
{
particle_found += 1;
//~ LOG("found at r\n")
}
}
// each particle must occur exactly once in the decomposition
if (particle_found != 1)
{
LOG("split_cc_verify: particle %d (%d) particle_found=%d\n", i, p->id, particle_found);
LOG_CC_SPLIT(c, r);
ENDRUN("data structure corrupted\n");
}
}
if (pcount_check + r.n != s.n)
{
LOG("split_cc_verify: particle count mismatch (%d %d)\n", pcount_check + r.n, s.n);
LOG_CC_SPLIT(c, r);
ENDRUN("data structure corrupted\n");
//ENDRUN("split_cc_verify: particle count mismatch\n");
}
else
{
//~ LOG("split_cc_verify pong\n");
}
//ENDRUN("Fin.\n");
}
/*
* split_cc_verify_ts: debug check that the decomposition respects the
* timestep threshold: every cross-component (C-C), component-rest (C-R) and
* rest-rest (R-R) particle pair must have timestep_ij >= dt — otherwise the
* pair should have ended up inside one component. O(n^2); debug builds only.
* Note: clevel is unused here; kept for interface symmetry.
*/
void split_cc_verify_ts(int clevel,struct ccsys *c, struct sys r, DOUBLE dt)
{
DOUBLE ts_ij;
int dir=SIGN(dt);
dt=fabs(dt);
// verify C-C interactions
for (struct ccsys *ci = c; ci!=NULL; ci = ci->next_cc)
{
for (UINT i = 0; i < ci->s.n; i++)
{
for (struct ccsys *cj = c; cj!=NULL; cj = cj->next_cc)
{
if (ci == cj)
{
continue;
}
for (UINT j = 0; j < cj->s.n; j++)
{
ts_ij = (DOUBLE) timestep_ij(GETPART( ci->s, i), GETPART( cj->s, j), dir);
//LOG("comparing %d %d\n", GETPART( ci->s, i)-> id, GETPART( cj->s, j)->id);
//LOG("%f %f \n", ts_ij, dt);
if (dt > ts_ij)
{
ENDRUN("split_cc_verify_ts C-C timestep underflow\n");
}
}
}
}
}
// verify C-R interactions
for (struct ccsys *ci = c; ci!=NULL; ci = ci->next_cc)
{
for (UINT i = 0; i < ci->s.n; i++)
{
for (UINT j = 0; j < r.n; j++)
{
ts_ij = (DOUBLE) timestep_ij( GETPART(ci->s, i), GETPART(r, j),dir);
if (ts_ij < dt)
{
ENDRUN("split_cc_verify_ts C-R timestep underflow\n");
}
}
}
}
// verify R interactions
for (UINT i = 0; i < r.n; i++)
{
for (UINT j = 0; j < r.n; j++)
{
if (i == j) continue;
ts_ij = (DOUBLE) timestep_ij( GETPART(r, i), GETPART(r,j),dir);
if (ts_ij < dt)
{
ENDRUN("split_cc_verify_ts R-R timestep underflow\n");
}
}
}
}
// TODO rename to cc_free_sys?
/*
 * Free a singly-linked list of connected components built by split_cc().
 *
 * Rewritten iteratively: the original recursed on next_cc, which can
 * overflow the call stack for a very long component list. Only the list
 * nodes are freed — the particle arrays they reference are views into the
 * caller's storage and are not owned here.
 */
void free_sys(struct ccsys * s)
{
  while (s != NULL)
  {
    struct ccsys *next = s->next_cc;
    free(s);
    s = next;
  }
}
/* Spawn an OpenMP task for a component only when there is real parallelism
   (more than one component, nc) and the parent system is large enough to
   amortize the task overhead. */
#define TASKCONDITION (nc > 1 && s.n>BS_SUBSYS_SIZE)
/*
 * evolve_cc2: recursive connected-component (CC) split integrator.
 *
 * Evolves system s from stime to etime at pivot timestep dt by splitting it
 * into non-trivial connected components C_1..C_K plus a rest set R
 * (split_cc, eq 25), recursively integrating each component over two half
 * steps, and applying the inter-component kicks and the drift of R at the
 * pivot step (eqs 23/24/27/30; equation numbers presumably refer to the
 * accompanying paper — confirm).  Sufficiently small systems are delegated
 * to specialized integrators (Kepler, Bulirsch-Stoer, 10th-order shared
 * timestep) depending on inttype.  recenter requests a center-of-mass frame
 * shift before recursing (CCC_* variants only).
 */
void evolve_cc2(int clevel,struct sys s, DOUBLE stime, DOUBLE etime, DOUBLE dt, int inttype, int recenter)
{
  DOUBLE cmpos[3],cmvel[3];
  int recentersub=0;
  struct ccsys *c = NULL;
  struct sys r = zerosys;
  CHECK_TIMESTEP(etime,stime,dt,clevel);
  /* two-body systems (or systems with at most one massive particle):
     solve directly as a Kepler problem */
  if ((s.n == 2 || s.n-s.nzero<=1 )&&
  (inttype==CCC_KEPLER || inttype==CC_KEPLER || inttype==CCC_BS ||
  inttype==CC_BS || inttype==CCC_BSA || inttype==CC_BSA || inttype==CC_SHARED10 || inttype==CCC_SHARED10))
  //~ if (s.n == 2 && (inttype==CCC_KEPLER || inttype==CC_KEPLER))
  {
    evolve_kepler(clevel,s, stime, etime, dt);
    return;
  }
  /* CCC variants: integrate in the center-of-mass frame, then shift back;
     the COM itself moves ballistically over dt */
  if(recenter && (inttype==CCC || inttype==CCC_KEPLER || inttype==CCC_BS || inttype==CCC_BSA || inttype==CCC_SHARED10))
  {
    system_center_of_mass(s,cmpos,cmvel);
    move_system(s,cmpos,cmvel,-1);
    evolve_cc2(clevel, s, stime, etime, dt, inttype, 0);
    for(int i=0;i<3;i++) cmpos[i]+=cmvel[i]*dt;
    move_system(s,cmpos,cmvel,1);
    return;
  }
  /* small systems: hand off to (adaptive) Bulirsch-Stoer */
  if (s.n <= BS_SUBSYS_SIZE && (inttype==CCC_BS ||inttype==CC_BS))
  {
    evolve_bs(clevel,s, stime, etime, dt);
    return;
  }
  if (s.n <= BS_SUBSYS_SIZE && (inttype==CCC_BSA ||inttype==CC_BSA))
  {
    evolve_bs_adaptive(clevel,s, stime, etime, dt, -1.);
    return;
  }
  /* small systems whose particle timesteps are similar enough:
     shared 10th-order integrator */
  if (s.n <= SHARED10_SUBSYS_SIZE && (inttype==CCC_SHARED10 ||inttype==CC_SHARED10))
  {
    timestep(clevel,s,s,SIGN(dt));
    FLOAT dtmax=max_global_timestep(s);
    FLOAT dtmin=global_timestep(s);
    if(dtmin/dtmax>SHARED10_MIN_DT_RATIO)
    {
      evolve_shared10(clevel,s, stime, etime, dt, -1.);
      return;
    }
  }
#ifdef CONSISTENCY_CHECKS
  if (clevel == 0)
  {
    printf("consistency_checks: %d %d \n", s.n, clevel);
  }
#endif
#ifdef CONSISTENCY_CHECKS
  // debug: make a copy of s to verify that the split has been done properly
  struct sys s_before=zerosys;
  s_before.n = s.n;
  s_before.nzero = s.nzero;
  s_before.part = (struct particle*) malloc(s.n*sizeof(struct particle));
  if(s_before.nzero>0) s_before.zeropart = s_before.part+(s_before.n-s_before.nzero);
  for(UINT i=0; i<s.n;i++) *GETPART(s_before, i)=*GETPART(s,i);
#endif
  /*
  split_cc() decomposes particles in H (eq 25) into:
  1) K non-trivial connected components C_1..C_K
  2) Rest set R
  */
  split_cc(clevel,s, &c, &r, dt);
  //if (s.n != c.n) LOG_CC_SPLIT(&c, &r); // print out non-trivial splits
#ifdef CONSISTENCY_CHECKS
  /*
  if (s.n != r.n) {
  LOG("s: ");
  LOGSYS_ID(s_before);
  LOG("c: ");
  LOGSYSC_ID(*c);
  LOG("r: ");
  LOGSYS_ID(r);
  }
  */
  // verify the split
  split_cc_verify(clevel,s_before, c, r);
  split_cc_verify_ts(clevel, c, r, dt);
  free(s_before.part);
  if (clevel == 0) {
    printf("ok \n");
  }
#endif
  /* no non-trivial component found: deepest recursion level for these
     particles — record it in the diagnostics */
  if (c==NULL) {
    diag->deepsteps++;
    diag->simtime+=dt;
  }
  // Independently integrate every C_i at reduced pivot time step h/2 (1st time)
  int nc=0; for (struct ccsys *ci = c; ci!=NULL; ci = ci->next_cc) nc++;
  /* recenter subsystems only when there is more than one group of particles */
  if(nc>1 || r.n>0) recentersub=1;
  for (struct ccsys *ci = c; ci!=NULL; ci = ci->next_cc)
  {
#ifdef _OPENMP
    if( TASKCONDITION )
    {
      diag->ntasks[clevel]++;
      diag->taskcount[clevel]+=ci->s.n;
#pragma omp task firstprivate(clevel,ci,stime,dt,recentersub) untied
      {
        /* each task works on a private copy of the component's particles
           so concurrent tasks never share particle storage */
        struct sys lsys=zerosys;
        lsys.n=ci->s.n;
        lsys.nzero=ci->s.nzero;
        struct particle* lpart=(struct particle*) malloc(lsys.n*sizeof(struct particle));
        lsys.part=lpart;
        if(lsys.nzero>0) lsys.zeropart=lsys.part+(lsys.n-lsys.nzero);
        for(UINT i=0;i<lsys.n;i++) *GETPART(lsys,i)=*GETPART(ci->s,i);
        evolve_cc2(clevel+1,lsys, stime, stime+dt/2, dt/2,inttype,recentersub);
        for(UINT i=0;i<lsys.n;i++) *GETPART(ci->s,i)=*GETPART(lsys,i);
        free(lpart);
      }
    } else
#endif
    {
      evolve_cc2(clevel+1,ci->s, stime, stime+dt/2, dt/2,inttype,recentersub);
    }
  }
#pragma omp taskwait
  // Apply drifts and kicks at current pivot time step (eq 30)
  if(r.n>0) drift(clevel,r, stime+dt/2, dt/2); // drift r, 1st time
  // kick ci <-> cj (eq 23)
  for (struct ccsys *ci = c; ci!=NULL; ci = ci->next_cc)
  {
    for (struct ccsys *cj = c; cj!=NULL; cj = cj->next_cc)
    {
      if (ci != cj)
      {
        kick(clevel,ci->s, cj->s, dt);
        //kick(*cj, *ci, dt);
      }
    }
  }
  // kick c <-> rest (eq 24)
  if(r.n>0) for (struct ccsys *ci = c; ci!=NULL; ci = ci->next_cc)
  {
    kick(clevel,r, ci->s, dt);
    kick(clevel,ci->s, r, dt);
  }
  if(r.n>0) kick(clevel,r, r, dt); // kick rest (V_RR)
  if(r.n>0) drift(clevel,r, etime, dt/2); // drift r, 2nd time
  // Independently integrate every C_i at reduced pivot time step h/2 (2nd time, eq 27)
  for (struct ccsys *ci = c; ci!=NULL; ci = ci->next_cc)
  {
#ifdef _OPENMP
    if (TASKCONDITION)
    {
      diag->ntasks[clevel]++;
      diag->taskcount[clevel]+=ci->s.n;
#pragma omp task firstprivate(clevel,ci,stime,etime,dt,recentersub) untied
      {
        /* same private-copy pattern as the first half step */
        struct sys lsys=zerosys;
        lsys.n=ci->s.n;
        lsys.nzero=ci->s.nzero;
        struct particle* lpart=(struct particle*) malloc(lsys.n*sizeof(struct particle));
        lsys.part=lpart;
        if(lsys.nzero>0) lsys.zeropart=lsys.part+(lsys.n-lsys.nzero);
        for(UINT i=0;i<lsys.n;i++) *GETPART(lsys,i)=*GETPART(ci->s,i);
        evolve_cc2(clevel+1,lsys, stime+dt/2, etime, dt/2,inttype,recentersub);
        for(UINT i=0;i<lsys.n;i++) *GETPART(ci->s,i)=*GETPART(lsys,i);
        free(lpart);
      }
    } else
#endif
    {
      evolve_cc2(clevel+1,ci->s, stime+dt/2, etime, dt/2,inttype,recentersub);
    }
  }
#pragma omp taskwait
  free_sys(c);
}
// not actually helpful I think; needs testing
void evolve_cc2_shortcut(int clevel,struct sys s, DOUBLE stime, DOUBLE etime, DOUBLE dt, int inttype, int recenter, FLOAT dtsys)
{
  /*
   * Halve the pivot step until the system-wide maximum timestep fits dt,
   * then hand the system over to the regular CC integrator.  A negative
   * dtsys signals that the global timestep must be (re)computed here.
   */
  CHECK_TIMESTEP(etime,stime,dt,clevel);
  if (dtsys < 0)
  {
    timestep(clevel, s, s, SIGN(dt));
    dtsys = max_global_timestep(s);
  }
  if (dtsys >= fabs(dt))
  {
    evolve_cc2(clevel, s, stime, etime, dt, inttype, recenter);
    return;
  }
  /* first half keeps the known dtsys; the second half must recompute it */
  evolve_cc2_shortcut(clevel+1, s, stime, stime+dt/2, dt/2, inttype, recenter, dtsys);
  evolve_cc2_shortcut(clevel+1, s, stime+dt/2, etime, dt/2, inttype, recenter, -1.);
}
#undef TASKCONDITION
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize *y in place (exactly as the classic glibc example does) so
     that the usec fields can be subtracted without underflowing below
     zero or exceeding one second. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_sec += borrow;
    y->tv_usec -= 1000000 * borrow;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_sec -= carry;
    y->tv_usec += 1000000 * carry;
  }
  /* tv_usec is now certainly positive. */
  result->tv_usec = x->tv_usec - y->tv_usec;
  result->tv_sec = x->tv_sec - y->tv_sec;
  /* Nonzero return <=> the difference is negative (x earlier than y). */
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  /*
   * Order-1 3D 7-point variable-coefficient stencil benchmark.
   * Usage: prog [Nx Ny Nz [Nt]] — interior sizes; a halo of 1 cell is
   * added on each side.  Runs the tiled CLooG-generated sweep TESTS
   * times and reports the minimum wall time.
   */
  int t, i, j, k, m, test;
  /* Defaults (interior 32^3, 10 timesteps) used when sizes are not given
     on the command line; previously these were read uninitialized, which
     is undefined behavior. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays: A[2][Nz][Ny][Nx] (double buffer) and 7 coefficient grids
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 4;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize the data grid and coefficients with reproducible pseudo-random values
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,12);t1++) {
        lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
        ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(24*t2-Nz,4)),3*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(12*t1+Ny+21,4)),floord(24*t2+Ny+20,4)),floord(24*t1-24*t2+Nz+Ny+19,4));t3++) {
            for (t4=max(max(max(0,ceild(3*t1-255,256)),ceild(24*t2-Nz-1020,1024)),ceild(4*t3-Ny-1020,1024));t4<=min(min(min(min(floord(4*t3+Nx,1024),floord(Nt+Nx-4,1024)),floord(12*t1+Nx+21,1024)),floord(24*t2+Nx+20,1024)),floord(24*t1-24*t2+Nz+Nx+19,1024));t4++) {
              for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),4*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),4*t3+2),1024*t4+1022),24*t1-24*t2+Nz+21);t5++) {
                for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
                    lbv=max(1024*t4,t5+1);
                    ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  /* the top-level pointer arrays and the tile-size list were previously leaked */
  free(A);
  free(coef);
  free(tile_size);
  return 0;
}
|
inner_product.c | /*
This source file is part of the Geophysical Fluids Modeling Framework (GAME), which is released under the MIT license.
Github repository: https://github.com/OpenNWP/GAME
*/
/*
In this file, the inner product weights are computed.
*/
#include <stdlib.h>
#include <stdio.h>
#include <geos95.h>
#include "../../src/game_types.h"
#include "include.h"
int calc_inner_product(double inner_product_weights[], double normal_distance[], double volume[], int to_index[], int from_index[], double area[], double z_scalar[], double z_vector[], int adjacent_vector_indices_h[])
{
	/*
	This function computes the geometrical weights for computing the inner product.
	Slots 0..5 of each scalar cell hold the horizontal edge weights,
	slots 6 and 7 the upper and lower vertical weights.
	*/
	int layer_index, h_index;
	double delta_z;
	#pragma omp parallel for private(layer_index, h_index, delta_z)
	for (int i = 0; i < NO_OF_SCALARS; ++i)
	{
		layer_index = i/NO_OF_SCALARS_H;
		h_index = i - layer_index*NO_OF_SCALARS_H;
		// horizontal contributions (up to six neighbouring edges)
		for (int j = 0; j < 6; ++j)
		{
			if (j == 5 && h_index < NO_OF_PENTAGONS)
			{
				// pentagons only have five horizontal neighbours
				inner_product_weights[8*i + j] = 0;
			}
			else
			{
				int vector_index = NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + adjacent_vector_indices_h[6*h_index + j];
				inner_product_weights[8*i + j] = area[vector_index]*normal_distance[vector_index]/(2*volume[i]);
			}
		}
		// upper w
		delta_z = layer_index == 0
			? 2*(z_vector[h_index] - z_scalar[i])
			: z_scalar[i - NO_OF_SCALARS_H] - z_scalar[i];
		inner_product_weights[8*i + 6] = area[h_index + layer_index*NO_OF_VECTORS_PER_LAYER]*delta_z/(2*volume[i]);
		// lower w
		delta_z = layer_index == NO_OF_LAYERS - 1
			? 2*(z_scalar[i] - z_vector[NO_OF_LAYERS*NO_OF_VECTORS_PER_LAYER + h_index])
			: z_scalar[i] - z_scalar[i + NO_OF_SCALARS_H];
		inner_product_weights[8*i + 7] = area[h_index + (layer_index + 1)*NO_OF_VECTORS_PER_LAYER]*delta_z/(2*volume[i]);
	}
	return 0;
}
|
convolution_sgemm_pack4to16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
/*
 * GEMM stage of the pack4-to-pack16 AVX-512 convolution: multiplies the
 * im2col'ed input (size x maxk x inch, pack4) by the interleaved kernel
 * and writes pack16 output channels.  Input pixels are first permuted into
 * tiles of 16 so the inner loop can broadcast 16 scalars against one
 * 16-wide weight vector.
 */
static void im2col_sgemm_pack4to16_avx512(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // Mat bottom_im2col(size, maxk, inch, 16u, 4, opt.workspace_allocator);

    const int size = bottom_im2col.w;   // number of output pixels
    const int maxk = bottom_im2col.h;   // kernel taps (kernel_w * kernel_h)
    const int inch = bottom_im2col.c;   // input channels (pack4)

    const int outch = top_blob.c;       // output channels (pack16)

    const float* bias = _bias;

    // permute: regroup pixels into tiles of 16, transposing the pack4 lanes
    // so each tile stores 16 consecutive pixels per input lane
    Mat tmp;
    if (size >= 16)
        tmp.create(16 * maxk, inch, size / 16 + size % 16, 16u, 4, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 16u, 4, opt.workspace_allocator);
    {
        int nn_size = size >> 4;
        int remain_size_start = 0;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 16;

            float* tmpptr = tmp.channel(i / 16);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    // transpose 4x16
                    __m128 _r0 = _mm_load_ps(img0);
                    __m128 _r1 = _mm_load_ps(img0 + 4);
                    __m128 _r2 = _mm_load_ps(img0 + 4 * 2);
                    __m128 _r3 = _mm_load_ps(img0 + 4 * 3);
                    __m128 _r4 = _mm_load_ps(img0 + 4 * 4);
                    __m128 _r5 = _mm_load_ps(img0 + 4 * 5);
                    __m128 _r6 = _mm_load_ps(img0 + 4 * 6);
                    __m128 _r7 = _mm_load_ps(img0 + 4 * 7);
                    __m128 _r8 = _mm_load_ps(img0 + 4 * 8);
                    __m128 _r9 = _mm_load_ps(img0 + 4 * 9);
                    __m128 _ra = _mm_load_ps(img0 + 4 * 10);
                    __m128 _rb = _mm_load_ps(img0 + 4 * 11);
                    __m128 _rc = _mm_load_ps(img0 + 4 * 12);
                    __m128 _rd = _mm_load_ps(img0 + 4 * 13);
                    __m128 _re = _mm_load_ps(img0 + 4 * 14);
                    __m128 _rf = _mm_load_ps(img0 + 4 * 15);

                    _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
                    _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
                    _MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb);
                    _MM_TRANSPOSE4_PS(_rc, _rd, _re, _rf);

                    // store lane-major: all 16 pixels of lane 0, then lane 1, ...
                    _mm_store_ps(tmpptr, _r0);
                    _mm_store_ps(tmpptr + 4, _r4);
                    _mm_store_ps(tmpptr + 4 * 2, _r8);
                    _mm_store_ps(tmpptr + 4 * 3, _rc);
                    _mm_store_ps(tmpptr + 4 * 4, _r1);
                    _mm_store_ps(tmpptr + 4 * 5, _r5);
                    _mm_store_ps(tmpptr + 4 * 6, _r9);
                    _mm_store_ps(tmpptr + 4 * 7, _rd);
                    _mm_store_ps(tmpptr + 4 * 8, _r2);
                    _mm_store_ps(tmpptr + 4 * 9, _r6);
                    _mm_store_ps(tmpptr + 4 * 10, _ra);
                    _mm_store_ps(tmpptr + 4 * 11, _re);
                    _mm_store_ps(tmpptr + 4 * 12, _r3);
                    _mm_store_ps(tmpptr + 4 * 13, _r7);
                    _mm_store_ps(tmpptr + 4 * 14, _rb);
                    _mm_store_ps(tmpptr + 4 * 15, _rf);

                    img0 += size * 4;
                    tmpptr += 64;
                }
            }
        }

        remain_size_start += nn_size << 4;

        // leftover pixels (size % 16): copied without transposition
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 16 + i % 16);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    __m128 _val = _mm_load_ps(img0);
                    _mm_store_ps(tmpptr, _val);

                    img0 += size * 4;
                    tmpptr += 4;
                }
            }
        }
    }

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);

        const float zeros[16] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
        const float* biasptr = bias ? bias + p * 16 : zeros;

        int i = 0;
        // main loop: 16 pixels x 16 output channels per iteration,
        // one FMA per pixel per reduction step
        for (; i + 15 < size; i += 16)
        {
            float* tmpptr = tmp.channel(i / 16);
            const float* kptr = kernel.channel(p);

            int nn = inch * maxk * 4; // inch always > 0

            __m512 _sum0 = _mm512_loadu_ps(biasptr);
            __m512 _sum1 = _sum0;
            __m512 _sum2 = _sum0;
            __m512 _sum3 = _sum0;
            __m512 _sum4 = _sum0;
            __m512 _sum5 = _sum0;
            __m512 _sum6 = _sum0;
            __m512 _sum7 = _sum0;
            __m512 _sum8 = _sum0;
            __m512 _sum9 = _sum0;
            __m512 _suma = _sum0;
            __m512 _sumb = _sum0;
            __m512 _sumc = _sum0;
            __m512 _sumd = _sum0;
            __m512 _sume = _sum0;
            __m512 _sumf = _sum0;

            for (int j = 0; j < nn; j++)
            {
                __m512 _w0 = _mm512_load_ps(kptr);

                __m512 _val0 = _mm512_set1_ps(tmpptr[0]);
                __m512 _val1 = _mm512_set1_ps(tmpptr[1]);
                _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
                _sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
                __m512 _val2 = _mm512_set1_ps(tmpptr[2]);
                __m512 _val3 = _mm512_set1_ps(tmpptr[3]);
                _sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
                _sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
                __m512 _val4 = _mm512_set1_ps(tmpptr[4]);
                __m512 _val5 = _mm512_set1_ps(tmpptr[5]);
                _sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
                _sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
                __m512 _val6 = _mm512_set1_ps(tmpptr[6]);
                __m512 _val7 = _mm512_set1_ps(tmpptr[7]);
                _sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
                _sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
                __m512 _val8 = _mm512_set1_ps(tmpptr[8]);
                __m512 _val9 = _mm512_set1_ps(tmpptr[9]);
                _sum8 = _mm512_fmadd_ps(_val8, _w0, _sum8);
                _sum9 = _mm512_fmadd_ps(_val9, _w0, _sum9);
                __m512 _vala = _mm512_set1_ps(tmpptr[10]);
                __m512 _valb = _mm512_set1_ps(tmpptr[11]);
                _suma = _mm512_fmadd_ps(_vala, _w0, _suma);
                _sumb = _mm512_fmadd_ps(_valb, _w0, _sumb);
                __m512 _valc = _mm512_set1_ps(tmpptr[12]);
                __m512 _vald = _mm512_set1_ps(tmpptr[13]);
                _sumc = _mm512_fmadd_ps(_valc, _w0, _sumc);
                _sumd = _mm512_fmadd_ps(_vald, _w0, _sumd);
                __m512 _vale = _mm512_set1_ps(tmpptr[14]);
                __m512 _valf = _mm512_set1_ps(tmpptr[15]);
                _sume = _mm512_fmadd_ps(_vale, _w0, _sume);
                _sumf = _mm512_fmadd_ps(_valf, _w0, _sumf);

                kptr += 16;
                tmpptr += 16;
            }

            _mm512_store_ps(outptr0, _sum0);
            _mm512_store_ps(outptr0 + 16, _sum1);
            _mm512_store_ps(outptr0 + 16 * 2, _sum2);
            _mm512_store_ps(outptr0 + 16 * 3, _sum3);
            _mm512_store_ps(outptr0 + 16 * 4, _sum4);
            _mm512_store_ps(outptr0 + 16 * 5, _sum5);
            _mm512_store_ps(outptr0 + 16 * 6, _sum6);
            _mm512_store_ps(outptr0 + 16 * 7, _sum7);
            _mm512_store_ps(outptr0 + 16 * 8, _sum8);
            _mm512_store_ps(outptr0 + 16 * 9, _sum9);
            _mm512_store_ps(outptr0 + 16 * 10, _suma);
            _mm512_store_ps(outptr0 + 16 * 11, _sumb);
            _mm512_store_ps(outptr0 + 16 * 12, _sumc);
            _mm512_store_ps(outptr0 + 16 * 13, _sumd);
            _mm512_store_ps(outptr0 + 16 * 14, _sume);
            _mm512_store_ps(outptr0 + 16 * 15, _sumf);

            outptr0 += 16 * 16;
        }
        // tail: one pixel at a time
        for (; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 16 + i % 16);
            const float* kptr = kernel.channel(p);

            int nn = inch * maxk * 4; // inch always > 0

            __m512 _sum0 = _mm512_loadu_ps(biasptr);

            for (int j = 0; j < nn; j++)
            {
                __m512 _w0 = _mm512_load_ps(kptr);
                __m512 _val0 = _mm512_set1_ps(tmpptr[0]);
                _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);

                kptr += 16;
                tmpptr += 1;
            }

            _mm512_store_ps(outptr0, _sum0);
            outptr0 += 16;
        }
    }
}
static void convolution_im2col_sgemm_transform_kernel_pack4to16_avx512(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave kernel weights:
    // src layout = maxk-inch-outch
    // dst layout = 16b-4a-maxk-inch/4a-outch/16b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    kernel_tm.create(16 * 4 * maxk, inch / 4, outch / 16, (size_t)4u);

    for (int q = 0; q + 15 < outch; q += 16)
    {
        float* dst = kernel_tm.channel(q / 16);

        for (int p = 0; p + 3 < inch; p += 4)
        {
            for (int k = 0; k < maxk; k++)
            {
                // innermost order: 16 output channels per input lane,
                // matching the GEMM's 16-wide weight loads
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 16; j++)
                    {
                        *dst++ = kernel.channel(q + j).row(p + i)[k];
                    }
                }
            }
        }
    }
}
/*
 * pack4-to-pack16 convolution entry point: lowers the input to an im2col
 * matrix (honouring stride and dilation), then runs the AVX-512 GEMM.
 * top_blob must already be allocated with the output geometry.
 */
static void convolution_im2col_sgemm_pack4to16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;   // output pixels per channel

    const int maxk = kernel_w * kernel_h;

    // im2col: one row of `size` pack4 pixels per (channel, kernel tap)
    Mat bottom_im2col(size, maxk, inch, 16u, 4, opt.workspace_allocator);
    {
        // float-count jump from the end of one output row to the start of
        // the next input row (pack4: 4 floats per pixel)
        const int gap = (w * stride_h - outw * stride_w) * 4;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    // source starts at the (u,v) tap offset, dilated
                    const float* sptr = img.row(dilation_h * u) + dilation_w * v * 4;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            __m128 _val = _mm_load_ps(sptr);
                            _mm_store_ps(ptr, _val);

                            sptr += stride_w * 4;
                            ptr += 4;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack4to16_avx512(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
raytracer.h | #pragma once
#include <algorithm>
#include <memory>
#include <set>
#include <chrono>
#include <glm/glm.hpp>
#include "camera.h"
#include "entities.h"
#include "image.h"
#include "octree.h"
#include "omp.h"
#include <ctime>
#include "util.h"
#include "halton_enum.h"
#include "halton_sampler.h"
#include "photonMap.h"
//#define DEBUG_OCTREE
class RayTracer
{
public:
int p = 0;
int width, height;
RayTracer() = delete;
// Construct with a camera; the image buffer starts empty and is sized in run().
RayTracer(const Camera& camera)
: _camera(camera), _image(std::make_shared<Image>(0, 0))
{
};
// Attach the scene octree and build a photon map spanning its root bounds.
// NOTE(review): a previously set _photon_map is not deleted here — calling
// setScene twice leaks; confirm intended ownership.
void setScene(Octree* scene)
{
_scene = scene;
_photon_map = new PhotonMap(_scene->_root._bbox.min, _scene->_root._bbox.max);
}
// Render a w x h frame into _image.
// Pipeline: (re)build the octree and photon map if stale, then for every
// pixel shoot Halton-sequence-jittered camera rays and accumulate radiance
// with simple adaptive sampling (extra samples while the running variance
// exceeds noise_thresh).  Rows are distributed over OpenMP threads; pixel
// writes are serialized with a critical section.
// min_samples/max_samples/noise_thresh/photons/FOCAL_BLUR are class/file
// level settings defined outside this view — see their declarations.
void run(int w, int h)
{
std::cout << "starting raytracer with frame size: " << w << ", " << h << "\n";
srand(std::time(0));
// Halton sampler gives a deterministic low-discrepancy pixel/lens sequence
Halton_sampler sampler;
sampler.init_faure();
Halton_enum halton_enum(w, h);
_image = std::make_shared<Image>(w, h);
width = w;
height = h;
if (!_scene->valid)
{
_scene->rebuild();
}
if (!_photon_map->valid)
{
auto start = std::chrono::high_resolution_clock::now();
std::cout << "emitting photons...\n";
tracePhotons(5, photons, sampler, halton_enum);
auto end = std::chrono::high_resolution_clock::now();
std::cout << "photon time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count()/1000.0 << " s\n";
_photon_map->rebuild();
//_scene->rebuild();
}
// physical sensor dimensions derived from the camera's diagonal
double sensorHalfWidth = (_camera.sensorDiag*w)/(sqrt((double)w*w + h*h));
double sensorHalfHeight = sensorHalfWidth * ((double)h/w);
glm::dvec3 screenCenter = _camera.pos + _camera.focalDist*_camera.forward;
glm::dvec3 cameraRight = glm::normalize(glm::cross(_camera.forward, _camera.up));
std::cout << cameraRight.x << ", " << cameraRight.y << ", " << cameraRight.z << ", " << "\n";
double avgTests = 0;
std::vector<double> vars;
vars.reserve(w*h);
// precomputed sub-random jitter tables (currently only used in the
// commented-out jitter variants below)
std::vector<double> xrand;
subrand(xrand, 25000);
std::vector<double> yrand;
subrand(yrand, 25000);
// The structure of the for loop should remain for incremental rendering.
#pragma omp parallel for schedule(dynamic, 10) //OpenMP
for (int y = 0; y < h; ++y)
{
srand(std::time(0) + rand());
if(_running)
{
for (int x = 0; x < w; ++x)
{
glm::dvec3 color(0.5, 0.5, 0.5);
glm::dvec3 lastCol(0, 0, 0);
double var = 0;
int samps = 0;
int s = 0;
// adaptive loop: keep sampling until min_samples accepted or the
// hard cap max_samples is hit; high variance decrements samps so
// noisy pixels get extra rays
while (s < max_samples && samps < min_samples)
{
lastCol = color;
int idx = halton_enum.get_index(s, x, y);
double xr = sampler.sample(0, idx);
double yr = sampler.sample(1, idx);
//xr = .2*fmod(halton_enum.scale_x(xr), 1.0) + .8*1*xrand[((x + w*y)*max_samples + s) % xrand.size()];// + 0*drand();
//yr = .2*fmod(halton_enum.scale_y(yr), 1.0) + .8*1*yrand[((x + w*y)*max_samples + s) % yrand.size()];// + 0*drand();
//std::cout << (xr - yr) << "\n";
double dx = halton_enum.scale_x(xr);// (double)x + AA_JITTER*xrand[((x + w*y)*max_samples + s) % xrand.size()];
double dy = halton_enum.scale_y(yr);// (double)y + AA_JITTER*yrand[((x + w*y)*max_samples + s) % yrand.size()];
// point on the sensor plane and jittered eye position (depth of field)
glm::dvec3 pixelPos = screenCenter + (sensorHalfWidth*(dx / w - .5))*cameraRight - (sensorHalfHeight*(dy / h - .5))*_camera.up;
glm::dvec3 eyePos = _camera.pos + FOCAL_BLUR*(xr - .5)*cameraRight + FOCAL_BLUR*(yr - .5) *_camera.up;
Ray ray(eyePos, glm::normalize(pixelPos - eyePos));
// incremental mean of the radiance estimates
if (s == 0)
color = radiance(ray, 0, sampler, halton_enum, /*(x + w*y)*SAMPLES + s*/ idx, glm::dvec3(1, 1, 1));
else
color = (1.0*s*color + radiance(ray, 0, sampler, halton_enum, /*(x + w*y)*SAMPLES + s*/ idx, glm::dvec3(1, 1, 1)))*(1.0 / (s + 1));// (1.0 / SAMPLES)*radiance(ray, 0);
if (s > 0)
{
// exponentially smoothed colour-change estimate used as noise proxy
var = (1.0*5*var + glm::length(color - lastCol))*(1.0 / (5 + 1));
//var = .5*var + .5*vars[clamp(0, w, x - 1) + w*clamp(0, h, y)];
}
if (s > 0 && var > noise_thresh)
samps-=2;
s++;
samps++;
}
color = gamma(color, 2.2);
//vars[x + w*y] = var;
#pragma omp critical (im_update)
{
_image->setPixel(x, y, glm::clamp(color/*1.0*(double)s / max_samples*glm::dvec3(1, 1, 1)*/, 0.0, 1.0));
}
}
}
}
// avgTests is never accumulated above, so this always reports 0 —
// leftover instrumentation; confirm before relying on the printout
avgTests /= w*h;
std::cout << "average intersection tests: " << avgTests << "\n";
}
// Estimate incoming radiance along `ray` by path tracing.
// depth: current bounce count (hard cut at MAX_DEPTH, Russian roulette
// after MIN_DEPTH); sample: Halton index for this pixel/sample; contrib:
// running path throughput used for the roulette probability.
// Combines direct light sampling, a recursive indirect bounce, emissive
// terms and a photon-map caustic estimate; an atmosphere ray march can
// override the hit with a scattering event.
glm::dvec3 radiance(const Ray& ray, int depth, const Halton_sampler& halton_sampler, const Halton_enum& halton_enum, int sample, glm::dvec3 contrib)
{
if (depth > MAX_DEPTH)
return glm::dvec3(0, 0, 0);
// per-bounce 2D Halton sample (dimensions 2,3 then 4,5, ...)
float sx = halton_sampler.sample(2 + 2*depth, sample);
float sy = halton_sampler.sample(3 + 2*depth, sample);
double offset = SHADOW_BIAS;
glm::dvec3 minHit, minNorm;
glm::dvec2 minUV;
Entity* current;
bool backface = false;
// grey fallback material for hits that return no entity (debug octree mode)
texture debugTex(glm::dvec3(.5, .5, .5));
texture debugTexEm(glm::dvec3(0, 0, 0));
Material dummyMat(&debugTex, &debugTexEm, 1, 1);
sphere debugSphere(glm::dvec3(0, 0, 0), 1, dummyMat);
bool intersected = trace(ray, minHit, minNorm, minUV, current);
if (intersected)
{
if (!current)
current = &debugSphere;
glm::dvec3 i(0, 0, 0);
glm::dvec3 refDir;
glm::dvec3 color = current->material.diffuse->get(minUV);
double roughness = current->material.roughness;
//type of secondary ray, 0 for reflection, 1 for refraction, 2 for glossy
// int type = rayType(current, ray, minNorm, minUV);
glm::dvec3 f(1, 1, 1);
// choose the bounce direction and BRDF weight f for the next segment
secondaryRay(ray, current, minNorm, minUV, sx, sy, refDir, f, roughness, contrib, offset);
// atmosphere: if the ray marched medium scatters before the surface,
// replace the surface hit with the scattering event
double tmin = 0;
double tmax = glm::length(minHit - ray.origin);
if (_scene->atmosphereBounds(ray, tmin, tmax))
{
glm::dvec3 hit, col;
//std::cout << "atmosphere bounds hit: " << tmin << ", " << tmax << "\n";
if (raymarch(ray, hit, col, tmin, tmax))
{
minHit = hit;
refDir = randomUnitVec(sx, sy);
f = 1.0*col;
color = col;
contrib = col;
roughness = 1;
//std::cout << "atmosphere hit\n";
}
}
// direct lighting: one shadow ray per light, cosine-weighted by the
// hemisphere fraction the light subtends
for (Light* light : _scene->lights)
{
bool shadow = false;
glm::dvec3 lightDir = light->getPoint(drand(), drand()) - (minHit + SHADOW_BIAS*minNorm);
double maxt = vecLengthSquared(lightDir);
//double cos_alpha = (light->rad / sqrt(vecLengthSquared(lightDir) + std::pow(light->rad, 2)));
double hfrac = 1 / (M_PI*vecLengthSquared(light->pos - minHit)); //fraction of the hemisphere
Ray shadow_ray(minHit + SHADOW_BIAS*minNorm, lightDir);
shadow = !visible(shadow_ray, maxt);
if (!shadow)
{
double d = glm::dot(minNorm, glm::normalize(light->pos - minHit));
if (d < 0)
d = 0;
double l = pow(d, (1.0 / roughness));
i = light->col*l*hfrac;
}
}
// caustics via photon map lookup (skipped at extreme depths)
glm::dvec3 caustic = depth <= 10 ? samplePhotons(minHit, refDir, 32) : glm::dvec3(0, 0, 0);
//return color*caustic;
// continuation probability
double q = compMax(contrib);
if (depth <= MIN_DEPTH || drand() < q)
{
// Russian roulette compensation keeps the estimator unbiased
f *= depth <= MIN_DEPTH ? 1.0 : (1.0 / q);
//diffuse*direct_light + diffuse*brdf*radiance + emmissive
return color*i + f*radiance(Ray(minHit + offset*minNorm, refDir), ++depth, halton_sampler, halton_enum, sample, contrib) + current->material.emissive->get(minUV) + color*caustic;
}
else
return color*i;// glm::dvec3(0, 0, 0);///*1.0*depth / MAX_DEPTH * glm::dvec3(1, 1, 1);//*/ //current->material.diffuse->get(minUV)*i + current->material.emissive->get(minUV);
}
else
return /*1.0*depth / MAX_DEPTH * glm::dvec3(1, 1, 1);//*/ ambient;
}
//checks if the given ray is visible, meaning that nothing overlaps it
// mt is the SQUARED length of the unobstructed segment; returns false if
// any (non-transparent) entity or an atmosphere scattering event lies
// between the ray origin and that distance.  Transparent surfaces pass
// stochastically via the alpha test; refractive ones (IOR != 1) always block.
bool visible(const Ray& ray, double mt)
{
bool hit = false;
std::vector<Entity*> shadow_objects = _scene->intersect(ray, 0, sqrt(mt)-SHADOW_BIAS);
std::vector<Entity*>::iterator shadow_it = shadow_objects.begin();
#ifdef DEBUG_OCTREE
return shadow_it == shadow_objects.end();
#endif
while (!hit && shadow_it != shadow_objects.end())
{
Entity* t = *shadow_it;
glm::dvec3 pos, norm;
glm::dvec2 uv;
if (t->intersect(ray, pos, norm, uv) && (drand() < t->material.getAlpha(uv) || t->material.IOR != 1))
{
// blocked only if the intersection lies strictly inside the segment
double t_shadow = vecLengthSquared(pos - ray.origin);
hit = (t_shadow < mt)&&(t_shadow > 0);
}
++shadow_it;
}
if (hit)
return false;
// the participating medium can also absorb/scatter the shadow ray
double tmin = 0;
double tmax = mt;
if (_scene->atmosphereBounds(ray, tmin, tmax))
{
glm::dvec3 hit, col;
if (raymarch(ray, hit, col, tmin, tmax)) return false;
}
return true;
}
// Choose the next path segment for a surface interaction.
// Outputs (by reference): refDir = bounce direction, f = BRDF weight for
// the recursive radiance term, roughness, contrib = updated throughput,
// offset = sign-adjusted surface bias (negated for transmission so the
// new origin is pushed through the surface).  The normal is flipped to
// face the incoming ray; rayType() (0 reflect / 1 refract / 2 glossy)
// selects the lobe.
void secondaryRay(const Ray& ray, const Entity* current, glm::dvec3& norm, glm::dvec2& UV, double sx, double sy, glm::dvec3& refDir, glm::dvec3& f, double& roughness, glm::dvec3& contrib, double& offset)
{
bool backface = false;
if (glm::dot(norm, ray.dir) > 0)
{
norm *= -1.0;
backface = true;
}
glm::dvec3 color = current->material.diffuse->get(UV);
roughness = current->material.roughness;
int type = rayType(current, ray, norm, UV);
if (type == 1)
{
// refraction: IOR ratio depends on whether we enter or exit the medium
if (backface)
{
refDir = refr(ray.dir, norm, current->material.IOR);
}
else
{
refDir = refr(ray.dir, norm, 1.0 / current->material.IOR);
}
offset *= -1;
contrib = glm::dvec3(1, 1, 1);
f = 1.0*color;
}
else if (type == 0)
{
// perfect mirror reflection
refDir = glm::reflect(ray.dir, norm);
contrib = glm::dvec3(1, 1, 1);
f = 1.0*color; // glm::dot(refDir, minNorm);
}
else
{
// diffuse/glossy: cosine-weighted hemisphere, sharpened to a Phong
// lobe for low roughness
refDir = hemisphereSample_cos(norm, sx, sy, 2);
if (current->material.roughness < .9)
{
refDir = sample_phong(glm::reflect(ray.dir, norm), norm, (1.0 / (current->material.roughness)) + 1, sx, sy);
if (glm::dot(refDir, norm) < 0)
refDir = glm::reflect(refDir, norm);
}
f = 1.0*color;
glm::dvec3 inf = color;// *pow(dot, 1 / current->material.roughness);
contrib *= inf;
contrib = glm::mix(contrib, inf, 0.5);
}
}
//traces a ray against the scene geometry, returns true on intersection
// On success fills minHit/minNorm/minUV with the nearest accepted hit and
// obj with the entity that was hit. Octree nodes are visited in
// front-to-back order; traversal stops early once a hit is contained in
// the current node's bounding box (no closer hit can follow).
bool trace(const Ray& ray, glm::dvec3& minHit, glm::dvec3& minNorm, glm::dvec2& minUV, Entity*& obj)
{
glm::dvec3 hit, norm;
glm::dvec2 uv;
bool intersected = false;
std::vector<std::pair<const Octree::Node*, double>> nodes = _scene->intersectSorted(ray, 0, INFINITY);
std::vector<std::pair<const Octree::Node*, double>>::iterator nd = nodes.begin();
#ifdef DEBUG_OCTREE
// debug build: visualize the first octree node instead of real geometry;
// the "hit" is the node's bounding box and the normal is the box face axis
if(nd != nodes.end())
{
const Octree::Node* curNode = nd->first;
double tmin, tmax;
auto snap = [](glm::dvec3& in) {return in / std::max(in[0], std::max(in[1], in[2])); };
curNode->_bbox.intersect(ray, 0, INFINITY, tmin, tmax);
minHit = ray.origin + tmin * ray.dir;
glm::dvec3 p = minHit - curNode->_bbox.center();
glm::dvec3 d = .5 * curNode->_bbox.size();
// scale slightly past 1 so the dominant axis wins the comparison below
glm::dvec3 n = (p / d) * (1.0001);
auto absMax = [](double a, double b){ return (std::max(std::abs(a), std::abs(b)) == std::abs(a)) ? a : b; };
double max = absMax(n[0], absMax(n[1], n[2]));
glm::dvec3 n_ = glm::dvec3(max == n[0], max == n[1], max == n[2]);
if (glm::length(n_) < .1 || glm::length(n_) > 2)
{
std::cout << "normal length out of tolerance: " << n_[0] << ", "
<< n_[1] << ", "
<< n_[2] << ", "
<< n[0] << ", "
<< n[1] << ", "
<< n[2] << ", " << "\n";
n_ = n;
}
minNorm = glm::normalize(n_);
minUV = glm::dvec2(0, 0);
obj = NULL;
return true;
}
return false;
#endif
//avgTests += objects.size();
// diagnostic: randomly report rays that touch suspiciously many nodes
if (drand() < .5 && nodes.size() > 500)
{
std::cout << nodes.size() << ", ray: " << ray.origin.x << ", " << ray.origin.y << ", " << ray.origin.z << "\n";
}
// only read when 'intersected' is true, which guarantees it was assigned
Entity* current;
bool term = false;
while (nd != nodes.end() && !term)
{
const Octree::Node* curNode = nd->first;
std::vector<Entity*>::const_iterator it = curNode->_entities.begin();
while (it != curNode->_entities.end())
{
Entity* tmp = *it;
// same stochastic alpha / IOR acceptance rule as in visible()
if (tmp->intersect(ray, hit, norm, uv) && (drand() < tmp->material.getAlpha(uv) || tmp->material.IOR != 1))
{
// keep only the nearest accepted hit
if ((!intersected || vecLengthSquared(hit - ray.origin) < vecLengthSquared(minHit - ray.origin)))
{
current = tmp;
minHit = hit;
minNorm = norm;
minUV = uv;
intersected = true;
// nodes are front-to-back: a hit inside this node is final
if (curNode->_bbox.contains(hit))
term = true;
}
}
++it;
}
++nd;
}
if(intersected)
obj = current;
return intersected;
}
//returns the type of the secondary ray, 0 for reflection, 1 for refraction, 2 for diffuse/glossy
// The choice is stochastic: opacity decides transmission, and Schlick's
// approximation splits transmitted rays between reflection and refraction.
int rayType(const Entity* entity, const Ray& ray, glm::dvec3& norm, glm::dvec2& minUV)
{
int type = 2;
double IOR = entity->material.IOR;
// combined texture alpha and material opacity
double opacity = entity->material.diffuse->getAlpha(minUV) * entity->material.opacity;
double r0 = std::pow((1 - IOR) / (1 + IOR), 2);
//Schlicks approximation of the fresnel term
// NOTE(review): uses dot(reflect(dir,norm), norm) as the cosine term,
// which equals -dot(dir,norm) for a unit normal -- presumably intended.
double fs = r0 + (1 - r0)*std::pow(1 - glm::dot(glm::reflect(ray.dir, norm), norm), 5);
// near-zero roughness surfaces are treated as perfect mirrors
if (entity->material.roughness < .001)
{
type = 0;
}
// transmitted ray: Fresnel split between reflection and refraction
if (drand() > opacity)
{
if (drand() < fs)
type = 0;
else
type = 1;
}
return type;
}
//uses raymarching to determine the intersection point of a ray with the atmosphere
bool raymarch(const Ray& r, glm::dvec3& hit, glm::dvec3& col, double mint, double maxt)
{
double t = mint + SHADOW_BIAS;
double scatter;
glm::dvec3 current = r.origin + mint*r.dir;
while (t < maxt)
{
if (drand() < _scene->atmosphereDensity(current, col, scatter))
{
hit = current;
return true;
}
current += RAYMARCH_STEPSIZE * r.dir;
t += RAYMARCH_STEPSIZE;
}
return false;
}
//computes a radiance estimate from the photons surrounding the hit point
// pos:   surface point to estimate radiance at
// dir:   direction used to weight each photon's contribution (dot product)
// count: number of nearest photons to use in the estimate
// Returns the summed photon power divided by the disc area spanned by the
// farthest used photon (classic photon-map density estimate).
glm::dvec3 samplePhotons(glm::dvec3 pos, glm::dvec3 dir, int count)
{
double dist = .0;
glm::dvec3 res(0, 0, 0);
double scale = 0;
// candidate photons near pos, fetched from the photon map
std::vector<Photon*> photons = _photon_map->getInRange(pos, scale, dist);
// clamp the request to the photons actually available; also guard against
// a negative count, which would make partial_sort undefined behavior
count = std::max(0, std::min(count, (int)photons.size()));
// bring the 'count' photons closest to pos to the front of the vector
std::partial_sort(photons.begin(), photons.begin() + count, photons.end(), [pos](const Photon* lhs, const Photon* rhs) {return vecLengthSquared(lhs->origin - pos) < vecLengthSquared(rhs->origin - pos); });
// accumulate photon power weighted by alignment with 'dir'
for (int i = 0; i < count; i++)
{
Photon* p = photons[i];
res += p->col*glm::dot(p->dir, dir);
}
// normalize by the (squared-radius) disc area of the farthest used photon.
// Fixed: the original tested photons.size() > 0 and indexed
// photons[count - 1], which read out of bounds when count == 0 while the
// map still returned photons.
if (count > 0)
{
double maxDist = vecLengthSquared(photons[count - 1]->origin - pos);
// NOTE(review): maxDist can be 0 if a photon lies exactly at pos, which
// would divide by zero -- presumably never happens in practice; confirm.
res /= (M_PI*maxDist);
}
return res;
}
//traces caustic photons from every light source
// Emits 'count' photons per light (in parallel), follows them up to
// maxDepth bounces, and stores those that left a specular/refractive chain
// and landed on a diffuse surface (i.e. caustic photons) in _photon_map.
// Each thread collects photons in a private vector and merges them under a
// critical section, so the shared map is never written concurrently.
void tracePhotons(int maxDepth, int count, Halton_sampler& halton_sampler, Halton_enum& halton_enum)
{
_photon_map->reserve(count);
int total = 0;
#pragma omp parallel
{
// thread-private photon buffer and emission-attempt counter
std::vector<Photon*> tmp;
int tmpCount = 0;
#pragma omp for
for (int i = 0; i < count; i++)
{
for (Light* l : _scene->lights)
{
int tries = 0;
bool stored = false;
//srand(i);
// keep emitting until one photon from this slot is stored (max 500 tries)
while (!stored && tries < 500)
{
// low-discrepancy sample for the emission point on the light
float sx = halton_sampler.sample(0, i * 500 + tries);
float sy = halton_sampler.sample(1, i * 500 + tries);
//sx = fmod(halton_enum.scale_x(sx), 1.0);
//sy = fmod(halton_enum.scale_y(sy), 1.0);
//std::cout << i * 500 + tries << ": " << sx << ", " << sy << "\n";
glm::dvec3 pos = l->getPointInRange(sx, sy);
glm::dvec3 dir = sphereCapSample_cos(glm::normalize(pos - l->pos), fmod(drand() + 5 * i, 1), fmod(drand() + 13 * i, 1), 2, l->angle);
Ray r(pos, dir);
glm::dvec3 hit, norm;
// initial photon power, scaled by emission count and light cone
glm::dvec3 col = (1.0/count)*.5*l->angle*l->col;
glm::dvec2 UV;
Entity* current;
int depth = 0;
bool term = false;
bool isCaustic = false;
if (!trace(r, hit, norm, UV, current))
{
tries++;
continue;
}
// follow the photon through specular bounces
// NOTE(review): if the first hit is already diffuse (roughness >= 0.1),
// neither branch fires at depth 0 and the loop just counts up to
// maxDepth doing nothing -- presumably intended (photon discarded).
while (depth < maxDepth && !term)
{
double roughness = current->material.roughness;
//std::cout << "tracing photon, depth: " << depth << "\n";
if (roughness < 0.1)
{
//std::cout << "tracing caustics photon at depth: " << depth << "\n";
if (!trace(r, hit, norm, UV, current))
{
term = true;
continue;
}
roughness = current->material.roughness;
glm::dvec3 refDir, f, contrib;
double offset = SHADOW_BIAS;
sx = halton_sampler.sample(0, maxDepth*(i * 500 + tries) + depth);
sy = halton_sampler.sample(1, maxDepth*(i * 500 + tries) + depth);
//sx = fmod(halton_enum.scale_x(sx), 1.0);
//sy = fmod(halton_enum.scale_y(sy), 1.0);
// sample the bounce direction / throughput at the specular surface
secondaryRay(r, current, norm, UV, fmod(drand() + 5 * i, 1), fmod(drand() + 13 * i, 1), refDir, f, roughness, contrib, offset);
double tmin = 0;
double tmax = glm::length(hit - r.origin);
// the photon may scatter in the atmosphere before reaching the surface
if (_scene->atmosphereBounds(r, tmin, tmax))
{
glm::dvec3 ahit, color;
//std::cout << "atmosphere bounds hit: " << tmin << ", " << tmax << "\n";
if (raymarch(r, ahit, color, tmin, tmax))
{
hit = ahit;
refDir = randomUnitVec(fmod(drand() + 13 * i, 1), fmod(drand() + 7 * i, 1));
f = 1.0*color;
roughness = 1;
//std::cout << "atmosphere hit by photon\n";
}
}
// attenuate the photon and continue from the new hit point
col *= f;
r.origin = hit + offset*norm;
r.setDir(refDir);
isCaustic = true;
}
// store the photon once a specular chain ends on a diffuse surface
if (depth > 0 && isCaustic && roughness >= 0.1)
{
//std::cout << "photon stored\n";
tmp.push_back(new Photon(hit, r.dir, col));
term = true;
stored = true;
}
depth++;
}
tries++;
}
tmpCount += tries;
}
}
// merge the thread-private results under a critical section
#pragma omp critical
{
for (Photon* p : tmp)
{
_photon_map->push_back(p);
//_scene->push_back(new sphere(p->origin, 0.01, Material(new texture(p->col), new texture(glm::dvec3(0, 0, 0)), 1, 1)));
}
total += tmpCount;
}
}
std::cout << "total photon tests: " << total << "\n";
}
// true while the render loop should keep producing samples
bool running() const { return _running; }
// request the render loop to stop after the current iteration
void stop() { _running = false; }
// (re)enable the render loop
void start() { _running = true; }
// tunables, seeded from their compile-time defaults
int photons = PHOTONS;
int photon_depth = PHOTON_DEPTH;
int min_samples = MIN_SAMPLES;
int max_samples = SAMPLES;
double noise_thresh = NOISE_THRESH;
// constant ambient radiance added to every shading result
glm::dvec3 ambient = glm::dvec3(0, 0, 0);
// the image buffer being rendered into
std::shared_ptr<Image> getImage() const { return _image; }
Camera _camera;
private:
bool _running = false;
// spatial index over the scene geometry (not owned exclusively here --
// lifetime managed elsewhere; confirm ownership before freeing)
Octree* _scene;
PhotonMap* _photon_map;
std::shared_ptr<Image> _image;};
|
ompcompute.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#ifndef LOG_LIM
#define LOG_LIM 29
#endif
#define N 2
/*
 * Spins N OpenMP threads, each XOR-folding 2^LOG_LIM values into its own
 * accumulator, then combines the accumulators and prints a completion
 * message whose trailing character depends on the result (so the work
 * cannot be optimized away).
 *
 * Fixed: 'id' and the inner loop counter 'i' were shared between threads
 * inside the parallel region (both threads raced on them -- undefined
 * behavior and wrong per-thread indexing). They are now private locals.
 */
int main(int argc, char **argv)
{
    int64_t i, acc[N];

    printf("Computation started\n"); fflush(stdout);
    omp_set_num_threads(N);
    for( i=0; i<N; i++ )
        acc[i] = 7+i;

#pragma omp parallel
    {
        /* everything declared inside the region is private per thread */
        int64_t id = omp_get_thread_num();
        int64_t a = acc[id];
        int64_t k;
        for( k=0; k<(int64_t)(1LU<<LOG_LIM); k++ )
        {
            a ^= k*(k<<4);
        }
        acc[id] = a;
    }

    /* fold all per-thread accumulators into acc[0] */
    for(i=1; i<N; i++)
        acc[0] ^= acc[i];
    printf("Computation done%c\n", acc[0]?' ':'.');
    return 0;
}
|
pastix.c | /* CalculiX - A 3-dimensional finite element program */
/*Copyright (C) 1998-2021 Guido Dhondt*/
/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU General Public License as*/
/* published by the Free Software Foundation(version 2);*/
/**/
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of*/
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the*/
/* GNU General Public License for more details.*/
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
#ifdef PASTIX
#include <pastix.h>
#include <spm.h>
#include <sys/time.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "CalculiX.h"
#include "pastix.h"
// Time structs for benchmarking
extern struct timespec totalCalculixTimeStart, totalCalculixTimeEnd;
double totalPastixTime;
// Variables for storing reuse history
int totalIterations = 0;
int totalReused = 0;
// Current sparse matrix in STI format
double* auPtr = NULL;
double* adPtr = NULL;
ITG *icolTotal = NULL, *irowTotal = NULL;
// Matrix data from previous iteration
ITG neqPrev=0, nzsPrev=0;
ITG *icolPrev=NULL,*irowPrev=NULL,*jqPrev=NULL,*offsetPrev=NULL;
ITG inputformatPrev=-1;
ITG basePrev=1;
char noScale=0;
// Current sparse matrix in CSC
double *aupastix=NULL;
ITG *icolpastix=NULL,*irowpastix=NULL;
// Global variable that indicates whether we are currently reusing or not
char redo = 1;
// Global variable which data set was previously used (basic/radiation) and now
#define BASIC 1
#define AS 2
char modePrev = BASIC;
char mode = BASIC;
// NOTE(review): these two macros expand WITH a trailing semicolon, so they
// only work at the end of a simple assignment/initializer statement; using
// them inside an expression (e.g. `usage == SINGLE_SOLVE`) would not
// compile. Consider dropping the semicolons in a coordinated change.
#define SINGLE_SOLVE 1;
#define MULTI_SOLVE 2;
char usage = SINGLE_SOLVE;
// PaStiX configuration (separate parameter sets for BASIC and AS modes)
spm_int_t iparm_basic[IPARM_SIZE];
spm_int_t iparm_as[IPARM_SIZE];
double dparm_basic[DPARM_SIZE];
double dparm_as[DPARM_SIZE];
spm_int_t *iparm = iparm_basic;
double *dparm = dparm_basic;
pastix_data_t* pastix_data = NULL;
spmatrix_t *spm = NULL;
// GPU active or not
char gpu = 0;
// Store how many nzs the merged Matrix has
ITG nzsTotal = 0;
// Size of allocated space for sparse matrix
ITG pastix_nnzBound = 0;
// Number of iterations that failed with mixed precision
ITG mixedFailed = 0;
// indicates whether this is the first invocation of PaStiX or not
char firstIter = 1;
// When this flag is activated, PaStiX will not reuse in the next iteration
char forceRedo = 0;
// Use double or mixed precision
char mixed = 1;
char globDoublePrecision = 0;
// This is set to one, when too many iterations with mixed precision did not converge
char stickToDouble = 0;
// Pointers for faster matrix transpose
ITG *irowacc = NULL;
ITG *irowPrediction = NULL;
// Number of threads
ITG nthread_mkl=0;
/* Snapshot of the module-level solver state, used to swap between the
   BASIC and AS (radiation) data sets (see pastix_mode_basic /
   pastix_mode_as below). NOTE(review): field order matters to any
   positional initializers of this struct -- keep them in sync. */
struct pastix_data_s {
int totalIterations;
int totalReused;
ITG *icolTotal;
ITG *irowTotal;
ITG neqPrev;
ITG nzsPrev;
ITG *icolPrev;
ITG *irowPrev;
ITG *jqPrev;
ITG inputformatPrev;
ITG basePrev;
ITG *offsetPrev;
double *aupastix;
ITG *icolpastix;
ITG *irowpastix;
char redo;
ITG nzsTotal;
ITG pastix_nnzBound;
ITG mixedFailed;
char firstIter;
char forceRedo;
char globDoublePrecision;
char stickToDouble;
ITG *irowacc;
ITG *irowPrediction;
spm_int_t *iparm;
double *dparm;
char gpu;
char mixed;
pastix_data_t* pastix_data;
spmatrix_t *spm;
};
typedef struct pastix_data_s pastix_data_object;
pastix_data_object pastix_mode_basic = {
0,0,NULL,NULL,0,0,NULL,NULL,NULL,-1,0,NULL,
NULL,NULL,NULL,1,0,0,0,1,0,0,0,NULL,NULL,iparm_basic,dparm_basic,
0,1,NULL,NULL
};
pastix_data_object pastix_mode_as = {
0,0,NULL,NULL,0,0,NULL,NULL,NULL,-1,0,NULL,
NULL,NULL,NULL,1,0,0,0,1,0,0,0,NULL,NULL,iparm_as,dparm_as,
0,1,NULL,NULL
};
// Initializes and configurates PaStiX environment. Also forwards the sparse matrix pointers
// When the sparsity pattern is reused (!redo), only the value pointer of
// the existing spm is refreshed; otherwise a fresh spm and pastix_data are
// created and the symbolic analysis is run.
void pastix_init(double *ad, double *au, double *adb, double *aub,
double *sigma,ITG *icol, ITG *irow,
ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat,
ITG *jq, ITG *nzs3){
// if reusing, only update the value pointer of the sparse matrix
if(!redo){
pastixResetSteps(pastix_data);
// free the previous value array unless it is the shared aupastix buffer
if(spm->values != aupastix && spm->values != NULL ) free(spm->values);
spm->values = aupastix;
printf("\n");
spmPrintInfo( spm, stdout );
printf("\n");
return;
}
ITG nthread, nthread_v;
char *env;
/*set MKL_NUM_THREADS to min(CCX_NPROC_EQUATION_SOLVER,OMP_NUM_THREADS)
must be done once*/
if (nthread_mkl == 0) {
nthread=1;
env=getenv("MKL_NUM_THREADS");
if(env) {
nthread=atoi(env);}
else {
env=getenv("OMP_NUM_THREADS");
if(env) {nthread=atoi(env);}
}
env=getenv("CCX_NPROC_EQUATION_SOLVER");
if(env) {
nthread_v=atoi(env);
if (nthread_v <= nthread) {nthread=nthread_v;}
}
if (nthread < 1) {nthread=1;}
nthread_mkl=nthread;
}
// Init integer and double parameters with default values
pastixInitParam( iparm, dparm );
// Set best PaStiX parameters for CalculiX usage
iparm[IPARM_ORDERING] = PastixOrderScotch;
if( mode == AS ){
iparm[IPARM_SCHEDULER] = PastixSchedStatic;
}
else{
iparm[IPARM_SCHEDULER] = PastixSchedParsec;
}
iparm[IPARM_THREAD_NBR] = nthread_mkl;
iparm[IPARM_GPU_NBR] = (int) gpu;
// 2 = mixed/single precision, 3 = double (see IPARM_FLOAT semantics)
iparm[IPARM_FLOAT] = globDoublePrecision ? 3 : 2;
iparm[IPARM_MIN_BLOCKSIZE] = 1024;
iparm[IPARM_MAX_BLOCKSIZE] = 2048;
iparm[IPARM_FACTORIZATION] = PastixFactLU;
iparm[IPARM_TASKS2D_WIDTH] = globDoublePrecision ? 256 : 128;
iparm[IPARM_REFINEMENT] = PastixRefineGMRES;
iparm[IPARM_REUSE_LU] = firstIter ? 0 : 1;
// NOTE(review): the line below unconditionally overwrites the value set on
// the previous line (result is never 0); possibly intended as
// forceRedo ? 2 : (firstIter ? 0 : 1) -- confirm.
iparm[IPARM_REUSE_LU] = forceRedo ? 2 : 1;
iparm[IPARM_GPU_MEMORY_PERCENTAGE] = 95;
iparm[IPARM_GPU_MEMORY_BLOCK_SIZE] = 64 * 1024;
dparm[DPARM_EPSILON_REFINEMENT] = 1e-12;
dparm[DPARM_EPSILON_MAGN_CTRL] = 0.;
iparm[IPARM_ITERMAX] = 70;
iparm[IPARM_GMRES_IM] = 70;
// Initialize sparse matrix
spm = malloc( sizeof( spmatrix_t ) );
spmInit(spm);
spm->flttype = globDoublePrecision ? SpmDouble : SpmFloat;
if(spm->values != aupastix && spm->values != NULL ) free(spm->values);
spm->values = aupastix;
spm->fmttype = SpmCSC;
spm->nexp = spm->gNexp = spm->gN = spm->n = *neq;
spm->mtxtype = SpmGeneral;
// inputformat 3 stores only one triangle + diagonal; otherwise both
// triangles plus the diagonal are present in the CSC arrays
if( *inputformat == 3 ){
spm->nnzexp = spm->gnnzexp = spm->gnnz = spm->nnz = nzsTotal + *neq;
} else{
spm->nnzexp = spm->gnnzexp = spm->gnnz = spm->nnz = nzsTotal * 2 + *neq;
}
spm->colptr = (spm_int_t*) icolpastix;
spm->rowptr = (spm_int_t*) irowpastix;
// initialize pastix
pastixInit( &pastix_data, MPI_COMM_WORLD, iparm, dparm );
printf("\n");
spmPrintInfo( spm, stdout );
printf("\n");
// perform reordering, analysis and symbolic factorization if it's more than 1 equation
if(spm->n > 1){
pastix_task_analyze( pastix_data, spm );
}
}
// Converts the CalculiX sparse matrix (ad/au "STI" storage or inputformat 3)
// into the CSC layout PaStiX expects (aupastix / icolpastix / irowpastix).
// To enable reuse of the symbolic factorization, the current sparsity
// pattern is merged with the previous iteration's pattern; if the merged
// pattern has the same nnz count as last time, 'redo' stays 0 and only the
// values are refreshed. Side effects: (re)allocates the module-level CSC
// arrays and updates the *Prev reuse bookkeeping.
void pastix_csc_conversion(double *ad, double *au, double *adb, double *aub,
double *sigma,ITG *icol, ITG *irow,
ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat,
ITG *jq, ITG *nzs3){
ITG i,j;
char merged=0;
// jq for the merged matrix
ITG* jqTotal = NULL;
// a change in system size or storage format always forces a full rebuild
if(*neq != neqPrev || *inputformat != inputformatPrev)
forceRedo = 1;
redo = forceRedo ? 1 : 0;
if(!redo){
nzsTotal = 0;
NNEW(icolTotal, ITG, *neq);
ITG base = jq[0];
// Compute the number of entries in the merged matrix
#pragma omp parallel for reduction(+:nzsTotal)
for(i=0;i<*neq;i++){
ITG kCur = jq[i] - base;
ITG kPrev = jqPrev[i] - basePrev;
ITG curColTotal = 0;
// two-pointer union of the previous and current row-index lists
while(kCur < jq[i+1] - base && kPrev < jqPrev[i+1] - basePrev) {
if(irowPrev[kPrev] == irow[kCur]){
kCur++;
kPrev++;
}
else{
if(irowPrev[kPrev] < irow[kCur])
kPrev++;
else // irowPrev[kPrev] > irow[k]
kCur++;
}
curColTotal++;
}
while(kCur < jq[i+1] - base){
kCur++;
curColTotal++;
}
while(kPrev < jqPrev[i+1] - basePrev){
kPrev++;
curColTotal++;
}
icolTotal[i] = curColTotal;
nzsTotal += curColTotal;
}
// compute jq for the merged matrix
NNEW(jqTotal, ITG, (*neq+1));
jqTotal[0] = base;
for(i = 0; i < *neq; i++){
jqTotal[i+1] = jqTotal[i] + icolTotal[i];
}
// If the number of entries in the merged matrix is the same as in the last iteration, we can reuse
if(nzsTotal == nzsPrev){
printf("Reusing csc.!\n");
}
else{
redo = 1;
printf("Not reusing csc, merging patterns!\n");
}
// allocate space for the sparse matrix
if(*symmetryflag && *inputformat != 3)
NNEW(auPtr,double,2 * nzsTotal);
else
NNEW(auPtr,double,nzsTotal);
NNEW(adPtr,double,neqPrev);
NNEW(irowTotal, ITG, nzsTotal);
if(*symmetryflag && *inputformat != 3){
j=2*nzsTotal;
}
else{
j=nzsTotal;
}
memset(auPtr, 0, j * sizeof(double));
memset(adPtr, 0, *neq * sizeof(double));
// merge the old and the new sparsity pattern
// (entries present only in the old pattern get an explicit zero value)
#pragma omp parallel for shared(auPtr)
for(i=0;i<*neq;i++){
ITG kCur = jq[i] - base;
// NOTE(review): this pass uses 'base' for jqPrev while the counting pass
// above used 'basePrev' -- presumably both bases are equal whenever this
// branch runs; confirm.
ITG kPrev = jqPrev[i] - base;
ITG kTotal = jqTotal[i] - base;
adPtr[i] = ad[i] - (*sigma == 0 ? 0.0 : (*sigma)*adb[i]);
while(kCur < jq[i+1] - base && kPrev < jqPrev[i+1] - base) {
if(irowPrev[kPrev] == irow[kCur]){
auPtr[kTotal] = au[kCur] - (*sigma == 0 ? 0.0 : (*sigma)*aub[kCur]);
if(*symmetryflag && *inputformat != 3 )
auPtr[kTotal + nzsTotal] = au[kCur + *nzs3] - (*sigma == 0 ? 0.0 : (*sigma)*aub[kCur + *nzs3]);
irowTotal[kTotal] = irow[kCur];
kCur++;
kPrev++;
}
else{
if(irowPrev[kPrev] < irow[kCur]){
// auPtr[kTotal] = 0.0;
// if(*symmetryflag)
// auPtr[kTotal + nzsTotal] = 0.0;
irowTotal[kTotal] = irowPrev[kPrev];
kPrev++;
}
else // irowPrev[kPrev] > irow[k]
{
auPtr[kTotal] = au[kCur] - (*sigma == 0 ? 0.0 : (*sigma)*aub[kCur]);
if(*symmetryflag && *inputformat != 3)
auPtr[kTotal + nzsTotal] = au[kCur + *nzs3] - (*sigma == 0 ? 0.0 : (*sigma)*aub[kCur + *nzs3]);
irowTotal[kTotal] = irow[kCur];
kCur++;
}
}
kTotal++;
}
while(kCur < jq[i+1] - base){
auPtr[kTotal] = au[kCur] - (*sigma == 0 ? 0.0 : (*sigma)*aub[kCur]);
if(*symmetryflag && *inputformat != 3)
auPtr[kTotal + nzsTotal] = au[kCur + *nzs3] - (*sigma == 0 ? 0.0 : (*sigma)*aub[kCur + *nzs3]);
irowTotal[kTotal] = irow[kCur];
kCur++;
kTotal++;
}
while(kPrev < jqPrev[i+1] - base){
// auPtr[kTotal] = 0.0;
// if(*symmetryflag)
// auPtr[kTotal + nzsTotal] = 0.0;
irowTotal[kTotal] = irowPrev[kPrev];
kPrev++;
kTotal++;
}
}
SFREE(irowPrev);
SFREE(icolPrev);
SFREE(jqPrev);
irowPrev = NULL;
icolPrev = NULL;
jqPrev = NULL;
// update pointers to the merged matrix
// (local parameters are redirected so the conversion below reads the
// merged arrays instead of the caller's arrays)
icol = icolTotal;
icolPrev = icolTotal;
irow = irowTotal;
irowPrev = irowTotal;
jqPrev = jqTotal;
nzsPrev = nzsTotal;
basePrev = base;
au = auPtr;
ad = adPtr;
merged = 1;
}
else
{
// This is executed in either the first iteration, or when the number of equations changed
printf("Not reusing csc.\n");
if(icolPrev != NULL){
SFREE(icolPrev);
icolPrev = NULL;
}
if(irowPrev != NULL){
SFREE(irowPrev);
irowPrev = NULL;
}
if(jqPrev != NULL){
SFREE(jqPrev);
jqPrev = NULL;
}
// snapshot the current pattern so the next iteration can merge with it
NNEW(icolPrev,ITG,*neq);
NNEW(irowPrev,ITG,*nzs);
NNEW(jqPrev,ITG,*neq+1);
memcpy(icolPrev, icol, sizeof(ITG) * *neq);
memcpy(irowPrev, irow, sizeof(ITG) * *nzs);
memcpy(jqPrev, jq, sizeof(ITG) * (*neq+1));
nzsTotal = *nzs;
nzsPrev = *nzs;
neqPrev = *neq;
jqTotal = jqPrev;
inputformatPrev = *inputformat;
}
// Convert Matrix Format
if(*inputformat==1 || *symmetryflag==0){
/* lower triangular matrix is stored column by column in
au, followed by the upper triangular matrix row by row;
the diagonal terms are stored in ad */
// allocate space for the matrix and the utility arrays
if(redo){
// We allocate 10% more space for the values than required so that we have to perform the expensive cudaMallocHost only once, even when the size of the matrix increases slightly
if((nzsTotal * 2 + *neq) > pastix_nnzBound){
// perform the call with PaStiX because pinned memory allocation via CUDA is performed if gpu is activated
if( !firstIter && aupastix == spm->values ) spm->values = NULL;
pastixAllocMemory((void**)&aupastix, sizeof(double) * 1.1 * (nzsTotal * 2 + *neq), gpu);
pastix_nnzBound = 1.1 * (nzsTotal * 2 + *neq);
}
if(irowpastix != NULL ){
SFREE(irowpastix);
if( irowpastix == spm->rowptr ) spm->rowptr = NULL;
}
NNEW(irowpastix,ITG,nzsTotal*2+*neq);
if(icolpastix != NULL ){
SFREE(icolpastix);
if( icolpastix == spm->colptr ) spm->colptr = NULL;
}
NNEW(icolpastix,ITG,*neq+1);
if(irowacc != NULL){
SFREE(irowacc);
irowacc = NULL;
}
NNEW(irowacc,ITG,*neq);
if(irowPrediction != NULL){
SFREE(irowPrediction);
irowPrediction = NULL;
}
NNEW(irowPrediction,ITG,nzsTotal);
}
// Compute utility pointers for parallelization
// irowPrediction stores the offset to the first entry in it's column of each entry
// irowacc stores the number of elements in each row
if(redo){
for(i=0;i<nzsTotal;i++){
irowPrediction[i] = irowacc[irow[i]-1]++;
}
// column pointer: per column one diagonal entry, icol[i] lower entries
// and irowacc[i] mirrored upper entries (1-based, CSC)
icolpastix[0] = 1;
for(i=0;i<*neq;i++){
icolpastix[i+1] = icolpastix[i] + icol[i] + irowacc[i] + 1;
}
}
// copy lower triangular values to the right position in the CSC
#pragma omp parallel for private(j) shared(aupastix)
for(i=0;i<*neq;i++){
ITG k_pastix = icolpastix[i] + irowacc[i];
ITG k = jqTotal[i] - 1;
// diagonal entry; the sigma shift was already applied when merging
aupastix[k_pastix-1] = ad[i] - (merged != 0 ? 0.0 : (*sigma == 0.0 ? 0.0 : (*sigma)*adb[i]));
memcpy(aupastix + k_pastix, au + k, sizeof(double) * icol[i]);
if(*sigma != 0.0 && !merged ){
for(j=0;j<icol[i];j++){
aupastix[k_pastix+j] -= (*sigma)*aub[k+j];
}
}
}
// copy the upper triangular values to the right position in the CSC
#pragma omp parallel for private(j) shared(aupastix)
for(i=0;i<*neq;i++){
ITG k = jqTotal[i] - 1;
for(j=0;j<icol[i];j++){
aupastix[irowPrediction[k] + icolpastix[irow[k]-1] - 1] = au[k+(*symmetryflag == 0 ? 0 : (*nzs == *nzs3 ? nzsTotal : *nzs3))] - (merged != 0 ? 0 : (*sigma == 0.0 ? 0.0 : (*sigma *aub[k+(*symmetryflag == 0 ? 0 : (*nzs == *nzs3 ? nzsTotal : *nzs3))])));
k++;
}
}
// do the same for the rowptr (does not change when reusing)
if(redo){
#pragma omp parallel for
for(i=0;i<*neq;i++){
ITG k_pastix = icolpastix[i] + irowacc[i];
ITG k = jqTotal[i] - 1;
irowpastix[k_pastix-1] = i+1;
memcpy(irowpastix + k_pastix, irow + k, sizeof(ITG) * icol[i]);
}
#pragma omp parallel for private(j) shared(irowpastix)
for(i=0;i<*neq;i++){
ITG k = jqTotal[i] - 1;
for(j=0;j<icol[i];j++){
irowpastix[irowPrediction[k] + icolpastix[irow[k]-1] - 1] = i+1;
k++;
}
}
}
}
else if(*inputformat==3){
ITG countnew = 0;
ITG *row_newEntries = NULL;
ITG *col_newEntries = NULL;
// search for missing entries for a structural symmetric matrix
if(redo){
row_newEntries = malloc( sizeof(ITG) * nzsTotal );
col_newEntries = malloc( sizeof(ITG) * nzsTotal );
memset( row_newEntries, 0, sizeof(ITG) * nzsTotal );
memset( col_newEntries, 0, sizeof(ITG) * nzsTotal );
char found = 0;
ITG z = 0;
ITG temp = 0;
// loop through the columns
#pragma omp parallel for private(j,z) firstprivate(temp,found)
for(i=0;i<*neq;i++){
// loop through the entries in this column
for(j=jqTotal[i]-1;j<jqTotal[i+1]-1;j++){
temp = irow[j];
// loop through the symmetric column counter part to check for symmetry
for(z=jqTotal[temp-1]-1;z<jqTotal[temp]-1;z++){
if( irow[z]-1 == i ){
found=1;
break;
}
}
// if no entry was found add a dummy to the array and increase the counter for missing entries
// (the critical section serializes access to the shared countnew/arrays)
#pragma omp critical
if( found == 0 ){
row_newEntries[countnew] = i + 1;
col_newEntries[countnew] = temp;
countnew++;
}
found = 0;
}
}
printf("added %d entries to the matrix\n",countnew);
nzsTotal += countnew;
// allocate memory for the PaStiX arrays and free the old ones if necessary
if((nzsTotal + *neq) > pastix_nnzBound){
if( !firstIter && aupastix == spm->values ) spm->values = NULL;
pastixAllocMemory((void**)&aupastix, sizeof(double) * 1.1 * (nzsTotal + *neq), gpu);
pastix_nnzBound = 1.1 * (nzsTotal + *neq);
}
memset( aupastix, 0, sizeof(double) * pastix_nnzBound );
if(irowpastix != NULL ){
SFREE(irowpastix);
if(irowpastix == spm->rowptr) spm->rowptr = NULL;
}
NNEW(irowpastix,ITG,nzsTotal+*neq);
if(icolpastix != NULL ){
SFREE(icolpastix);
if(icolpastix == spm->colptr) spm->colptr = NULL;
}
NNEW(icolpastix,ITG,*neq+1);
memcpy(icolpastix, jqTotal, sizeof(ITG) * (*neq+1));
if(offsetPrev != NULL ){
SFREE(offsetPrev);
}
NNEW(offsetPrev,ITG,*neq+1);
// NOTE(review): this clears (*neq+1) BYTES, not ITG elements; presumably
// harmless because NNEW zero-initializes the allocation -- confirm.
memset(offsetPrev,0,*neq+1);
}
else{
nzsTotal += offsetPrev[*neq];
memset( aupastix, 0, sizeof(double) * pastix_nnzBound );
}
//#pragma omp parallel for private(j) firstprivate(offsetPrev)
for(i=0;i<*neq;i++){
ITG entriesPerColumn = jqTotal[i+1] - jqTotal[i];
ITG offsetSource = jqTotal[i] - jqTotal[0];
if(redo){
// copy irow column per column and add the additional diagonal entry
memcpy(irowpastix + i + offsetSource + offsetPrev[i], irow + offsetSource,
sizeof(ITG) * entriesPerColumn);
irowpastix[i+jqTotal[i+1]-jqTotal[0]+offsetPrev[i]] = i+1;
}
// copy au column per column
memcpy(aupastix + i + offsetSource + offsetPrev[i], au + offsetSource,
sizeof(double) * entriesPerColumn);
// subtract the buckling values
if(*sigma != 0 && merged == 0){
for(j=0;j<entriesPerColumn;j++){
aupastix[i + offsetSource + j + offsetPrev[i]] -= (*sigma)*aub[offsetSource + j];
}
}
// add the diagonal entries to aupastix
aupastix[i + (jqTotal[i+1] - jqTotal[0]) + offsetPrev[i]] = ad[i] - (merged != 0 ? 0.0 : (*sigma == 0 ? 0.0 : (*sigma)*adb[i]));
// add the found entries for making the matrix structural symmetric and increase the resulting offset in arrays
if(redo){
offsetPrev[i+1] = offsetPrev[i];
for( j=0;j<countnew;j++ ){
if( col_newEntries[j]-1 == i ){
irowpastix[i + (jqTotal[i+1] - jqTotal[0]) + 1 + offsetPrev[i+1]] = row_newEntries[j];
offsetPrev[i+1]++;
}
}
// add the diagonal and additional entries to the column pointer
icolpastix[i+1] += i+1+offsetPrev[i+1];
}
}
if(redo) icolpastix[*neq] = nzsTotal + *neq + 1;
// free arrays for added symmetrized entries
if(row_newEntries){
SFREE(row_newEntries);
row_newEntries = NULL;
}
if(col_newEntries){
SFREE(col_newEntries);
col_newEntries = NULL;
}
}
// Free the merged array in STI format
if(auPtr){
SFREE(auPtr);
auPtr = NULL;
}
if(adPtr){
SFREE(adPtr);
adPtr = NULL;
}
}
// PaStiX invocation when the factorization function is called individually
// (i.e. not through the combined main entry): runs CSC conversion, cleanup,
// init and numeric factorization in sequence, always in double precision
// and always with the GPU disabled for the factorization itself.
void pastix_factor_main_generic(double *ad, double *au, double *adb, double *aub,
double *sigma,ITG *icol, ITG *irow,
ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat,
ITG *jq, ITG *nzs3){
pastix_set_globals(mode);
// Set GPU flag from environment
const char* pastix_gpu = getenv("PASTIX_GPU");
if(pastix_gpu)
// the AS (radiation) data set never uses the GPU
gpu = ( mode == AS ) ? 0 : (*pastix_gpu == '1') ? 1 : 0;
// Perform individual invocations always in double precision. If previous iterations were in single precision, do not reuse.
forceRedo=1;
globDoublePrecision = 1;
// invoke PaStiX
pastix_csc_conversion(ad, au, adb, aub, sigma, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3);
pastix_cleanup(neq,symmetryflag);
pastix_init(ad, au, adb, aub, sigma, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3);
// force the factorization itself onto the CPU
gpu = 0;
iparm[IPARM_GPU_NBR]=0;
// NOTE(review): this pokes into the (normally opaque) pastix_data struct
// directly; relies on the struct layout exposed by this PaStiX build.
pastix_data->iparm[IPARM_GPU_NBR]=0;
pastix_factor(ad, au, adb, aub, sigma, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3);
}
// invokes the numeric factorization routine of PaStiX on the current spm;
// the matrix parameters are unused here (kept for a uniform interface)
void pastix_factor(double *ad, double *au, double *adb, double *aub,
double *sigma,ITG *icol, ITG *irow,
ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat,
ITG *jq, ITG *nzs3){
    // a 1x1 system is solved directly in pastix_solve_generic, so skip the
    // factorization for it
    if(spm->n != 1){
        pastix_task_numfact( pastix_data, spm );
    }
}
// invokes the solve and iterative refinement routines of PaStiX
// x: on entry the right-hand side (nrhs columns of length *neq), on exit
//    the solution.
// Returns 0 on success, -1 on NaN with scaling disabled, -2 to request a
// retry (NaN with scaling, or a mixed-precision failure in double mode).
ITG pastix_solve_generic(double *x, ITG *neq,ITG *symmetryflag,ITG *nrhs){

ITG i;
double* b;
float* buffer = NULL;
ITG rc=0;

// dont call pastix with only one equation, might lead to segfault
if(spm->n == 1)
{
x[0] = x[0] / aupastix[0];
return 0;
}

// check whether the RHS consists of only Zeroes and return in that case
char allZero = 1;
for(i = 0; i < *neq; i++){
if(x[i] != 0){
allZero = 0;
break;
}
}
if(allZero){
printf("RHS only consists of 0.0\n");
return 0;
}

//Copy the b so that we can modify x without losing b
NNEW(b,double,*nrhs**neq);
memcpy(b, x, sizeof(double) * (*nrhs) * (*neq));

// If we are in mixed precision mode, cast double x to float x and call solve. Afterwards upcast the solution.
if(!globDoublePrecision){
NNEW(buffer,float,*nrhs**neq);
#pragma omp parallel for
for(i = 0; i < (*nrhs) * (*neq); i++){
buffer[i] = (float) x[i];
}
rc = pastix_task_solve( pastix_data, *nrhs, buffer, spm->n );
#pragma omp parallel for
for(i = 0; i < (*nrhs) * (*neq); i++){
x[i] = (double) buffer[i];
}
SFREE(buffer);
buffer = NULL;
}
else{
rc = pastix_task_solve( pastix_data, *nrhs, x, spm->n );
}

// check for NaN in the solution (NaN != NaN)
if( x[0] != x[0] ){
printf("\nSolution contains NaN!\n\n");
if( noScale ){
return -1;
}
else{
return -2;
}
}

// Fixed: this declaration previously relied on the trailing ';' hidden
// inside the SINGLE_SOLVE macro to terminate the statement; the explicit
// semicolon makes it self-contained (the macro's own semicolon merely
// yields a harmless empty statement).
char usage_call = SINGLE_SOLVE;
if( usage == usage_call || rc != 0 ){
// invoke iterative refinement in double precision
// NOTE(review): pastix_task_refine runs twice and the first rc is
// discarded -- presumably a refinement pass before and after tightening
// the parameters; confirm this is intentional.
rc = pastix_task_refine( pastix_data, spm->n, *nrhs, (void*)b, spm->n, (void*)x, spm->n );
iparm[IPARM_GPU_NBR] = 0;
dparm[DPARM_EPSILON_MAGN_CTRL] = 1e-14;
iparm[IPARM_ITERMAX] = 50;
rc = pastix_task_refine( pastix_data, spm->n, *nrhs, (void*)b, spm->n, (void*)x, spm->n );
iparm[IPARM_GPU_NBR] = (int) gpu;
iparm[IPARM_ITERMAX] = 70;
}

SFREE(b);
b = NULL;
modePrev = mode;
// a clean solve means subsequent iterations may reuse the factorization
if( !rc ) firstIter = 0;
// double precision with scaling available failed: request a scaled retry
if( rc == -1 && globDoublePrecision && !noScale ){
rc = -2;
}
return rc;
}
// Solve entry point for the BASIC (structural) data set; swaps in the
// BASIC global state if the previous call used a different mode.
ITG pastix_solve(double *x, ITG *neq,ITG *symmetryflag,ITG *nrhs){
    mode = BASIC;
    usage = MULTI_SOLVE;
    if( modePrev != mode ) pastix_set_globals(mode);
    return pastix_solve_generic(x,neq,symmetryflag,nrhs);
}
// Solve entry point for the AS (radiation) data set; swaps in the AS
// global state if the previous call used a different mode.
ITG pastix_solve_as(double *x, ITG *neq,ITG *symmetryflag,ITG *nrhs){
    mode = AS;
    usage = MULTI_SOLVE;
    if( modePrev != mode ) pastix_set_globals(mode);
    return pastix_solve_generic(x,neq,symmetryflag,nrhs);
}
// Invokes pastixFinalize and spmExit which frees everything but the dense LU array and parsec pointer
// Only runs when the pattern is being rebuilt (redo) and this is not the
// very first iteration (nothing to tear down yet).
void pastix_cleanup(ITG *neq,ITG *symmetryflag){
    if( !redo || firstIter ){
        return;
    }
    // detach buffers that are shared with the module-level CSC arrays so
    // spmExit does not free memory that is still in use elsewhere
    if(spm->values == aupastix) spm->values = NULL;
    if(spm->values == spm->valuesGPU) spm->valuesGPU = NULL;
    if(spm->colptr == icolpastix) spm->colptr = NULL;
    if(spm->rowptr == irowpastix) spm->rowptr = NULL;
    spmExit( spm );
    if(spm != NULL){
        free( spm );
        spm = NULL;
    }
    pastixFinalize( &pastix_data );
}
// AS (radiation) mode shares the same teardown path as BASIC mode.
void pastix_cleanup_as(ITG *neq,ITG *symmetryflag){
    pastix_cleanup(neq,symmetryflag);
}
// main method for executing PaStiX: CSC conversion -> cleanup of the previous
// factorization -> diagonal scaling -> init -> factorize -> solve, with
// timing instrumentation and automatic fallback (double precision / no
// scaling) via recursive self-calls when the solve does not converge.
// On entry b holds the right-hand side(s); on exit it holds the solution.
void pastix_main_generic(double *ad, double *au, double *adb, double *aub,
                         double *sigma,double *b, ITG *icol, ITG *irow,
                         ITG *neq, ITG *nzs,ITG *symmetryflag,ITG *inputformat,
                         ITG *jq, ITG *nzs3,ITG *nrhs){
  // Nothing to do for an empty system; a 1x1 system is solved without scaling.
  if(*neq==0){
    return;
  }
  else if(*neq==1){
    noScale=1;
  }
  // Activate the data set (BASIC or AS) selected by the caller-set 'mode'.
  pastix_set_globals( mode );
  // GPU usage is toggled through the PASTIX_GPU environment variable ('1' = on).
  const char* pastix_gpu = getenv("PASTIX_GPU");
  if(pastix_gpu)
    gpu = (*pastix_gpu == '1') ? 1 : 0;
  usage = SINGLE_SOLVE;
  // check mixed precision environment variable (unset defaults to mixed = on)
  const char* pastix_mixed = getenv("PASTIX_MIXED_PRECISION");
  if( pastix_mixed != NULL ){
    mixed = (*pastix_mixed == '1') ? 1 : 0;
  }
  else{
    mixed = 1;
  }
  // Decide the working precision for this iteration; switching from double
  // back down to mixed forces a full redo of the factorization.
  if( stickToDouble == 0 && mixed == 1 ){
    if( globDoublePrecision == 1 ){
      forceRedo = 1;
    }
    globDoublePrecision = 0;
  }
  else{
    globDoublePrecision = 1;
  }
  // use double precision for inputformat 3 like mortar (better performance and convergence)
  if( pastix_mixed == NULL && *inputformat == 3 ){
    globDoublePrecision = 1;
    forceRedo = 0;
    stickToDouble = 1;
  }
  // backup b in case mixed precision solve corrupts the original array
  double* b_backup = NULL;
  NNEW(b_backup, double, *nrhs * *neq);
  memcpy(b_backup, b, sizeof(double) * (*nrhs)*(*neq));
  // benchmarking structs (one start/end pair per pipeline step)
  struct timespec start, end;
  struct timespec stepCscConversionStart, stepCscConversionEnd;
  struct timespec stepInitStart, stepInitEnd;
  struct timespec stepFactorizeStart, stepFactorizeEnd;
  struct timespec stepSolveStart, stepSolveEnd;
  struct timespec stepCleanUpStart, stepCleanUpEnd;
  double pastixTime, stepCscConversion, stepInit, stepFactorize, stepSolve, stepCleanUp, totalCCXTime, CCXwithoutPastix;
  clock_gettime(CLOCK_MONOTONIC, &start);
  clock_gettime(CLOCK_MONOTONIC, &stepCscConversionStart);
  // invoke csc conversion (builds aupastix/icolpastix/irowpastix)
  pastix_csc_conversion(ad,au,adb,aub,sigma,icol,irow,
                        neq,nzs,symmetryflag,inputformat,jq,nzs3);
  clock_gettime(CLOCK_MONOTONIC, &stepCscConversionEnd);
  clock_gettime(CLOCK_MONOTONIC, &stepCleanUpStart);
  // invoke cleanup of the previous iteration's solver instance
  pastix_cleanup(neq,symmetryflag);
  clock_gettime(CLOCK_MONOTONIC, &stepCleanUpEnd);
  clock_gettime(CLOCK_MONOTONIC, &stepInitStart);
  // scale the matrix with diagonals to 1: divide rhs and each CSC entry's
  // row by the corresponding diagonal value (skipped for inputformat 3 and
  // when scaling is disabled)
  if( *inputformat !=3 && !noScale ){
    ITG i=0;
#pragma omp parallel for
    for(i=0;i<*neq;i++){
      b[i] /= ad[i];
    }
    // ||b|| after scaling
    double normb=0;
#pragma omp parallel for reduction(+:normb)
    for(i=0;i<*neq;i++){
      normb += pow(b[i],2);
    }
    normb = sqrt(normb);
    if( normb < 1e-9 ){
      // rhs norm collapsed under scaling: boost rhs and matrix by the same
      // static factor so refinement does not work on vanishing numbers
      printf("||b|| getting too small with scaling, boost it statically\n");
      double scal = 1e-6/normb;
      //memcpy(b, b_backup, sizeof(double) * (*nrhs)*(*neq));
#pragma omp parallel for
      for(i=0;i<*neq;i++){
        b[i] *= scal;
      }
#pragma omp parallel for
      for(i=0;i<icolpastix[*neq]-1;i++){
        // irowpastix is 1-based, hence the -1 when indexing ad
        aupastix[i] *= scal/ad[irowpastix[i]-1];
      }
    }
    else{
#pragma omp parallel for
      for(i=0;i<icolpastix[*neq]-1;i++){
        aupastix[i] /= ad[irowpastix[i]-1];
      }
    }
  }
  //invoke init (pastixInit + spm setup)
  pastix_init(ad,au,adb,aub,sigma,icol,irow,
              neq,nzs,symmetryflag,inputformat,jq,nzs3);
  clock_gettime(CLOCK_MONOTONIC, &stepInitEnd);
  clock_gettime(CLOCK_MONOTONIC, &stepFactorizeStart);
  // invoke factor (numerical factorization)
  pastix_factor(ad,au,adb,aub,sigma,icol,irow,
                neq,nzs,symmetryflag,inputformat,jq,nzs3);
  clock_gettime(CLOCK_MONOTONIC, &stepFactorizeEnd);
  clock_gettime(CLOCK_MONOTONIC, &stepSolveStart);
  // solve; rc == -1 means no convergence, rc == -2 means scaling hurt
  ITG rc = pastix_solve_generic(b,neq,symmetryflag,nrhs);
  if( rc == -1){
    // Give up, if we tried it with double precision, use backup b otherwise
    if(globDoublePrecision == 1){
      printf("PaStiX could not converge to a valid result\n");
      exit(5);
    }
    else{
      // restore the unmodified rhs and retry everything in double precision
      memcpy(b, b_backup, sizeof(double) * (*nrhs)*(*neq));
      printf("falling back to double precision\n");
      globDoublePrecision = 1;
      forceRedo = 1;
      stickToDouble = 1;
      mixedFailed++;
      // call pastix_main recursively, but now in double precision
      pastix_main_generic(ad, au, adb, aub, sigma, b, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3, nrhs);
    }
    // make sure that we switch to double and do not reuse in the next iteration
    dparm[DPARM_EPSILON_REFINEMENT] = 1e-12;
    dparm[DPARM_EPSILON_MAGN_CTRL] = .0;
    iparm[IPARM_ITERMAX] = 70;
    iparm[IPARM_GMRES_IM] = 70;
    // allow another mixed-precision attempt next time unless it already
    // failed three times; after that we stay in double precision permanently
    if(mixedFailed <= 2){
      stickToDouble = 0;
      forceRedo = 1;
    }
    return;
  }
  else if( rc == -2){
    // diagonal scaling hurt convergence: restore rhs and retry unscaled
    memcpy(b, b_backup, sizeof(double) * (*nrhs)*(*neq));
    printf("turning diagonal scaling off\n");
    forceRedo = 1;
    noScale = 1;
    // call pastix_main recursively, but now without scaling
    pastix_main_generic(ad, au, adb, aub, sigma, b, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3, nrhs);
  }
  else{
    forceRedo = 0;
  }
  clock_gettime(CLOCK_MONOTONIC, &stepSolveEnd);
  clock_gettime(CLOCK_MONOTONIC, &end);
  // compute benchmark times (seconds, from tv_sec/tv_nsec pairs)
  pastixTime = (end.tv_sec - start.tv_sec) * 1e9;
  pastixTime = (pastixTime + (end.tv_nsec - start.tv_nsec)) * 1e-9;
  totalPastixTime += pastixTime;
  clock_gettime(CLOCK_MONOTONIC, &totalCalculixTimeEnd);
  totalCCXTime = (totalCalculixTimeEnd.tv_sec - totalCalculixTimeStart.tv_sec) * 1e9;
  totalCCXTime = (totalCCXTime + (totalCalculixTimeEnd.tv_nsec - totalCalculixTimeStart.tv_nsec)) * 1e-9;
  CCXwithoutPastix = totalCCXTime - totalPastixTime;
  stepCscConversion = (stepCscConversionEnd.tv_sec - stepCscConversionStart.tv_sec) * 1e9;
  stepCscConversion = (stepCscConversion + (stepCscConversionEnd.tv_nsec - stepCscConversionStart.tv_nsec)) * 1e-9;
  stepInit = (stepInitEnd.tv_sec - stepInitStart.tv_sec) * 1e9;
  stepInit = (stepInit + (stepInitEnd.tv_nsec - stepInitStart.tv_nsec)) * 1e-9;
  stepFactorize = (stepFactorizeEnd.tv_sec - stepFactorizeStart.tv_sec) * 1e9;
  stepFactorize = (stepFactorize + (stepFactorizeEnd.tv_nsec - stepFactorizeStart.tv_nsec)) * 1e-9;
  stepSolve = (stepSolveEnd.tv_sec - stepSolveStart.tv_sec) * 1e9;
  stepSolve = (stepSolve + (stepSolveEnd.tv_nsec - stepSolveStart.tv_nsec)) * 1e-9;
  stepCleanUp = (stepCleanUpEnd.tv_sec - stepCleanUpStart.tv_sec) * 1e9;
  stepCleanUp = (stepCleanUp + (stepCleanUpEnd.tv_nsec - stepCleanUpStart.tv_nsec)) * 1e-9;
  // update iteration/reuse counters
  totalIterations++;
  if(!redo)
    totalReused++;
  // benchmark output
  printf("________________________________________\n\n");
  printf("CSC Conversion Time: %lf\n", stepCscConversion);
  printf("Init Time: %lf\n", stepInit);
  printf("Factorize Time: %lf\n", stepFactorize);
  printf("Solve Time: %lf\n", stepSolve);
  printf("Clean up Time: %lf\n", stepCleanUp);
  printf("---------------------------------\n");
  printf("Sum: %lf\n", pastixTime);
  printf("\n");
  printf("Total PaStiX Time: %lf\n", totalPastixTime);
  printf("CCX without PaStiX Time: %lf\n", CCXwithoutPastix);
  printf("Share of PaStiX Time: %lf\n", totalPastixTime / totalCCXTime );
  printf("Total Time: %lf\n", totalCCXTime);
  printf("Reusability: %d : %d \n", totalReused, totalIterations);
  printf("________________________________________\n\n");
  SFREE(b_backup);
  b_backup = NULL;
  return;
}
/* Factorization entry point for the BASIC data set (multi-solve usage). */
void pastix_factor_main(double *ad, double *au, double *adb, double *aub,
                        double *sigma, ITG *icol, ITG *irow,
                        ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat,
                        ITG *jq, ITG *nzs3){
  mode  = BASIC;
  usage = MULTI_SOLVE;
  pastix_factor_main_generic(ad, au, adb, aub, sigma, icol, irow,
                             neq, nzs, symmetryflag, inputformat, jq, nzs3);
}
/* Factorization entry point for the AS data set (multi-solve usage). */
void pastix_factor_main_as(double *ad, double *au, double *adb, double *aub,
                           double *sigma, ITG *icol, ITG *irow,
                           ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat,
                           ITG *jq, ITG *nzs3){
  mode  = AS;
  usage = MULTI_SOLVE;
  pastix_factor_main_generic(ad, au, adb, aub, sigma, icol, irow,
                             neq, nzs, symmetryflag, inputformat, jq, nzs3);
}
/* Full solve entry point for the BASIC data set. */
void pastix_main(double *ad, double *au, double *adb, double *aub,
                 double *sigma, double *b, ITG *icol, ITG *irow,
                 ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat,
                 ITG *jq, ITG *nzs3, ITG *nrhs){
  mode = BASIC;
  pastix_main_generic(ad, au, adb, aub, sigma, b, icol, irow,
                      neq, nzs, symmetryflag, inputformat, jq, nzs3, nrhs);
}
/* Full solve entry point for the AS data set. */
void pastix_main_as(double *ad, double *au, double *adb, double *aub,
                    double *sigma, double *b, ITG *icol, ITG *irow,
                    ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat,
                    ITG *jq, ITG *nzs3, ITG *nrhs){
  mode = AS;
  pastix_main_generic(ad, au, adb, aub, sigma, b, icol, irow,
                      neq, nzs, symmetryflag, inputformat, jq, nzs3, nrhs);
}
/*
 * Swaps the global PaStiX working state between the two persistent data sets
 * (pastix_mode_basic / pastix_mode_as). The currently active globals are
 * saved into the data set of the OTHER mode (temp2) and the globals of the
 * requested mode (temp) are restored. No-op when the mode is unchanged.
 */
void pastix_set_globals(char mode){
  if( modePrev != mode ){
    pastix_data_object *temp,*temp2;
    switch(mode){
    case BASIC:
      temp  = &pastix_mode_basic;
      temp2 = &pastix_mode_as;
      break;
    case AS:
      temp  = &pastix_mode_as;
      temp2 = &pastix_mode_basic;
      break;
    default:
      // FIX: the original switch had no default, so an unknown mode left
      // temp/temp2 uninitialized and the code below invoked undefined
      // behavior. For an unknown mode just record it and do nothing.
      modePrev = mode;
      return;
    }
    // saving old data set
    temp2->totalIterations = totalIterations;
    temp2->totalReused = totalReused;
    // Current sparse matrix in STI format
    temp2->icolTotal = icolTotal;
    temp2->irowTotal = irowTotal;
    // Matrix data from previous iteration
    temp2->neqPrev = neqPrev;
    temp2->nzsPrev = nzsPrev;
    temp2->icolPrev = icolPrev;
    temp2->irowPrev = irowPrev;
    temp2->jqPrev = jqPrev;
    temp2->inputformatPrev = inputformatPrev;
    temp2->basePrev = basePrev;
    temp2->offsetPrev = offsetPrev;
    // Current sparse matrix in CSC
    temp2->aupastix = aupastix;
    temp2->icolpastix = icolpastix;
    temp2->irowpastix = irowpastix;
    // Global variable that indicates whether we are currently reusing or not
    temp2->redo = redo;
    // PaStiX configuration
    temp2->iparm = iparm;
    temp2->dparm = dparm;
    temp2->pastix_data = pastix_data;
    temp2->spm = spm;
    // GPU active or not
    temp2->gpu = gpu;
    // Store how many nzs the merged Matrix has
    temp2->nzsTotal = nzsTotal;
    // Size of allocated space for sparse matrix
    temp2->pastix_nnzBound = pastix_nnzBound;
    // Number of iterations that failed with mixed precision
    temp2->mixedFailed = mixedFailed;
    // indicates whether this is the first invocation of PaStiX or not
    temp2->firstIter = firstIter;
    // When this flag is activated, PaStiX will not reuse in the next iteration
    temp2->forceRedo = forceRedo;
    // Use double or mixed precision
    temp2->mixed = mixed;
    temp2->globDoublePrecision = globDoublePrecision;
    // This is set to one, when too many iterations with mixed precision did not converge
    temp2->stickToDouble = stickToDouble;
    // Pointers for faster matrix transpose
    temp2->irowacc = irowacc;
    temp2->irowPrediction = irowPrediction;
    // setting new data set
    totalIterations = temp->totalIterations;
    totalReused = temp->totalReused;
    // Current sparse matrix in STI format
    icolTotal = temp->icolTotal;
    irowTotal = temp->irowTotal;
    // Matrix data from previous iteration
    neqPrev = temp->neqPrev;
    nzsPrev = temp->nzsPrev;
    icolPrev = temp->icolPrev;
    irowPrev = temp->irowPrev;
    jqPrev = temp->jqPrev;
    inputformatPrev = temp->inputformatPrev;
    basePrev = temp->basePrev;
    offsetPrev = temp->offsetPrev;
    // Current sparse matrix in CSC
    aupastix = temp->aupastix;
    icolpastix = temp->icolpastix;
    irowpastix = temp->irowpastix;
    // Global variable that indicates whether we are currently reusing or not
    redo = temp->redo;
    // PaStiX configuration
    iparm = temp->iparm;
    dparm = temp->dparm;
    pastix_data = temp->pastix_data;
    spm = temp->spm;
    // GPU active or not
    gpu = temp->gpu;
    // Store how many nzs the merged Matrix has
    nzsTotal = temp->nzsTotal;
    // Size of allocated space for sparse matrix
    pastix_nnzBound = temp->pastix_nnzBound;
    // Number of iterations that failed with mixed precision
    mixedFailed = temp->mixedFailed;
    // indicates whether this is the first invocation of PaStiX or not
    firstIter = temp->firstIter;
    // When this flag is activated, PaStiX will not reuse in the next iteration
    forceRedo = temp->forceRedo;
    // Use double or mixed precision
    mixed = temp->mixed;
    globDoublePrecision = temp->globDoublePrecision;
    // This is set to one, when too many iterations with mixed precision did not converge
    stickToDouble = temp->stickToDouble;
    // Pointers for faster matrix transpose
    irowacc = temp->irowacc;
    irowPrediction = temp->irowPrediction;
    // A freshly activated data set must not reuse a factorization it never built.
    if( firstIter ){
      forceRedo=1;
    }
  }
  modePrev = mode;
  return;
}
#endif
|
variable_utils.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
// Ruben Zorrilla
// Vicente Mataix Ferrandiz
//
//
#if !defined(KRATOS_VARIABLE_UTILS )
#define KRATOS_VARIABLE_UTILS
/* System includes */
/* External includes */
/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/checks.h"
#include "utilities/parallel_utilities.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class VariableUtils
* @ingroup KratosCore
* @brief This class implements a set of auxiliary, already parallelized, methods to
* perform some common tasks related with the variable values and fixity.
* @details The methods are exported to python in order to add these improvements to the python interface
* @author Riccardo Rossi
* @author Ruben Zorrilla
* @author Vicente Mataix Ferrandiz
*/
class KRATOS_API(KRATOS_CORE) VariableUtils
{
public:
///@name Type Definitions
///@{
/// The node type
typedef ModelPart::NodeType NodeType;
/// The condition type
typedef ModelPart::ConditionType ConditionType;
/// The element type
typedef ModelPart::ElementType ElementType;
/// We create the Pointer related to VariableUtils
KRATOS_CLASS_POINTER_DEFINITION(VariableUtils);
/// The nodes container
typedef ModelPart::NodesContainerType NodesContainerType;
/// The conditions container
typedef ModelPart::ConditionsContainerType ConditionsContainerType;
/// The elements container
typedef ModelPart::ElementsContainerType ElementsContainerType;
/// A definition of the double variable
typedef Variable< double > DoubleVarType;
/// A definition of the array variable
typedef Variable< array_1d<double, 3 > > ArrayVarType;
///@}
///@name Life Cycle
///@{
/** Constructor.
*/
/** Destructor.
*/
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Copies the nodal value of a variable from an origin model
* part nodes to the nodes in a destination model part. It is assumed that
* both origin and destination model parts have the same number of nodes.
* @param rVariable reference to the variable to get the value from
* @param rDestinationVariable reference to the variable to be set
* @param rOriginModelPart origin model part from where the values are retrieved
* @param rDestinationModelPart destination model part to where the values are copied to
* @param BuffStep buffer step
*/
template< class TVarType >
void CopyModelPartNodalVar(
    const TVarType& rVariable,
    const TVarType& rDestinationVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const unsigned int BuffStep = 0)
{
    // Node counts must match: the copy pairs nodes positionally.
    const int num_origin_nodes = rOriginModelPart.NumberOfNodes();
    const int num_destination_nodes = rDestinationModelPart.NumberOfNodes();
    KRATOS_ERROR_IF_NOT(num_origin_nodes == num_destination_nodes) << "Origin and destination model parts have different number of nodes."
        << "\n\t- Number of origin nodes: " << num_origin_nodes
        << "\n\t- Number of destination nodes: " << num_destination_nodes << std::endl;

    const auto origin_begin = rOriginModelPart.NodesBegin();
    const auto destination_begin = rDestinationModelPart.NodesBegin();

    // Copy the historical value of rVariable into rDestinationVariable node by node.
    #pragma omp parallel for
    for (int i = 0; i < num_origin_nodes; ++i) {
        (destination_begin + i)->GetSolutionStepValue(rDestinationVariable, BuffStep) =
            (origin_begin + i)->GetSolutionStepValue(rVariable, BuffStep);
    }
}
/**
* @brief Copies the nodal value of a variable from an origin model
* part nodes to the nodes in a destination model part. It is assumed that
* both origin and destination model parts have the same number of nodes.
* @param rVariable reference to the variable to get the value from and to save in
* @param rOriginModelPart origin model part from where the values are retrieved
* @param rDestinationModelPart destination model part to where the values are copied to
* @param BuffStep buffer step
*/
template< class TVarType >
void CopyModelPartNodalVar(
    const TVarType& rVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const unsigned int BuffStep = 0)
{
    // Single-variable convenience overload: source and target variable coincide.
    this->CopyModelPartNodalVar(
        rVariable, rVariable, rOriginModelPart, rDestinationModelPart, BuffStep);
}
/**
 * @brief Copies the historical nodal value of rVariable from the origin model part
 * nodes into the non-historical container (rDestinationVariable) of the destination
 * model part nodes. Both model parts must have the same number of nodes.
 */
template< class TVarType >
void CopyModelPartNodalVarToNonHistoricalVar(
    const TVarType &rVariable,
    const TVarType &rDestinationVariable,
    const ModelPart &rOriginModelPart,
    ModelPart &rDestinationModelPart,
    const unsigned int BuffStep = 0)
{
    const int num_origin_nodes = rOriginModelPart.NumberOfNodes();
    const int num_destination_nodes = rDestinationModelPart.NumberOfNodes();
    KRATOS_ERROR_IF_NOT(num_origin_nodes == num_destination_nodes) <<
        "Origin and destination model parts have different number of nodes." <<
        "\n\t- Number of origin nodes: " << num_origin_nodes <<
        "\n\t- Number of destination nodes: " << num_destination_nodes << std::endl;

    const auto origin_begin = rOriginModelPart.NodesBegin();
    const auto destination_begin = rDestinationModelPart.NodesBegin();

    // Read from the historical database, write into the non-historical one.
    #pragma omp parallel for
    for (int i = 0; i < num_origin_nodes; ++i) {
        (destination_begin + i)->GetValue(rDestinationVariable) =
            (origin_begin + i)->GetSolutionStepValue(rVariable, BuffStep);
    }
}
template< class TVarType >
void CopyModelPartNodalVarToNonHistoricalVar(
    const TVarType &rVariable,
    const ModelPart &rOriginModelPart,
    ModelPart &rDestinationModelPart,
    const unsigned int BuffStep = 0)
{
    // Single-variable convenience overload: source and target variable coincide.
    this->CopyModelPartNodalVarToNonHistoricalVar(
        rVariable, rVariable, rOriginModelPart, rDestinationModelPart, BuffStep);
}
/**
 * @brief Copies the historical value of rOriginVariable from the flagged nodes of
 * the origin model part into the historical database (rDestinationVariable) of the
 * destination model part, then synchronizes the destination variable.
 * @param rOriginVariable variable read from the origin nodes
 * @param rDestinationVariable variable written on the destination nodes
 * @param rOriginModelPart model part the values are read from
 * @param rDestinationModelPart model part the values are written to
 * @param rFlag flag used to select the nodes to copy
 * @param CheckValue flag status a node must have to be copied
 * @param ReadBufferStep buffer step used when reading (must be < origin buffer size)
 * @param WriteBufferStep buffer step used when writing (must be < destination buffer size)
 */
template <class TDataType>
void CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true,
    const unsigned int ReadBufferStep = 0,
    const unsigned int WriteBufferStep = 0)
{
    KRATOS_TRY

    // Copying a variable onto itself at the same buffer step is a no-op hazard.
    KRATOS_ERROR_IF(
        rOriginModelPart.FullName() == rDestinationModelPart.FullName() &&
        rOriginVariable == rDestinationVariable &&
        ReadBufferStep == WriteBufferStep)
        << "Trying to copy flagged nodal solution step values with the same origin and destination model parts/variables/buffer steps. This is not permitted ( Origin model part: "
        << rOriginModelPart.Name() << ", destination model part: " << rDestinationModelPart.Name()
        << ", variable: " << rOriginVariable.Name() << ", buffer step: " << ReadBufferStep << " ) !";

    KRATOS_ERROR_IF_NOT(rOriginModelPart.HasNodalSolutionStepVariable(rOriginVariable))
        << rOriginVariable.Name() << " is not found in nodal solution step variables list in origin model part ( "
        << rOriginModelPart.Name() << " ).";

    KRATOS_ERROR_IF_NOT(rDestinationModelPart.HasNodalSolutionStepVariable(rDestinationVariable))
        << rDestinationVariable.Name() << " is not found in nodal solution step variables list in destination model part ( "
        << rDestinationModelPart.Name() << " ).";

    KRATOS_ERROR_IF(ReadBufferStep >= rOriginModelPart.GetBufferSize())
        << "Origin model part ( " << rOriginModelPart.Name()
        << " ) buffer size is smaller or equal than read buffer size [ "
        << rOriginModelPart.GetBufferSize() << " <= " << ReadBufferStep << " ].";

    // FIX: this check compares against the WRITE buffer step, but the original
    // diagnostic wrongly said "read buffer size".
    KRATOS_ERROR_IF(WriteBufferStep >= rDestinationModelPart.GetBufferSize())
        << "Destination model part ( " << rDestinationModelPart.Name()
        << " ) buffer size is smaller or equal than write buffer size [ "
        << rDestinationModelPart.GetBufferSize() << " <= " << WriteBufferStep << " ].";

    CopyModelPartFlaggedVariable<NodesContainerType>(
        rOriginModelPart, rDestinationModelPart, rFlag, CheckValue,
        [&](NodeType& rDestNode, const TDataType& rValue) {
            rDestNode.FastGetSolutionStepValue(
                rDestinationVariable, WriteBufferStep) = rValue;
        },
        [&](const NodeType& rOriginNode) -> const TDataType& {
            return rOriginNode.FastGetSolutionStepValue(rOriginVariable, ReadBufferStep);
        });

    // Keep ghost nodes consistent in distributed runs.
    rDestinationModelPart.GetCommunicator().SynchronizeVariable(rDestinationVariable);

    KRATOS_CATCH("");
}
/// @brief Single-model-part overload: origin and destination are the same model part.
template <class TDataType>
void CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    ModelPart& rModelPart,
    const Flags& rFlag,
    const bool CheckValue = true,
    const unsigned int ReadBufferStep = 0,
    const unsigned int WriteBufferStep = 0)
{
    KRATOS_TRY

    // Forward with rModelPart acting as both origin and destination.
    CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar(
        rOriginVariable, rDestinationVariable, rModelPart, rModelPart,
        rFlag, CheckValue, ReadBufferStep, WriteBufferStep);

    KRATOS_CATCH("");
}
/// @brief Single-variable overload: rVariable is both read and written.
template <class TDataType>
void CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar(
    const Variable<TDataType>& rVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true,
    const unsigned int ReadBufferStep = 0,
    const unsigned int WriteBufferStep = 0)
{
    KRATOS_TRY

    // Forward with rVariable acting as both source and target variable.
    CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar(
        rVariable, rVariable, rOriginModelPart, rDestinationModelPart,
        rFlag, CheckValue, ReadBufferStep, WriteBufferStep);

    KRATOS_CATCH("");
}
/**
 * @brief Copies the historical value of rOriginVariable from the flagged nodes of
 * the origin model part into the non-historical container (rDestinationVariable)
 * of the destination model part, then synchronizes the destination variable.
 * @param rOriginVariable variable read from the origin nodes (historical)
 * @param rDestinationVariable variable written on the destination nodes (non-historical)
 * @param rOriginModelPart model part the values are read from
 * @param rDestinationModelPart model part the values are written to
 * @param rFlag flag used to select the nodes to copy
 * @param CheckValue flag status a node must have to be copied
 * @param ReadBufferStep buffer step used when reading (must be < origin buffer size)
 */
template <class TDataType>
void CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true,
    const unsigned int ReadBufferStep = 0)
{
    KRATOS_TRY

    // The origin variable must live in the origin model part's historical database.
    KRATOS_ERROR_IF_NOT(rOriginModelPart.HasNodalSolutionStepVariable(rOriginVariable))
        << rOriginVariable.Name() << " is not found in nodal solution step variables list in origin model part ( "
        << rOriginModelPart.Name() << " ).";

    // The requested read step must be inside the origin buffer.
    KRATOS_ERROR_IF(ReadBufferStep >= rOriginModelPart.GetBufferSize())
        << "Origin model part ( " << rOriginModelPart.Name()
        << " ) buffer size is smaller or equal than read buffer size [ "
        << rOriginModelPart.GetBufferSize() << " <= " << ReadBufferStep << " ].";

    // Generic flagged copy: the first lambda writes, the second reads.
    CopyModelPartFlaggedVariable<NodesContainerType>(
        rOriginModelPart, rDestinationModelPart, rFlag, CheckValue,
        [&](NodeType& rDestNode, const TDataType& rValue) {
            rDestNode.SetValue(rDestinationVariable, rValue);
        },
        [&](const NodeType& rOriginNode) -> const TDataType& {
            return rOriginNode.FastGetSolutionStepValue(rOriginVariable, ReadBufferStep);
        });

    // Keep ghost nodes consistent in distributed runs.
    rDestinationModelPart.GetCommunicator().SynchronizeNonHistoricalVariable(rDestinationVariable);

    KRATOS_CATCH("");
}
/// @brief Single-model-part overload: origin and destination are the same model part.
template <class TDataType>
void CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    ModelPart& rModelPart,
    const Flags& rFlag,
    const bool CheckValue = true,
    const unsigned int ReadBufferStep = 0)
{
    // Forward with rModelPart acting as both origin and destination.
    CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar(
        rOriginVariable, rDestinationVariable, rModelPart, rModelPart,
        rFlag, CheckValue, ReadBufferStep);
}
/// @brief Single-variable overload: rVariable is both read and written.
template <class TDataType>
void CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar(
    const Variable<TDataType>& rVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true,
    const unsigned int ReadBufferStep = 0)
{
    // Forward with rVariable acting as both source and target variable.
    CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar(
        rVariable, rVariable, rOriginModelPart, rDestinationModelPart,
        rFlag, CheckValue, ReadBufferStep);
}
/// @brief Single-variable, single-model-part overload.
template <class TDataType>
void CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar(
    const Variable<TDataType>& rVariable,
    ModelPart& rModelPart,
    const Flags& rFlag,
    const bool CheckValue = true,
    const unsigned int ReadBufferStep = 0)
{
    // Forward with rVariable and rModelPart used on both sides of the copy.
    CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar(
        rVariable, rVariable, rModelPart, rModelPart,
        rFlag, CheckValue, ReadBufferStep);
}
/**
 * @brief Copies the non-historical value of rOriginVariable from the flagged nodes
 * of the origin model part into the historical database (rDestinationVariable) of
 * the destination model part, then synchronizes the destination variable.
 * @param rOriginVariable variable read from the origin nodes (non-historical)
 * @param rDestinationVariable variable written on the destination nodes (historical)
 * @param rOriginModelPart model part the values are read from
 * @param rDestinationModelPart model part the values are written to
 * @param rFlag flag used to select the nodes to copy
 * @param CheckValue flag status a node must have to be copied
 * @param WriteBufferStep buffer step used when writing (must be < destination buffer size)
 */
template <class TDataType>
void CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true,
    const unsigned int WriteBufferStep = 0)
{
    KRATOS_TRY

    KRATOS_ERROR_IF_NOT(rDestinationModelPart.HasNodalSolutionStepVariable(rDestinationVariable))
        << rDestinationVariable.Name() << " is not found in nodal solution step variables list in destination model part ( "
        << rDestinationModelPart.Name() << " ).";

    // FIX: this check compares against the WRITE buffer step, but the original
    // diagnostic wrongly said "read buffer size".
    KRATOS_ERROR_IF(WriteBufferStep >= rDestinationModelPart.GetBufferSize())
        << "Destination model part ( " << rDestinationModelPart.Name()
        << " ) buffer size is smaller or equal than write buffer size [ "
        << rDestinationModelPart.GetBufferSize() << " <= " << WriteBufferStep << " ].";

    CopyModelPartFlaggedVariable<NodesContainerType>(
        rOriginModelPart, rDestinationModelPart, rFlag, CheckValue,
        [&](NodeType& rDestNode, const TDataType& rValue) {
            rDestNode.FastGetSolutionStepValue(
                rDestinationVariable, WriteBufferStep) = rValue;
        },
        [&](const NodeType& rOriginNode) -> const TDataType& {
            return rOriginNode.GetValue(rOriginVariable);
        });

    // Keep ghost nodes consistent in distributed runs.
    rDestinationModelPart.GetCommunicator().SynchronizeVariable(rDestinationVariable);

    KRATOS_CATCH("");
}
/// @brief Single-model-part overload: origin and destination are the same model part.
template <class TDataType>
void CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    ModelPart& rModelPart,
    const Flags& rFlag,
    const bool CheckValue = true,
    const unsigned int WriteBufferStep = 0)
{
    // Forward with rModelPart acting as both origin and destination.
    CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar(
        rOriginVariable, rDestinationVariable, rModelPart, rModelPart,
        rFlag, CheckValue, WriteBufferStep);
}
/// @brief Single-variable overload: rVariable is both read and written.
template <class TDataType>
void CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar(
    const Variable<TDataType>& rVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true,
    const unsigned int WriteBufferStep = 0)
{
    // Forward with rVariable acting as both source and target variable.
    CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar(
        rVariable, rVariable, rOriginModelPart, rDestinationModelPart,
        rFlag, CheckValue, WriteBufferStep);
}
/// @brief Single-variable, single-model-part overload.
template <class TDataType>
void CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar(
    const Variable<TDataType>& rVariable,
    ModelPart& rModelPart,
    const Flags& rFlag,
    const bool CheckValue = true,
    const unsigned int WriteBufferStep = 0)
{
    // Forward with rVariable and rModelPart used on both sides of the copy.
    CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar(
        rVariable, rVariable, rModelPart, rModelPart,
        rFlag, CheckValue, WriteBufferStep);
}
/**
 * @brief Copies the non-historical value of rOriginVariable from the flagged nodes
 * of the origin model part into the non-historical container (rDestinationVariable)
 * of the destination model part, then synchronizes the destination variable.
 * @param rOriginVariable variable read from the origin nodes
 * @param rDestinationVariable variable written on the destination nodes
 * @param rOriginModelPart model part the values are read from
 * @param rDestinationModelPart model part the values are written to
 * @param rFlag flag used to select the nodes to copy
 * @param CheckValue flag status a node must have to be copied
 */
template <class TDataType>
void CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true)
{
    KRATOS_TRY

    // Same model part + same variable would copy a value onto itself.
    KRATOS_ERROR_IF(
        rOriginModelPart.FullName() == rDestinationModelPart.FullName() &&
        rOriginVariable == rDestinationVariable
        ) << "Trying to copy flagged nodal non-historical values with the same model parts/variables. This is not permitted ( Origin model part: "
        << rOriginModelPart.Name() << ", destination model part: " << rDestinationModelPart.Name()
        << ", variable: " << rOriginVariable.Name() << " ) !";

    // Generic flagged copy: the first lambda writes, the second reads.
    CopyModelPartFlaggedVariable<NodesContainerType>(
        rOriginModelPart, rDestinationModelPart, rFlag, CheckValue,
        [&](NodeType& rDestNode, const TDataType& rValue) {
            rDestNode.SetValue(rDestinationVariable, rValue);
        },
        [&](const NodeType& rOriginNode) -> const TDataType& {
            return rOriginNode.GetValue(rOriginVariable);
        });

    // Keep ghost nodes consistent in distributed runs.
    rDestinationModelPart.GetCommunicator().SynchronizeNonHistoricalVariable(rDestinationVariable);

    KRATOS_CATCH("");
}
/// @brief Single-model-part overload: origin and destination are the same model part.
template <class TDataType>
void CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    ModelPart& rModelPart,
    const Flags& rFlag,
    const bool CheckValue = true)
{
    // Forward with rModelPart acting as both origin and destination.
    CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar(
        rOriginVariable, rDestinationVariable,
        rModelPart, rModelPart, rFlag, CheckValue);
}
/// @brief Single-variable overload: rVariable is both read and written.
template <class TDataType>
void CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar(
    const Variable<TDataType>& rVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true)
{
    // Forward with rVariable acting as both source and target variable.
    CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar(
        rVariable, rVariable,
        rOriginModelPart, rDestinationModelPart, rFlag, CheckValue);
}
/**
 * @brief Copies the non-historical value of rOriginVariable from the flagged
 * elements of the origin model part into rDestinationVariable of the destination
 * model part elements.
 * @param rOriginVariable variable read from the origin elements
 * @param rDestinationVariable variable written on the destination elements
 * @param rOriginModelPart model part the values are read from
 * @param rDestinationModelPart model part the values are written to
 * @param rFlag flag used to select the elements to copy
 * @param CheckValue flag status an element must have to be copied
 */
template <class TDataType>
void CopyModelPartFlaggedElementVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true)
{
    KRATOS_TRY

    // Same model part + same variable would copy a value onto itself.
    KRATOS_ERROR_IF(rOriginModelPart.FullName() == rDestinationModelPart.FullName() && rOriginVariable == rDestinationVariable)
        << "Trying to copy flagged elemental variable data with the same model "
           "parts/variables. This is not permitted ( Origin model part: "
        << rOriginModelPart.Name() << ", destination model part: " << rDestinationModelPart.Name()
        << ", variable: " << rOriginVariable.Name() << " ) !";

    // Generic flagged copy over elements: the first lambda writes, the second reads.
    CopyModelPartFlaggedVariable<ElementsContainerType>(
        rOriginModelPart, rDestinationModelPart, rFlag, CheckValue,
        [&](ElementType& rDestElement, const TDataType& rValue) {
            rDestElement.SetValue(rDestinationVariable, rValue);
        },
        [&](const ElementType& rOriginElement) -> const TDataType& {
            return rOriginElement.GetValue(rOriginVariable);
        });

    KRATOS_CATCH("");
}
/// @brief Single-model-part overload: origin and destination are the same model part.
template <class TDataType>
void CopyModelPartFlaggedElementVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    ModelPart& rModelPart,
    const Flags& rFlag,
    const bool CheckValue = true)
{
    // Forward with rModelPart acting as both origin and destination.
    CopyModelPartFlaggedElementVar(
        rOriginVariable, rDestinationVariable,
        rModelPart, rModelPart, rFlag, CheckValue);
}
/// @brief Single-variable overload: rVariable is both read and written.
template <class TDataType>
void CopyModelPartFlaggedElementVar(
    const Variable<TDataType>& rVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true)
{
    // Forward with rVariable acting as both source and target variable.
    CopyModelPartFlaggedElementVar(
        rVariable, rVariable,
        rOriginModelPart, rDestinationModelPart, rFlag, CheckValue);
}
/**
 * @brief Copies the non-historical value of rOriginVariable from the flagged
 * conditions of the origin model part into rDestinationVariable of the destination
 * model part conditions.
 * @param rOriginVariable variable read from the origin conditions
 * @param rDestinationVariable variable written on the destination conditions
 * @param rOriginModelPart model part the values are read from
 * @param rDestinationModelPart model part the values are written to
 * @param rFlag flag used to select the conditions to copy
 * @param CheckValue flag status a condition must have to be copied
 */
template <class TDataType>
void CopyModelPartFlaggedConditionVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true)
{
    KRATOS_TRY

    // Same model part + same variable would copy a value onto itself.
    KRATOS_ERROR_IF(rOriginModelPart.FullName() == rDestinationModelPart.FullName() && rOriginVariable == rDestinationVariable)
        << "Trying to copy flagged condition variable data with the same model "
           "parts/variables. This is not permitted ( Origin model part: "
        << rOriginModelPart.Name() << ", destination model part: " << rDestinationModelPart.Name()
        << ", variable: " << rOriginVariable.Name() << " ) !";

    // Generic flagged copy over conditions: the first lambda writes, the second reads.
    CopyModelPartFlaggedVariable<ConditionsContainerType>(
        rOriginModelPart, rDestinationModelPart, rFlag, CheckValue,
        [&](ConditionType& rDestCondition, const TDataType& rValue) {
            rDestCondition.SetValue(rDestinationVariable, rValue);
        },
        [&](const ConditionType& rOriginCondition) -> const TDataType& {
            return rOriginCondition.GetValue(rOriginVariable);
        });

    KRATOS_CATCH("");
}
/// @brief Single-model-part overload: origin and destination are the same model part.
template <class TDataType>
void CopyModelPartFlaggedConditionVar(
    const Variable<TDataType>& rOriginVariable,
    const Variable<TDataType>& rDestinationVariable,
    ModelPart& rModelPart,
    const Flags& rFlag,
    const bool CheckValue = true)
{
    // Forward with rModelPart acting as both origin and destination.
    CopyModelPartFlaggedConditionVar(
        rOriginVariable, rDestinationVariable,
        rModelPart, rModelPart, rFlag, CheckValue);
}
/// @brief Single-variable overload: rVariable is both read and written.
template <class TDataType>
void CopyModelPartFlaggedConditionVar(
    const Variable<TDataType>& rVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart,
    const Flags& rFlag,
    const bool CheckValue = true)
{
    // Forward with rVariable acting as both source and target variable.
    CopyModelPartFlaggedConditionVar(
        rVariable, rVariable,
        rOriginModelPart, rDestinationModelPart, rFlag, CheckValue);
}
/**
* @brief Copies the elemental value of a variable from an origin model
* part elements to the elements in a destination model part. It is assumed that
* both origin and destination model parts have the same number of elements.
* @param rVariable reference to the variable to be set
* @param rOriginModelPart origin model part from where the values are retrieved
* @param rDestinationModelPart destination model part to where the values are copied to
* @param BuffStep buffer step
*/
template< class TVarType >
void CopyModelPartElementalVar(
    const TVarType& rVariable,
    const ModelPart& rOriginModelPart,
    ModelPart& rDestinationModelPart){

    // Element counts must match: the copy pairs elements positionally.
    const int num_origin_elements = rOriginModelPart.NumberOfElements();
    const int num_destination_elements = rDestinationModelPart.NumberOfElements();
    KRATOS_ERROR_IF_NOT(num_origin_elements == num_destination_elements) << "Origin and destination model parts have different number of elements."
        << "\n\t- Number of origin elements: " << num_origin_elements
        << "\n\t- Number of destination elements: " << num_destination_elements << std::endl;

    const auto origin_begin = rOriginModelPart.ElementsBegin();
    const auto destination_begin = rDestinationModelPart.ElementsBegin();

    // Copy the non-historical elemental value of rVariable element by element.
    #pragma omp parallel for
    for (int i = 0; i < num_origin_elements; ++i) {
        (destination_begin + i)->SetValue(rVariable, (origin_begin + i)->GetValue(rVariable));
    }
}
/**
 * @brief Sets the nodal (historical database) value of a scalar variable
 * @param rVariable reference to the scalar variable to be set
 * @param Value value assigned to every node in the set
 * @param rNodes reference to the objective node set
 */
template <class TVarType>
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable")
void SetScalarVar(
    const TVarType &rVariable,
    const double Value,
    NodesContainerType &rNodes)
{
    KRATOS_TRY

    const auto it_begin = rNodes.begin();
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(rNodes.size()); ++i) {
        (it_begin + i)->FastGetSolutionStepValue(rVariable) = Value;
    }

    KRATOS_CATCH("")
}
/**
 * @brief Sets the nodal (historical database) value of a scalar variable, filtered by a flag
 * @param rVariable reference to the scalar variable to be set
 * @param Value value assigned to every selected node
 * @param rNodes reference to the objective node set
 * @param Flag the flag to be considered in the assignation
 * @param Check flag state a node must have to receive the value
 */
template< class TVarType >
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable")
void SetScalarVarForFlag(
    const TVarType& rVariable,
    const double Value,
    NodesContainerType& rNodes,
    const Flags Flag,
    const bool Check = true
    )
{
    KRATOS_TRY

    const auto it_begin = rNodes.begin();
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(rNodes.size()); ++i) {
        auto it_node = it_begin + i;
        if (it_node->Is(Flag) == Check) {
            it_node->FastGetSolutionStepValue(rVariable) = Value;
        }
    }

    KRATOS_CATCH("")
}
/**
 * @brief Sets the nodal (historical database) value of a vector variable
 * @note Deprecated — prefer the templated SetVariable overloads.
 * @param rVariable reference to the vector variable to be set
 * @param Value array containing the Value to be set
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable")
void SetVectorVar(
const ArrayVarType& rVariable,
const array_1d<double, 3 >& Value,
NodesContainerType& rNodes
);
/**
 * @brief Sets the nodal (historical database) value of a vector variable, filtered by a flag
 * @note Deprecated — prefer the templated SetVariable overloads.
 * @param rVariable reference to the vector variable to be set
 * @param Value array containing the Value to be set
 * @param rNodes reference to the objective node set
 * @param Flag The flag to be considered in the assignation
 * @param Check What is checked from the flag
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable")
void SetVectorVarForFlag(
const ArrayVarType& rVariable,
const array_1d<double, 3 >& Value,
NodesContainerType& rNodes,
const Flags Flag,
const bool Check = true
);
/**
* @brief Sets the nodal value of a scalar variable
* @tparam TDataType Variable data type
* @tparam Variable<TDataType> Variable type
* @param rVariable reference to the scalar variable to be set
* @param Value Value to be set
* @param rNodes reference to the objective node set
*/
template<class TDataType, class TVarType = Variable<TDataType> >
void SetVariable(
const TVarType& rVariable,
const TDataType& rValue,
NodesContainerType& rNodes
)
{
KRATOS_TRY
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
NodesContainerType::iterator it_node = rNodes.begin() + k;
it_node->FastGetSolutionStepValue(rVariable) = rValue;
}
KRATOS_CATCH("")
}
/**
* @brief Sets the nodal value of a scalar variable (considering flag)
* @tparam TDataType Variable data type
* @tparam Variable<TDataType> Variable type
* @param rVariable reference to the scalar variable to be set
* @param rValue Value to be set
* @param rNodes reference to the objective node set
* @param Flag The flag to be considered in the assignation
* @param Check What is checked from the flag
*/
template <class TDataType, class TVarType = Variable<TDataType>>
void SetVariable(
const TVarType &rVariable,
const TDataType &rValue,
NodesContainerType &rNodes,
const Flags Flag,
const bool CheckValue = true)
{
KRATOS_TRY
#pragma omp parallel for
for (int k = 0; k < static_cast<int>(rNodes.size()); ++k)
{
auto it_node = rNodes.begin() + k;
if (it_node->Is(Flag) == CheckValue)
{
it_node->FastGetSolutionStepValue(rVariable) = rValue;
}
}
KRATOS_CATCH("")
}
/**
 * @brief Sets the non-historical value of any variable to zero over a container
 * Thin forwarder: delegates to SetNonHistoricalVariable with rVariable.Zero()
 * as the value, so it works for any container type (nodes, elements, conditions).
 * @param rVariable reference to the variable to be zeroed
 * @param rContainer reference to the objective container
 */
template< class TType , class TContainerType>
void SetNonHistoricalVariableToZero(
const Variable< TType >& rVariable,
TContainerType& rContainer)
{
KRATOS_TRY
this->SetNonHistoricalVariable(rVariable, rVariable.Zero(), rContainer);
KRATOS_CATCH("")
}
/**
 * @brief Sets the nodal (historical database) value of any variable to zero
 * Thin forwarder: delegates to SetVariable with rVariable.Zero() as the value.
 * @param rVariable reference to the variable to be zeroed
 * @param rNodes reference to the objective node set
 */
template< class TType >
void SetHistoricalVariableToZero(
const Variable< TType >& rVariable,
NodesContainerType& rNodes)
{
KRATOS_TRY
this->SetVariable(rVariable, rVariable.Zero(), rNodes);
KRATOS_CATCH("")
}
/**
 * @brief Sets the nodal non-historical value of a scalar variable
 * @param rVariable reference to the scalar variable to be set
 * @param Value value assigned to every node
 * @param rNodes reference to the objective node set
 */
template< class TVarType >
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetNonHistoricalVariable")
void SetNonHistoricalScalarVar(
    const TVarType& rVariable,
    const double Value,
    NodesContainerType& rNodes
    )
{
    KRATOS_TRY

    const auto it_begin = rNodes.begin();
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(rNodes.size()); ++i) {
        (it_begin + i)->SetValue(rVariable, Value);
    }

    KRATOS_CATCH("")
}
/**
 * @brief Sets the nodal non-historical value of a vector variable
 * @note Deprecated — prefer the templated SetNonHistoricalVariable overloads.
 * @param rVariable reference to the vector variable to be set
 * @param Value array containing the Value to be set
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetNonHistoricalVariable")
void SetNonHistoricalVectorVar(
const ArrayVarType& rVariable,
const array_1d<double, 3 >& Value,
NodesContainerType& rNodes
);
/**
* @brief Sets the container value of any type of non historical variable
* @param rVariable reference to the scalar variable to be set
* @param Value Value to be set
* @param rContainer Reference to the objective container
*/
template< class TType, class TContainerType, class TVarType = Variable< TType >>
void SetNonHistoricalVariable(
const TVarType& rVariable,
const TType& Value,
TContainerType& rContainer
)
{
KRATOS_TRY
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = rContainer.begin() + k;
it_cont->SetValue(rVariable, Value);
}
KRATOS_CATCH("")
}
/**
* @brief Sets the container value of any type of non historical variable (considering flag)
* @param rVariable reference to the scalar variable to be set
* @param Value Value to be set
* @param rContainer Reference to the objective container
* @param Flag The flag to be considered in the assignation
* @param Check What is checked from the flag
*/
template< class TType, class TContainerType, class TVarType = Variable< TType >>
void SetNonHistoricalVariable(
const TVarType& rVariable,
const TType& rValue,
TContainerType& rContainer,
const Flags Flag,
const bool Check = true
)
{
KRATOS_TRY
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = rContainer.begin() + k;
if (it_cont->Is(Flag) == Check) {
it_cont->SetValue(rVariable, rValue);
}
}
KRATOS_CATCH("")
}
/**
* @brief Clears the container data value container
* @param rContainer Reference to the objective container
*/
template< class TContainerType>
void ClearNonHistoricalData(TContainerType& rContainer)
{
KRATOS_TRY
const auto it_cont_begin = rContainer.begin();
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = it_cont_begin + k;
it_cont->Data().Clear();
}
KRATOS_CATCH("")
}
/**
 * @brief Distributes variable values in TContainerType container to nodes
 *
 * This method distributes variables values stored in TContainerType data value container in rModelPart
 * to nodes. Constant weighting is used for each node based on rWeightVariable value. The result
 * is stored in nodal non-historical data value container under the same rVariable. If IsInverseWeightProvided
 * is true, then the weights provided by rWeightVariable is inverted to get nodal weight. Otherwise, the value
 * given by rWeightVariable is used as weight.
 *
 * @tparam TDataType Data type
 * @tparam TContainerType ContainerType of model part
 * @tparam TWeightDataType Data type of weight variable (this should be either int or double)
 * @param rModelPart Model part
 * @param rVariable Variable to be distributed
 * @param rWeightVariable Variable which holds weight to distribute entity values to nodes
 * @param IsInverseWeightProvided Whether the weight is provided as inverse or not.
 */
template <class TDataType, class TContainerType, class TWeightDataType>
void WeightedAccumulateVariableOnNodes(
ModelPart& rModelPart,
const Variable<TDataType>& rVariable,
const Variable<TWeightDataType>& rWeightVariable,
const bool IsInverseWeightProvided = false);
/**
* @brief Sets a flag according to a given status over a given container
* @param rFlag flag to be set
* @param rFlagValue flag value to be set
* @param rContainer Reference to the objective container
*/
template< class TContainerType >
void SetFlag(
const Flags& rFlag,
const bool& rFlagValue,
TContainerType& rContainer
)
{
KRATOS_TRY
const auto it_cont_begin = rContainer.begin();
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = it_cont_begin + k;
it_cont->Set(rFlag, rFlagValue);
}
KRATOS_CATCH("")
}
/**
* @brief Flips a flag over a given container
* @param rFlag flag to be set
* @param rContainer Reference to the objective container
*/
template< class TContainerType >
void ResetFlag(
const Flags& rFlag,
TContainerType& rContainer
)
{
KRATOS_TRY
const auto it_cont_begin = rContainer.begin();
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = it_cont_begin + k;
it_cont->Reset(rFlag);
}
KRATOS_CATCH("")
}
/**
* @brief Flips a flag over a given container
* @param rFlag flag to be set
* @param rContainer Reference to the objective container
*/
template< class TContainerType >
void FlipFlag(
const Flags& rFlag,
TContainerType& rContainer
)
{
KRATOS_TRY
const auto it_cont_begin = rContainer.begin();
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) {
auto it_cont = it_cont_begin + k;
it_cont->Flip(rFlag);
}
KRATOS_CATCH("")
}
/**
 * @brief Takes the value of a vector variable and saves it in another variable
 * @note Deprecated — prefer the templated SaveVariable. Presumably reads the
 * historical database and writes non-historical (as SaveVariable does) — confirm
 * against the implementation.
 * @param OriginVariable reference to the origin vector variable
 * @param SavedVariable reference to the destination vector variable
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveVariable")
void SaveVectorVar(
const ArrayVarType& OriginVariable,
const ArrayVarType& SavedVariable,
NodesContainerType& rNodes
);
/**
 * @brief Takes the value of a scalar variable and saves it in another variable
 * @note Deprecated — prefer the templated SaveVariable.
 * @param OriginVariable reference to the origin scalar variable
 * @param SavedVariable reference to the destination scalar variable
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveVariable")
void SaveScalarVar(
const DoubleVarType& OriginVariable,
const DoubleVarType& SavedVariable,
NodesContainerType& rNodes
);
/**
* @brief Takes the value of a non-historical variable and saves it in another variable
* For a nodal container, this takes the value of a non-historical variable and saves it in another one
* @tparam TDataType The variable data type
* @tparam Variable<TDataType> The variable type
* @param rOriginVariable Reference to the origin variable
* @param rSavedVariable Reference to the destination variable
* @param rNodesContainer Reference to the nodal container
*/
template< class TDataType, class TVariableType = Variable<TDataType> >
void SaveVariable(
const TVariableType &rOriginVariable,
const TVariableType &rSavedVariable,
NodesContainerType &rNodesContainer)
{
KRATOS_TRY
#pragma omp parallel for
for (int i_node = 0; i_node < static_cast<int>(rNodesContainer.size()); ++i_node) {
auto it_node = rNodesContainer.begin() + i_node;
it_node->SetValue(rSavedVariable, it_node->FastGetSolutionStepValue(rOriginVariable));
}
KRATOS_CATCH("")
}
/**
 * @brief Takes the value of a non-historical vector variable and sets it in other non-historical variable
 * @note Deprecated — prefer the templated SaveNonHistoricalVariable.
 * @param OriginVariable reference to the origin vector variable
 * @param SavedVariable reference to the destination vector variable
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveNonHistoricalVariable")
void SaveVectorNonHistoricalVar(
const ArrayVarType& OriginVariable,
const ArrayVarType& SavedVariable,
NodesContainerType& rNodes
);
/**
 * @brief Takes the value of a non-historical scalar variable and sets it in other non-historical variable
 * @note Deprecated — prefer the templated SaveNonHistoricalVariable.
 * @param OriginVariable reference to the origin scalar variable
 * @param SavedVariable reference to the destination scalar variable
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveNonHistoricalVariable")
void SaveScalarNonHistoricalVar(
const DoubleVarType& OriginVariable,
const DoubleVarType& SavedVariable,
NodesContainerType& rNodes
);
/**
* @brief Takes the value of a non-historical variable and saves it in another historical variable
* For a non-nodal container, this method takes the value of an origin variable and saves it in a destination one
* @tparam TDataType The variable data type
* @tparam TContainerType The container type
* @tparam Variable<TDataType> The variable type
* @param rOriginVariable Reference to the origin variable
* @param rSavedVariable Reference to the destination variable
* @param rContainer Reference to the container of interest
*/
template< class TDataType, class TContainerType, class TVariableType = Variable<TDataType> >
void SaveNonHistoricalVariable(
const TVariableType &rOriginVariable,
const TVariableType &rSavedVariable,
TContainerType &rContainer
)
{
KRATOS_TRY
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(rContainer.size()); ++i) {
auto it_cont = rContainer.begin() + i;
it_cont->SetValue(rSavedVariable, it_cont->GetValue(rOriginVariable));
}
KRATOS_CATCH("")
}
/**
 * @brief Takes the value of an historical vector variable and sets it in other variable
 * @note Deprecated — prefer the templated CopyVariable.
 * @param OriginVariable reference to the origin vector variable
 * @param DestinationVariable reference to the destination vector variable
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use CopyVariable")
void CopyVectorVar(
const ArrayVarType& OriginVariable,
const ArrayVarType& DestinationVariable,
NodesContainerType& rNodes
);
/**
 * @brief Takes the value of an historical double variable and sets it in other variable
 * @note Deprecated — prefer the templated CopyVariable.
 * @param OriginVariable reference to the origin double variable
 * @param DestinationVariable reference to the destination double variable
 * @param rNodes reference to the objective node set
 */
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use CopyVariable")
void CopyScalarVar(
const DoubleVarType &OriginVariable,
const DoubleVarType &DestinationVariable,
NodesContainerType &rNodes);
/**
* @brief Takes the value of an historical variable and sets it in another variable
* This function takes the value of an historical variable and sets in another
* variable in all the nodes of the provided container.
* @tparam TDataType The variable data type
* @tparam Variable<TDataType> The variable type
* @param rOriginVariable Reference to the origin variable
* @param rDestinationVariable Reference to the destination variable
* @param rNodesContainer Reference to the nodes container
*/
template< class TDataType, class TVariableType = Variable<TDataType> >
void CopyVariable(
const TVariableType &rOriginVariable,
const TVariableType &rDestinationVariable,
NodesContainerType &rNodesContainer)
{
KRATOS_TRY
#pragma omp parallel for
for (int i_node = 0; i_node < static_cast<int>(rNodesContainer.size()); ++i_node) {
auto it_node = rNodesContainer.begin() + i_node;
it_node->FastGetSolutionStepValue(rDestinationVariable) = it_node->FastGetSolutionStepValue(rOriginVariable);
}
KRATOS_CATCH("")
}
/**
 * @brief Returns a list of nodes filtered using the given double variable and value
 * @param Variable reference to the double variable to be filtered
 * @param Value filtering value
 * @param rOriginNodes reference to the objective node set
 * @return NodesContainerType list of filtered nodes
 */
NodesContainerType SelectNodeList(
const DoubleVarType& Variable,
const double Value,
const NodesContainerType& rOriginNodes
);
/**
 * @brief Checks that every node of a node set has the specified variable
 * in its nodal (historical) database. Errors out via the check macro otherwise.
 * @param rVariable reference to the variable to be checked
 * @param rNodes reference to the nodes set to be checked
 * @return int always 0 on success
 */
template<class TVarType>
int CheckVariableExists(
    const TVarType& rVariable,
    const NodesContainerType& rNodes
    )
{
    KRATOS_TRY

    for (const auto& r_node : rNodes) {
        KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(rVariable, r_node);
    }

    return 0;

    KRATOS_CATCH("");
}
/**
 * @brief Fixes or frees a variable for all of the nodes in the list. The dof has to exist.
 * @param rVar reference to the variable to be fixed or freed
 * @param IsFixed if true fixes, if false frees
 * @param rNodes reference to the nodes set to be fixed or freed
 */
template< class TVarType >
void ApplyFixity(
    const TVarType& rVar,
    const bool IsFixed,
    NodesContainerType& rNodes
    )
{
    KRATOS_TRY

    if (rNodes.size() != 0) {
        // Check the first node up front so the error is not thrown inside a parallel region.
        KRATOS_ERROR_IF_NOT(rNodes.begin()->HasDofFor(rVar)) << "Trying to fix/free dof of variable " << rVar.Name() << " but this dof does not exist in node #" << rNodes.begin()->Id() << "!" << std::endl;

#ifdef KRATOS_DEBUG
        // In debug builds, verify the dof exists on every node.
        for (const auto& r_node : rNodes) {
            KRATOS_ERROR_IF_NOT(r_node.HasDofFor(rVar)) << "Trying to fix/free dof of variable " << rVar.Name() << " but this dof does not exist in node #" << r_node.Id() << "!" << std::endl;
        }
#endif

        CheckVariableExists(rVar, rNodes);

        // Single parallel loop; the fix/free decision is loop-invariant.
        const auto it_node_begin = rNodes.begin();
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(rNodes.size()); ++i) {
            const auto p_dof = (it_node_begin + i)->pGetDof(rVar);
            if (IsFixed) {
                p_dof->FixDof();
            } else {
                p_dof->FreeDof();
            }
        }
    }

    KRATOS_CATCH("")
}
/**
 * @brief Fixes/Frees dofs based on a flag
 *
 * This method fixes/frees given rVariable, if rFlag matches CheckValue provided for that
 * specific node.
 *
 * @tparam TVarType Variable type
 * @param rVariable Variable to be fixed or freed
 * @param IsFixed True to fix variable, false to free variable
 * @param rNodes Nodes container
 * @param rFlag Flag to be checked to fix or free
 * @param CheckValue Flag value which is checked against
 */
template< class TVarType >
void ApplyFixity(
    const TVarType& rVariable,
    const bool IsFixed,
    NodesContainerType& rNodes,
    const Flags& rFlag,
    const bool CheckValue = true)
{
    KRATOS_TRY

    if (rNodes.size() != 0) {
        // Check the first node up front so the error is not thrown inside a parallel region.
        KRATOS_ERROR_IF_NOT(rNodes.begin()->HasDofFor(rVariable))
            << "Trying to fix/free dof of variable " << rVariable.Name()
            << " but this dof does not exist in node #"
            << rNodes.begin()->Id() << "!" << std::endl;

#ifdef KRATOS_DEBUG
        // In debug builds, verify the dof exists on every node.
        for (const auto& r_node : rNodes) {
            KRATOS_ERROR_IF_NOT(r_node.HasDofFor(rVariable))
                << "Trying to fix/free dof of variable " << rVariable.Name()
                << " but this dof does not exist in node #" << r_node.Id()
                << "!" << std::endl;
        }
#endif

        CheckVariableExists(rVariable, rNodes);

        // Single traversal; the fix/free decision is loop-invariant.
        BlockPartition<NodesContainerType>(rNodes).for_each(
            [&rVariable, &rFlag, CheckValue, IsFixed](NodeType& rNode) {
                if (rNode.Is(rFlag) == CheckValue) {
                    auto& r_dof = *rNode.pGetDof(rVariable);
                    if (IsFixed) {
                        r_dof.FixDof();
                    } else {
                        r_dof.FreeDof();
                    }
                }
            });
    }

    KRATOS_CATCH("");
}
/**
* @brief Loops along a vector data to set its values to the nodes contained in a node set.
* @note This function is suitable for scalar historical variables, since each
* one of the values in the data vector is set to its correspondent node. Besides,
* the values must be sorted as the nodes are (value i corresponds to node i).
* @param rVar reference to the variable to be fixed or freed
* @param rData rData vector. Note that its lenght must equal the number of nodes
* @param rNodes reference to the nodes set to be set
*/
template< class TVarType >
void ApplyVector(
const TVarType& rVar,
const Vector& rData,
NodesContainerType& rNodes
)
{
KRATOS_TRY
if(rNodes.size() != 0 && rNodes.size() == rData.size()) {
// First we do a check
CheckVariableExists(rVar, rNodes);
#pragma omp parallel for
for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) {
NodesContainerType::iterator it_node = rNodes.begin() + k;
it_node->FastGetSolutionStepValue(rVar) = rData[k];
}
} else
KRATOS_ERROR << "There is a mismatch between the size of data array and the number of nodes ";
KRATOS_CATCH("")
}
/**
 * @brief Returns the nodal value summation of a non-historical vector variable.
 * @param rVar reference to the vector variable to summed
 * @param rModelPart reference to the model part that contains the objective node set
 * @return array_1d<double, 3> summation vector result (presumably reduced across
 * MPI ranks as the scalar overload does — confirm in the implementation)
 */
array_1d<double, 3> SumNonHistoricalNodeVectorVariable(
const ArrayVarType& rVar,
const ModelPart& rModelPart
);
/**
* @brief Returns the nodal value summation of a non-historical scalar variable.
* @param rVar reference to the scalar variable to be summed
* @param rModelPart reference to the model part that contains the objective node set
* @return sum_value: summation result
*/
template< class TVarType >
double SumNonHistoricalNodeScalarVariable(
const TVarType& rVar,
const ModelPart& rModelPart
)
{
KRATOS_TRY
double sum_value = 0.0;
// Getting info
const auto& r_communicator = rModelPart.GetCommunicator();
const auto& r_local_mesh = r_communicator.LocalMesh();
const auto& r_nodes_array = r_local_mesh.Nodes();
const auto it_node_begin = r_nodes_array.begin();
#pragma omp parallel for reduction(+:sum_value)
for (int k = 0; k < static_cast<int>(r_nodes_array.size()); ++k) {
const auto it_node = it_node_begin + k;
sum_value += it_node->GetValue(rVar);
}
return r_communicator.GetDataCommunicator().SumAll(sum_value);
KRATOS_CATCH("")
}
/**
 * @brief This method accumulates and return a variable value
 * For a nodal historical variable, this method accumulates and
 * returns the summation in a model part.
 * The reduction is done by hand (per-thread private accumulators merged with the
 * AuxiliaryAtomicAdd helpers) because TDataType may be an array type, which
 * OpenMP's reduction clause does not handle directly.
 * @tparam TDataType Variable datatype
 * @tparam TVarType Variable type (defaults to Variable<TDataType>)
 * @param rVariable Nodal historical variable to be accumulated
 * @param rModelPart Model part in where the summation is done
 * @param BuffStep Buffer position
 * @return TDataType Value of the summation (summed over all ranks via SumAll)
 */
template< class TDataType, class TVarType = Variable<TDataType> >
TDataType SumHistoricalVariable(
const TVarType &rVariable,
const ModelPart &rModelPart,
const unsigned int BuffStep = 0
)
{
KRATOS_TRY
TDataType sum_value;
// Zero-initialize via the overloaded helper (works for double and array values).
AuxiliaryInitializeValue(sum_value);
const auto &r_communicator = rModelPart.GetCommunicator();
const int n_nodes = r_communicator.LocalMesh().NumberOfNodes();
#pragma omp parallel firstprivate(n_nodes)
{
// Per-thread accumulator, merged atomically at the end of the region.
TDataType private_sum_value;
AuxiliaryInitializeValue(private_sum_value);
#pragma omp for
for (int i_node = 0; i_node < n_nodes; ++i_node) {
const auto it_node = r_communicator.LocalMesh().NodesBegin() + i_node;
private_sum_value += it_node->GetSolutionStepValue(rVariable, BuffStep);
}
AuxiliaryAtomicAdd(private_sum_value, sum_value);
}
return r_communicator.GetDataCommunicator().SumAll(sum_value);
KRATOS_CATCH("")
}
/**
 * @brief Returns the condition value summation of a historical vector variable
 * @param rVar reference to the vector variable to be summed
 * @param rModelPart reference to the model part that contains the objective condition set
 * @return array_1d<double, 3> summation result
 */
array_1d<double, 3> SumConditionVectorVariable(
const ArrayVarType& rVar,
const ModelPart& rModelPart
);
/**
* @brief Returns the condition value summation of a historical scalar variable
* @param rVar reference to the scalar variable to be summed
* @param rModelPart reference to the model part that contains the objective condition set
* @return sum_value: summation result
*/
template< class TVarType >
double SumConditionScalarVariable(
const TVarType& rVar,
const ModelPart& rModelPart
)
{
KRATOS_TRY
double sum_value = 0.0;
// Getting info
const auto& r_communicator = rModelPart.GetCommunicator();
const auto& r_local_mesh = r_communicator.LocalMesh();
const auto& r_conditions_array = r_local_mesh.Conditions();
const auto it_cond_begin = r_conditions_array.begin();
#pragma omp parallel for reduction(+:sum_value)
for (int k = 0; k < static_cast<int>(r_conditions_array.size()); ++k) {
const auto it_cond = it_cond_begin + k;
sum_value += it_cond->GetValue(rVar);
}
return r_communicator.GetDataCommunicator().SumAll(sum_value);
KRATOS_CATCH("")
}
/**
 * @brief Returns the element value summation of a historical vector variable
 * @param rVar reference to the vector variable to be summed
 * @param rModelPart reference to the model part that contains the objective element set
 * @return array_1d<double, 3> summation result
 */
array_1d<double, 3> SumElementVectorVariable(
const ArrayVarType& rVar,
const ModelPart& rModelPart
);
/**
* @brief Returns the element value summation of a historical scalar variable
* @param rVar reference to the scalar variable to be summed
* @param rModelPart reference to the model part that contains the objective element set
* @return sum_value: summation result
*/
template< class TVarType >
double SumElementScalarVariable(
const TVarType& rVar,
const ModelPart& rModelPart
)
{
KRATOS_TRY
double sum_value = 0.0;
// Getting info
const auto& r_communicator = rModelPart.GetCommunicator();
const auto& r_local_mesh = r_communicator.LocalMesh();
const auto& r_elements_array = r_local_mesh.Elements();
const auto it_elem_begin = r_elements_array.begin();
#pragma omp parallel for reduction(+:sum_value)
for (int k = 0; k < static_cast<int>(r_elements_array.size()); ++k) {
const auto it_elem = it_elem_begin + k;
sum_value += it_elem->GetValue(rVar);
}
return r_communicator.GetDataCommunicator().SumAll(sum_value);
KRATOS_CATCH("")
}
/**
 * @brief This function add dofs to the nodes in a model part. It is useful since addition is done in parallel
 * @param rVar The variable to be added as DoF
 * @param rModelPart reference to the model part that contains the objective element set
 */
template< class TVarType >
void AddDof(
    const TVarType& rVar,
    ModelPart& rModelPart
    )
{
    KRATOS_TRY

    // First we do a check on the first node (all nodes share the same variables list).
    // Fix: the error message previously read "...rVar<<not included..." with no
    // separating space, producing a garbled diagnostic.
    if (rModelPart.NumberOfNodes() != 0)
        KRATOS_ERROR_IF_NOT(rModelPart.NodesBegin()->SolutionStepsDataHas(rVar)) << "ERROR:: Variable : " << rVar << " not included in the Solution step data ";

    rModelPart.GetNodalSolutionStepVariablesList().AddDof(&rVar);

    #pragma omp parallel for
    for (int k = 0; k < static_cast<int>(rModelPart.NumberOfNodes()); ++k) {
        auto it_node = rModelPart.NodesBegin() + k;
        it_node->AddDof(rVar);
    }

    KRATOS_CATCH("")
}
/**
 * @brief This function add dofs to the nodes in a model part. It is useful since addition is done in parallel
 * @param rVar The variable to be added as DoF
 * @param rReactionVar The corresponding reaction to the added DoF
 * @param rModelPart reference to the model part that contains the objective element set
 */
template< class TVarType >
void AddDofWithReaction(
    const TVarType& rVar,
    const TVarType& rReactionVar,
    ModelPart& rModelPart
    )
{
    KRATOS_TRY

    // Fix: error messages previously read "Soluttion" and lacked the separating
    // space after the variable name, producing garbled diagnostics.
    if (rModelPart.NumberOfNodes() != 0) {
        KRATOS_ERROR_IF_NOT(rModelPart.NodesBegin()->SolutionStepsDataHas(rVar)) << "ERROR:: DoF Variable : " << rVar << " not included in the Solution step data ";
        KRATOS_ERROR_IF_NOT(rModelPart.NodesBegin()->SolutionStepsDataHas(rReactionVar)) << "ERROR:: Reaction Variable : " << rReactionVar << " not included in the Solution step data ";
    }

    // If in debug we do a check for all nodes
#ifdef KRATOS_DEBUG
    CheckVariableExists(rVar, rModelPart.Nodes());
    CheckVariableExists(rReactionVar, rModelPart.Nodes());
#endif

    rModelPart.GetNodalSolutionStepVariablesList().AddDof(&rVar, &rReactionVar);

    #pragma omp parallel for
    for (int k = 0; k < static_cast<int>(rModelPart.NumberOfNodes()); ++k) {
        auto it_node = rModelPart.NodesBegin() + k;
        it_node->AddDof(rVar,rReactionVar);
    }

    KRATOS_CATCH("")
}
/**
 * @brief This method checks the variable keys
 * @return True if all the keys are correct
 */
bool CheckVariableKeys();
/**
 * @brief This method updates the current nodal coordinates back to the initial coordinates
 * @param rNodes the nodes to be updated
 */
void UpdateCurrentToInitialConfiguration(const ModelPart::NodesContainerType& rNodes);
/**
 * @brief This method updates the initial nodal coordinates to the current coordinates
 * @param rNodes the nodes to be updated
 */
void UpdateInitialToCurrentConfiguration(const ModelPart::NodesContainerType& rNodes);
/**
 * @brief This method updates the current coordinates
 * For each node, this method takes the value of the provided variable and updates the
 * current position as the initial position (X0, Y0, Z0) plus such variable value
 * @param rNodes the nodes whose position is updated
 * @param rUpdateVariable variable to retrieve the updating values from (DISPLACEMENT by default)
 * @param BufferPosition buffer position the updating values are read from
 */
void UpdateCurrentPosition(
const ModelPart::NodesContainerType& rNodes,
const ArrayVarType& rUpdateVariable = DISPLACEMENT,
const IndexType BufferPosition = 0
);
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
 * @brief Auxiliary double initialize method
 * Auxiliary method to initialize a double value (used by SumHistoricalVariable,
 * whose data type may be scalar or array).
 * @param rValue Variable to initialize
 */
void AuxiliaryInitializeValue(double &rValue);
/**
 * @brief Auxiliary array initialize method
 * Auxiliary method to initialize an array value
 * @param rValue Variable to initialize
 */
void AuxiliaryInitializeValue(array_1d<double,3> &rValue);
/**
 * @brief Auxiliary scalar reduce method
 * Auxiliary method to perform the reduction of a scalar value
 * (merges a per-thread partial sum into the shared accumulator).
 * @param rPrivateValue Private variable to reduce
 * @param rSumValue Variable to save the reduction
 */
void AuxiliaryAtomicAdd(
const double &rPrivateValue,
double &rSumValue
);
/**
 * @brief Auxiliary array reduce method
 * Auxiliary method to perform the reduction of an array value
 * @param rPrivateValue Private variable to reduce
 * @param rSumValue Variable to save the reduction
 */
void AuxiliaryAtomicAdd(
const array_1d<double,3> &rPrivateValue,
array_1d<double,3> &rSumValue
);
/**
 * @brief This is auxiliar method to check the keys
 * Walks every registered component of TVarType and reports (to stdout)
 * registrations whose name is empty/"NONE" or whose registration key does not
 * match the variable's own name.
 * @return True if all the keys are OK
 */
template< class TVarType >
bool CheckVariableKeysHelper()
{
    KRATOS_TRY

    for (const auto& var : KratosComponents< TVarType >::GetComponents()) {
        if (var.first == "NONE" || var.first == "")
            std::cout << " var first is NONE or empty " << var.first << var.second << std::endl;
        if (var.second->Name() == "NONE" || var.second->Name() == "")
            std::cout << var.first << var.second << std::endl;
        if (var.first != var.second->Name()) //name of registration does not correspond to the var name
            // Fix: the variable name was missing from the message (it printed
            // "Variable Name = " followed by nothing).
            std::cout << "Registration Name = " << var.first << " Variable Name = " << var.second->Name() << std::endl;
    }

    return true;

    KRATOS_CATCH("")
}
// Returns the requested entity container (nodes/elements/conditions) of the
// model part; specializations are defined elsewhere. Non-const overload.
template <class TContainerType>
TContainerType& GetContainer(ModelPart& rModelPart);
// Const overload of the above.
template <class TContainerType>
const TContainerType& GetContainer(const ModelPart& rModelPart);
template <class TContainerType, class TSetterFunction, class TGetterFunction>
void CopyModelPartFlaggedVariable(
const ModelPart& rOriginModelPart,
ModelPart& rDestinationModelPart,
const Flags& rFlag,
const bool CheckValue,
TSetterFunction&& rSetterFunction,
TGetterFunction&& rGetterFunction)
{
KRATOS_TRY
const auto& r_origin_container = GetContainer<TContainerType>(rOriginModelPart);
auto& r_destination_container = GetContainer<TContainerType>(rDestinationModelPart);
const int number_of_origin_items = r_origin_container.size();
const int number_of_destination_items = r_destination_container.size();
KRATOS_ERROR_IF_NOT(number_of_origin_items == number_of_destination_items)
<< "Origin ( " << rOriginModelPart.Name() << " ) and destination ( "
<< rDestinationModelPart.Name() << " ) model parts have different number of items."
<< "\n\t- Number of origin items: " << number_of_origin_items
<< "\n\t- Number of destination items: " << number_of_destination_items
<< std::endl;
IndexPartition<int>(number_of_origin_items).for_each([&](int i_node) {
const auto& r_orig_item = *(r_origin_container.begin() + i_node);
auto& r_dest_item = *(r_destination_container.begin() + i_node);
if (r_orig_item.Is(rFlag) == CheckValue) {
rSetterFunction(r_dest_item, rGetterFunction(r_orig_item));
}
});
KRATOS_CATCH("");
}
///@}
///@name Private Acces
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class VariableUtils */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_VARIABLE_UTILS defined */
|
TSDFVoxelGridImpl.h | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <atomic>
#include <cmath>
#include "open3d/core/Dispatch.h"
#include "open3d/core/Dtype.h"
#include "open3d/core/MemoryManager.h"
#include "open3d/core/SizeVector.h"
#include "open3d/core/Tensor.h"
#include "open3d/t/geometry/Utility.h"
#include "open3d/t/geometry/kernel/GeometryIndexer.h"
#include "open3d/t/geometry/kernel/GeometryMacros.h"
#include "open3d/t/geometry/kernel/TSDFVoxel.h"
#include "open3d/t/geometry/kernel/TSDFVoxelGrid.h"
#include "open3d/utility/Logging.h"
#include "open3d/utility/Timer.h"
namespace open3d {
namespace t {
namespace geometry {
namespace kernel {
namespace tsdf {
// Integrate one observed depth (and optionally color) frame into the active
// voxel blocks of a sparse TSDF grid. The same body compiles twice: as
// IntegrateCUDA when built by NVCC, as IntegrateCPU otherwise; the launcher
// namespace alias below selects the matching parallel-for backend.
//
// depth / color     : input images (color integration is skipped if empty)
// indices           : buffer indices of the blocks touched by this frame
// block_keys        : integer 3D coordinates of each block
// block_values      : voxel payload buffer, updated in place
// intrinsics / extrinsics : camera model used by transform_indexer
// resolution..depth_max   : voxel-grid and truncation parameters
#if defined(__CUDACC__)
void IntegrateCUDA
#else
void IntegrateCPU
#endif
(const core::Tensor& depth,
const core::Tensor& color,
const core::Tensor& indices,
const core::Tensor& block_keys,
core::Tensor& block_values,
// Transforms
const core::Tensor& intrinsics,
const core::Tensor& extrinsics,
// Parameters
int64_t resolution,
float voxel_size,
float sdf_trunc,
float depth_scale,
float depth_max) {
// Parameters
int64_t resolution3 = resolution * resolution * resolution;
// Shape / transform indexers, no data involved
NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
TransformIndexer transform_indexer(intrinsics, extrinsics, voxel_size);
// Real data indexer
NDArrayIndexer depth_indexer(depth, 2);
NDArrayIndexer block_keys_indexer(block_keys, 1);
NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
// Optional color integration
NDArrayIndexer color_indexer;
bool integrate_color = false;
if (color.NumElements() != 0) {
color_indexer = NDArrayIndexer(color, 2);
integrate_color = true;
}
// Plain arrays that does not require indexers
const int* indices_ptr = indices.GetDataPtr<int>();
// One workload per voxel of every active block.
int64_t n = indices.GetLength() * resolution3;
#if defined(__CUDACC__)
namespace launcher = core::kernel::cuda_launcher;
#else
namespace launcher = core::kernel::cpu_launcher;
#endif
// DISPATCH_BYTESIZE_TO_VOXEL binds the concrete voxel type `voxel_t` from
// the payload byte size, then the lambda is run for every voxel in parallel.
DISPATCH_BYTESIZE_TO_VOXEL(
voxel_block_buffer_indexer.ElementByteSize(), [&]() {
launcher::ParallelFor(n, [=] OPEN3D_DEVICE(
int64_t workload_idx) {
// Natural index (0, N) -> (block_idx, voxel_idx)
int block_idx = indices_ptr[workload_idx / resolution3];
int voxel_idx = workload_idx % resolution3;
/// Coordinate transform
// block_idx -> (x_block, y_block, z_block)
int* block_key_ptr =
block_keys_indexer.GetDataPtr<int>(block_idx);
int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
int64_t zb = static_cast<int64_t>(block_key_ptr[2]);
// voxel_idx -> (x_voxel, y_voxel, z_voxel)
int64_t xv, yv, zv;
voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
// coordinate in world (in voxel)
int64_t x = (xb * resolution + xv);
int64_t y = (yb * resolution + yv);
int64_t z = (zb * resolution + zv);
// coordinate in camera (in voxel -> in meter)
float xc, yc, zc, u, v;
transform_indexer.RigidTransform(
static_cast<float>(x), static_cast<float>(y),
static_cast<float>(z), &xc, &yc, &zc);
// coordinate in image (in pixel)
transform_indexer.Project(xc, yc, zc, &u, &v);
// Voxel projects outside the depth image: nothing to update.
if (!depth_indexer.InBoundary(u, v)) {
return;
}
// Associate image workload and compute SDF and TSDF.
float depth = *depth_indexer.GetDataPtr<float>(
static_cast<int64_t>(u),
static_cast<int64_t>(v)) /
depth_scale;
// Signed distance along the viewing ray: positive in front of
// the observed surface, negative behind it.
float sdf = (depth - zc);
// Reject invalid depth, voxels behind the camera, and voxels
// more than sdf_trunc behind the observed surface.
if (depth <= 0 || depth > depth_max || zc <= 0 ||
sdf < -sdf_trunc) {
return;
}
// Truncate and normalize the signed distance to [-1, 1].
sdf = sdf < sdf_trunc ? sdf : sdf_trunc;
sdf /= sdf_trunc;
// Associate voxel workload and update TSDF/Weights
voxel_t* voxel_ptr =
voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
xv, yv, zv, block_idx);
if (integrate_color) {
float* color_ptr = color_indexer.GetDataPtr<float>(
static_cast<int64_t>(u),
static_cast<int64_t>(v));
voxel_ptr->Integrate(sdf, color_ptr[0], color_ptr[1],
color_ptr[2]);
} else {
voxel_ptr->Integrate(sdf);
}
});
});
#if defined(__CUDACC__)
OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
#endif
}
// Extract a surface point cloud (plus optional normals and colors) from the
// TSDF voxel grid by locating zero crossings of the TSDF along the +x/+y/+z
// edges of each voxel. Compiles as ExtractSurfacePointsCUDA under NVCC and
// ExtractSurfacePointsCPU otherwise.
//
// indices / nb_indices / nb_masks : active block indices and their 3x3x3
//   neighborhood lookup tables (used to read voxels across block borders).
// points / normals / colors : outputs; allocated here when empty.
// valid_size : in/out. Pass a non-negative estimate of the point count to
//   skip the counting pass; pass a negative value to trigger a (slower)
//   2-pass extraction. On return it holds the actual number of points.
#if defined(__CUDACC__)
void ExtractSurfacePointsCUDA
#else
void ExtractSurfacePointsCPU
#endif
(const core::Tensor& indices,
const core::Tensor& nb_indices,
const core::Tensor& nb_masks,
const core::Tensor& block_keys,
const core::Tensor& block_values,
core::Tensor& points,
utility::optional<std::reference_wrapper<core::Tensor>> normals,
utility::optional<std::reference_wrapper<core::Tensor>> colors,
int64_t resolution,
float voxel_size,
float weight_threshold,
int& valid_size) {
// Parameters
int64_t resolution3 = resolution * resolution * resolution;
// Shape / transform indexers, no data involved
NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
// Real data indexer
NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
NDArrayIndexer block_keys_indexer(block_keys, 1);
NDArrayIndexer nb_block_masks_indexer(nb_masks, 2);
NDArrayIndexer nb_block_indices_indexer(nb_indices, 2);
// Plain arrays that does not require indexers
const int64_t* indices_ptr = indices.GetDataPtr<int64_t>();
int64_t n_blocks = indices.GetLength();
int64_t n = n_blocks * resolution3;
// Output
// Shared point counter: a 1-element device tensor on CUDA, a std::atomic on
// CPU. OPEN3D_ATOMIC_ADD presumably dispatches on both — confirm in macros.
#if defined(__CUDACC__)
core::Tensor count(std::vector<int>{0}, {1}, core::Dtype::Int32,
block_values.GetDevice());
int* count_ptr = count.GetDataPtr<int>();
#else
std::atomic<int> count_atomic(0);
std::atomic<int>* count_ptr = &count_atomic;
#endif
#if defined(__CUDACC__)
namespace launcher = core::kernel::cuda_launcher;
#else
namespace launcher = core::kernel::cpu_launcher;
#endif
// Pass A (optional): count surface crossings to size the output buffers.
if (valid_size < 0) {
utility::LogWarning(
"No estimated max point cloud size provided, using a 2-pass "
"estimation. Surface extraction could be slow.");
// This pass determines valid number of points.
DISPATCH_BYTESIZE_TO_VOXEL(
voxel_block_buffer_indexer.ElementByteSize(), [&]() {
launcher::ParallelFor(n, [=] OPEN3D_DEVICE(
int64_t workload_idx) {
// Reads a neighbor voxel, possibly across a block border;
// returns nullptr if the neighbor block is not allocated.
auto GetVoxelAt =
[&] OPEN3D_DEVICE(
int xo, int yo, int zo,
int curr_block_idx) -> voxel_t* {
return DeviceGetVoxelAt<voxel_t>(
xo, yo, zo, curr_block_idx,
static_cast<int>(resolution),
nb_block_masks_indexer,
nb_block_indices_indexer,
voxel_block_buffer_indexer);
};
// Natural index (0, N) -> (block_idx,
// voxel_idx)
int64_t workload_block_idx = workload_idx / resolution3;
int64_t block_idx = indices_ptr[workload_block_idx];
int64_t voxel_idx = workload_idx % resolution3;
// voxel_idx -> (x_voxel, y_voxel, z_voxel)
int64_t xv, yv, zv;
voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
voxel_t* voxel_ptr =
voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
xv, yv, zv, block_idx);
float tsdf_o = voxel_ptr->GetTSDF();
float weight_o = voxel_ptr->GetWeight();
// Unobserved / unreliable voxel: no surface here.
if (weight_o <= weight_threshold) return;
// Enumerate x-y-z directions
for (int i = 0; i < 3; ++i) {
voxel_t* ptr = GetVoxelAt(
static_cast<int>(xv) + (i == 0),
static_cast<int>(yv) + (i == 1),
static_cast<int>(zv) + (i == 2),
static_cast<int>(workload_block_idx));
if (ptr == nullptr) continue;
float tsdf_i = ptr->GetTSDF();
float weight_i = ptr->GetWeight();
// A sign change between neighbors marks a zero
// crossing -> one surface point on this edge.
if (weight_i > weight_threshold &&
tsdf_i * tsdf_o < 0) {
OPEN3D_ATOMIC_ADD(count_ptr, 1);
}
}
});
});
#if defined(__CUDACC__)
valid_size = count[0].Item<int>();
count[0] = 0;
#else
valid_size = (*count_ptr).load();
(*count_ptr) = 0;
#endif
}
// Allocate outputs lazily; caller-provided non-empty tensors are reused.
int max_count = valid_size;
if (points.GetLength() == 0) {
points = core::Tensor({max_count, 3}, core::Dtype::Float32,
block_values.GetDevice());
}
NDArrayIndexer point_indexer(points, 1);
// Normals
bool extract_normal = false;
NDArrayIndexer normal_indexer;
if (normals.has_value()) {
extract_normal = true;
if (normals.value().get().GetLength() == 0) {
normals.value().get() =
core::Tensor({max_count, 3}, core::Dtype::Float32,
block_values.GetDevice());
}
normal_indexer = NDArrayIndexer(normals.value().get(), 1);
}
// This pass extracts exact surface points.
DISPATCH_BYTESIZE_TO_VOXEL(
voxel_block_buffer_indexer.ElementByteSize(), [&]() {
// Colors
bool extract_color = false;
NDArrayIndexer color_indexer;
if (voxel_t::HasColor() && colors.has_value()) {
extract_color = true;
if (colors.value().get().GetLength() == 0) {
colors.value().get() = core::Tensor(
{max_count, 3}, core::Dtype::Float32,
block_values.GetDevice());
}
color_indexer = NDArrayIndexer(colors.value().get(), 1);
}
launcher::ParallelFor(n, [=] OPEN3D_DEVICE(
int64_t workload_idx) {
auto GetVoxelAt = [&] OPEN3D_DEVICE(
int xo, int yo, int zo,
int curr_block_idx) -> voxel_t* {
return DeviceGetVoxelAt<voxel_t>(
xo, yo, zo, curr_block_idx,
static_cast<int>(resolution),
nb_block_masks_indexer,
nb_block_indices_indexer,
voxel_block_buffer_indexer);
};
// Estimates the TSDF gradient (surface normal) at a voxel.
auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo,
int curr_block_idx,
float* n) {
return DeviceGetNormalAt<voxel_t>(
xo, yo, zo, curr_block_idx, n,
static_cast<int>(resolution), voxel_size,
nb_block_masks_indexer,
nb_block_indices_indexer,
voxel_block_buffer_indexer);
};
// Natural index (0, N) -> (block_idx, voxel_idx)
int64_t workload_block_idx = workload_idx / resolution3;
int64_t block_idx = indices_ptr[workload_block_idx];
int64_t voxel_idx = workload_idx % resolution3;
/// Coordinate transform
// block_idx -> (x_block, y_block, z_block)
int* block_key_ptr =
block_keys_indexer.GetDataPtr<int>(block_idx);
int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
int64_t zb = static_cast<int64_t>(block_key_ptr[2]);
// voxel_idx -> (x_voxel, y_voxel, z_voxel)
int64_t xv, yv, zv;
voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
voxel_t* voxel_ptr =
voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
xv, yv, zv, block_idx);
float tsdf_o = voxel_ptr->GetTSDF();
float weight_o = voxel_ptr->GetWeight();
if (weight_o <= weight_threshold) return;
int64_t x = xb * resolution + xv;
int64_t y = yb * resolution + yv;
int64_t z = zb * resolution + zv;
float no[3] = {0}, ni[3] = {0};
if (extract_normal) {
GetNormalAt(static_cast<int>(xv), static_cast<int>(yv),
static_cast<int>(zv),
static_cast<int>(workload_block_idx), no);
}
// Enumerate x-y-z axis
for (int i = 0; i < 3; ++i) {
voxel_t* ptr = GetVoxelAt(
static_cast<int>(xv) + (i == 0),
static_cast<int>(yv) + (i == 1),
static_cast<int>(zv) + (i == 2),
static_cast<int>(workload_block_idx));
if (ptr == nullptr) continue;
float tsdf_i = ptr->GetTSDF();
float weight_i = ptr->GetWeight();
if (weight_i > weight_threshold &&
tsdf_i * tsdf_o < 0) {
// Linear interpolation factor of the zero crossing
// between this voxel and its neighbor.
float ratio = (0 - tsdf_o) / (tsdf_i - tsdf_o);
int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
// Caller's estimate was too small; drop the point
// rather than write out of bounds.
if (idx >= valid_size) {
printf("Point cloud size larger than "
"estimated, please increase the "
"estimation!\n");
return;
}
float* point_ptr =
point_indexer.GetDataPtr<float>(idx);
point_ptr[0] =
voxel_size * (x + ratio * int(i == 0));
point_ptr[1] =
voxel_size * (y + ratio * int(i == 1));
point_ptr[2] =
voxel_size * (z + ratio * int(i == 2));
if (extract_color) {
// Interpolated color, converted from [0, 255]
// voxel storage to [0, 1] output.
float* color_ptr =
color_indexer.GetDataPtr<float>(idx);
float r_o = voxel_ptr->GetR();
float g_o = voxel_ptr->GetG();
float b_o = voxel_ptr->GetB();
float r_i = ptr->GetR();
float g_i = ptr->GetG();
float b_i = ptr->GetB();
color_ptr[0] =
((1 - ratio) * r_o + ratio * r_i) /
255.0f;
color_ptr[1] =
((1 - ratio) * g_o + ratio * g_i) /
255.0f;
color_ptr[2] =
((1 - ratio) * b_o + ratio * b_i) /
255.0f;
}
if (extract_normal) {
GetNormalAt(
static_cast<int>(xv) + (i == 0),
static_cast<int>(yv) + (i == 1),
static_cast<int>(zv) + (i == 2),
static_cast<int>(workload_block_idx),
ni);
float* normal_ptr =
normal_indexer.GetDataPtr<float>(idx);
// Interpolate the two endpoint normals, then
// renormalize (1e-5 guards a zero gradient).
float nx = (1 - ratio) * no[0] + ratio * ni[0];
float ny = (1 - ratio) * no[1] + ratio * ni[1];
float nz = (1 - ratio) * no[2] + ratio * ni[2];
float norm = static_cast<float>(
sqrt(nx * nx + ny * ny + nz * nz) +
1e-5);
normal_ptr[0] = nx / norm;
normal_ptr[1] = ny / norm;
normal_ptr[2] = nz / norm;
}
}
}
});
});
#if defined(__CUDACC__)
int total_count = count.Item<int>();
#else
int total_count = (*count_ptr).load();
#endif
utility::LogDebug("{} vertices extracted", total_count);
valid_size = total_count;
#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
#endif
}
// Extract a triangle mesh from the TSDF voxel grid via Marching Cubes, in
// three passes over the active blocks:
//   Pass 0 - compute the MC table index per voxel and mark (-1) the edges
//            that will own a vertex;
//   Pass 1 - (only if vertex_count < 0) count the marked edges;
//   Pass 2 - place one interpolated vertex per marked edge (plus optional
//            normals/colors);
//   Pass 3 - assemble triangles from the per-edge vertex indices.
// Compiles as ExtractSurfaceMeshCUDA under NVCC, ExtractSurfaceMeshCPU
// otherwise. vertex_count is in/out: negative requests the counting pass.
// vtx_shifts / edge_shifts / edge_table / tri_table / tri_count are the
// standard Marching Cubes lookup tables defined elsewhere in this module.
#if defined(__CUDACC__)
void ExtractSurfaceMeshCUDA
#else
void ExtractSurfaceMeshCPU
#endif
(const core::Tensor& indices,
const core::Tensor& inv_indices,
const core::Tensor& nb_indices,
const core::Tensor& nb_masks,
const core::Tensor& block_keys,
const core::Tensor& block_values,
core::Tensor& vertices,
core::Tensor& triangles,
utility::optional<std::reference_wrapper<core::Tensor>> normals,
utility::optional<std::reference_wrapper<core::Tensor>> colors,
int64_t resolution,
float voxel_size,
float weight_threshold,
int& vertex_count) {
int64_t resolution3 = resolution * resolution * resolution;
// Shape / transform indexers, no data involved
NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
int n_blocks = static_cast<int>(indices.GetLength());
// TODO(wei): profile performance by replacing the table to a hashmap.
// Voxel-wise mesh info. 4 channels correspond to:
// 3 edges' corresponding vertex index + 1 table index.
core::Tensor mesh_structure;
try {
mesh_structure = core::Tensor::Zeros(
{n_blocks, resolution, resolution, resolution, 4},
core::Dtype::Int32, block_keys.GetDevice());
} catch (const std::runtime_error&) {
// Allocation of the helper structure failed (typically OOM on GPU).
utility::LogError(
"[MeshExtractionKernel] Unable to allocate assistance mesh "
"structure for Marching "
"Cubes with {} active voxel blocks. Please consider using a "
"larger voxel size (currently {}) for TSDF "
"integration, or using tsdf_volume.cpu() to perform mesh "
"extraction on CPU.",
n_blocks, voxel_size);
}
// Real data indexer
NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
NDArrayIndexer mesh_structure_indexer(mesh_structure, 4);
NDArrayIndexer nb_block_masks_indexer(nb_masks, 2);
NDArrayIndexer nb_block_indices_indexer(nb_indices, 2);
// Plain arrays that does not require indexers
const int64_t* indices_ptr = indices.GetDataPtr<int64_t>();
const int64_t* inv_indices_ptr = inv_indices.GetDataPtr<int64_t>();
int64_t n = n_blocks * resolution3;
#if defined(__CUDACC__)
namespace launcher = core::kernel::cuda_launcher;
#else
namespace launcher = core::kernel::cpu_launcher;
#endif
int64_t voxel_bytesize = voxel_block_buffer_indexer.ElementByteSize();
// Pass 0: analyze mesh structure, set up one-on-one correspondences
// from edges to vertices.
DISPATCH_BYTESIZE_TO_VOXEL(voxel_bytesize, [&]() {
launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t widx) {
// Reads a neighbor voxel, possibly across a block border;
// returns nullptr if the neighbor block is not allocated.
auto GetVoxelAt = [&] OPEN3D_DEVICE(
int xo, int yo, int zo,
int curr_block_idx) -> voxel_t* {
return DeviceGetVoxelAt<voxel_t>(
xo, yo, zo, curr_block_idx,
static_cast<int>(resolution), nb_block_masks_indexer,
nb_block_indices_indexer, voxel_block_buffer_indexer);
};
// Natural index (0, N) -> (block_idx, voxel_idx)
int64_t workload_block_idx = widx / resolution3;
int64_t voxel_idx = widx % resolution3;
// voxel_idx -> (x_voxel, y_voxel, z_voxel)
int64_t xv, yv, zv;
voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
// Check per-vertex sign in the cube to determine cube
// type
int table_idx = 0;
for (int i = 0; i < 8; ++i) {
voxel_t* voxel_ptr_i =
GetVoxelAt(static_cast<int>(xv) + vtx_shifts[i][0],
static_cast<int>(yv) + vtx_shifts[i][1],
static_cast<int>(zv) + vtx_shifts[i][2],
static_cast<int>(workload_block_idx));
// Any missing or unobserved cube corner aborts this cube.
if (voxel_ptr_i == nullptr) return;
float tsdf_i = voxel_ptr_i->GetTSDF();
float weight_i = voxel_ptr_i->GetWeight();
if (weight_i <= weight_threshold) return;
table_idx |= ((tsdf_i < 0) ? (1 << i) : 0);
}
int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>(
xv, yv, zv, workload_block_idx);
mesh_struct_ptr[3] = table_idx;
// All corners on the same side: no surface in this cube.
if (table_idx == 0 || table_idx == 255) return;
// Check per-edge sign determine the cube type
int edges_with_vertices = edge_table[table_idx];
for (int i = 0; i < 12; ++i) {
if (edges_with_vertices & (1 << i)) {
int64_t xv_i = xv + edge_shifts[i][0];
int64_t yv_i = yv + edge_shifts[i][1];
int64_t zv_i = zv + edge_shifts[i][2];
int edge_i = edge_shifts[i][3];
// The edge may belong to a neighboring block: compute the
// block offset (dxb/dyb/dzb in {0, 1}) and the local coord.
int dxb = static_cast<int>(xv_i / resolution);
int dyb = static_cast<int>(yv_i / resolution);
int dzb = static_cast<int>(zv_i / resolution);
int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;
int64_t block_idx_i =
*nb_block_indices_indexer.GetDataPtr<int64_t>(
workload_block_idx, nb_idx);
int* mesh_ptr_i = mesh_structure_indexer.GetDataPtr<int>(
xv_i - dxb * resolution, yv_i - dyb * resolution,
zv_i - dzb * resolution,
inv_indices_ptr[block_idx_i]);
// Non-atomic write, but we are safe
mesh_ptr_i[edge_i] = -1;
}
}
});
});
// Pass 1: determine valid number of vertices (if not preset)
// NOTE(review): count is created with shape {} here but {1} elsewhere in
// this file — both hold one element; confirm this is intentional.
#if defined(__CUDACC__)
core::Tensor count(std::vector<int>{0}, {}, core::Dtype::Int32,
block_values.GetDevice());
int* count_ptr = count.GetDataPtr<int>();
#else
std::atomic<int> count_atomic(0);
std::atomic<int>* count_ptr = &count_atomic;
#endif
if (vertex_count < 0) {
launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t widx) {
// Natural index (0, N) -> (block_idx, voxel_idx)
int64_t workload_block_idx = widx / resolution3;
int64_t voxel_idx = widx % resolution3;
// voxel_idx -> (x_voxel, y_voxel, z_voxel)
int64_t xv, yv, zv;
voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
// Obtain voxel's mesh struct ptr
int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>(
xv, yv, zv, workload_block_idx);
// Early quit -- no allocated vertex to compute
if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
mesh_struct_ptr[2] != -1) {
return;
}
// Enumerate 3 edges in the voxel
for (int e = 0; e < 3; ++e) {
int vertex_idx = mesh_struct_ptr[e];
if (vertex_idx != -1) continue;
OPEN3D_ATOMIC_ADD(count_ptr, 1);
}
});
#if defined(__CUDACC__)
vertex_count = count.Item<int>();
#else
vertex_count = (*count_ptr).load();
#endif
}
utility::LogDebug("Total vertex count = {}", vertex_count);
vertices = core::Tensor({vertex_count, 3}, core::Dtype::Float32,
block_values.GetDevice());
bool extract_normal = false;
NDArrayIndexer normal_indexer;
if (normals.has_value()) {
extract_normal = true;
normals.value().get() =
core::Tensor({vertex_count, 3}, core::Dtype::Float32,
block_values.GetDevice());
normal_indexer = NDArrayIndexer(normals.value().get(), 1);
}
NDArrayIndexer block_keys_indexer(block_keys, 1);
NDArrayIndexer vertex_indexer(vertices, 1);
// Reset the shared counter before reusing it as the vertex allocator.
#if defined(__CUDACC__)
count = core::Tensor(std::vector<int>{0}, {}, core::Dtype::Int32,
block_values.GetDevice());
count_ptr = count.GetDataPtr<int>();
#else
(*count_ptr) = 0;
#endif
// Pass 2: extract vertices.
DISPATCH_BYTESIZE_TO_VOXEL(voxel_bytesize, [&]() {
bool extract_color = false;
NDArrayIndexer color_indexer;
if (voxel_t::HasColor() && colors.has_value()) {
extract_color = true;
colors.value().get() =
core::Tensor({vertex_count, 3}, core::Dtype::Float32,
block_values.GetDevice());
color_indexer = NDArrayIndexer(colors.value().get(), 1);
}
launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t widx) {
auto GetVoxelAt = [&] OPEN3D_DEVICE(
int xo, int yo, int zo,
int curr_block_idx) -> voxel_t* {
return DeviceGetVoxelAt<voxel_t>(
xo, yo, zo, curr_block_idx,
static_cast<int>(resolution), nb_block_masks_indexer,
nb_block_indices_indexer, voxel_block_buffer_indexer);
};
// Estimates the TSDF gradient (surface normal) at a voxel.
auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo,
int curr_block_idx, float* n) {
return DeviceGetNormalAt<voxel_t>(
xo, yo, zo, curr_block_idx, n,
static_cast<int>(resolution), voxel_size,
nb_block_masks_indexer, nb_block_indices_indexer,
voxel_block_buffer_indexer);
};
// Natural index (0, N) -> (block_idx, voxel_idx)
int64_t workload_block_idx = widx / resolution3;
int64_t block_idx = indices_ptr[workload_block_idx];
int64_t voxel_idx = widx % resolution3;
// block_idx -> (x_block, y_block, z_block)
int* block_key_ptr = block_keys_indexer.GetDataPtr<int>(block_idx);
int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
int64_t zb = static_cast<int64_t>(block_key_ptr[2]);
// voxel_idx -> (x_voxel, y_voxel, z_voxel)
int64_t xv, yv, zv;
voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
// global coordinate (in voxels)
int64_t x = xb * resolution + xv;
int64_t y = yb * resolution + yv;
int64_t z = zb * resolution + zv;
// Obtain voxel's mesh struct ptr
int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>(
xv, yv, zv, workload_block_idx);
// Early quit -- no allocated vertex to compute
if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
mesh_struct_ptr[2] != -1) {
return;
}
// Obtain voxel ptr
voxel_t* voxel_ptr = voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
xv, yv, zv, block_idx);
float tsdf_o = voxel_ptr->GetTSDF();
float no[3] = {0}, ne[3] = {0};
if (extract_normal) {
GetNormalAt(static_cast<int>(xv), static_cast<int>(yv),
static_cast<int>(zv),
static_cast<int>(workload_block_idx), no);
}
// Enumerate 3 edges in the voxel
for (int e = 0; e < 3; ++e) {
int vertex_idx = mesh_struct_ptr[e];
if (vertex_idx != -1) continue;
voxel_t* voxel_ptr_e =
GetVoxelAt(static_cast<int>(xv) + (e == 0),
static_cast<int>(yv) + (e == 1),
static_cast<int>(zv) + (e == 2),
static_cast<int>(workload_block_idx));
OPEN3D_ASSERT(voxel_ptr_e != nullptr &&
"Internal error: GetVoxelAt returns nullptr.");
float tsdf_e = voxel_ptr_e->GetTSDF();
// Linear interpolation factor of the zero crossing.
float ratio = (0 - tsdf_o) / (tsdf_e - tsdf_o);
// Allocate a global vertex slot and record it so Pass 3 can
// look it up via the mesh structure.
int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
mesh_struct_ptr[e] = idx;
float ratio_x = ratio * int(e == 0);
float ratio_y = ratio * int(e == 1);
float ratio_z = ratio * int(e == 2);
float* vertex_ptr = vertex_indexer.GetDataPtr<float>(idx);
vertex_ptr[0] = voxel_size * (x + ratio_x);
vertex_ptr[1] = voxel_size * (y + ratio_y);
vertex_ptr[2] = voxel_size * (z + ratio_z);
if (extract_normal) {
float* normal_ptr = normal_indexer.GetDataPtr<float>(idx);
GetNormalAt(static_cast<int>(xv) + (e == 0),
static_cast<int>(yv) + (e == 1),
static_cast<int>(zv) + (e == 2),
static_cast<int>(workload_block_idx), ne);
// Interpolate endpoint normals, then renormalize
// (1e-5 guards a zero gradient).
float nx = (1 - ratio) * no[0] + ratio * ne[0];
float ny = (1 - ratio) * no[1] + ratio * ne[1];
float nz = (1 - ratio) * no[2] + ratio * ne[2];
float norm = static_cast<float>(
sqrt(nx * nx + ny * ny + nz * nz) + 1e-5);
normal_ptr[0] = nx / norm;
normal_ptr[1] = ny / norm;
normal_ptr[2] = nz / norm;
}
if (extract_color) {
// Interpolated color, converted from [0, 255] voxel
// storage to [0, 1] output.
float* color_ptr = color_indexer.GetDataPtr<float>(idx);
float r_o = voxel_ptr->GetR();
float g_o = voxel_ptr->GetG();
float b_o = voxel_ptr->GetB();
float r_e = voxel_ptr_e->GetR();
float g_e = voxel_ptr_e->GetG();
float b_e = voxel_ptr_e->GetB();
color_ptr[0] = ((1 - ratio) * r_o + ratio * r_e) / 255.0f;
color_ptr[1] = ((1 - ratio) * g_o + ratio * g_e) / 255.0f;
color_ptr[2] = ((1 - ratio) * b_o + ratio * b_e) / 255.0f;
}
}
});
});
// Pass 3: connect vertices and form triangles.
// Upper-bound estimate for the triangle buffer; trimmed by Slice below.
int triangle_count = vertex_count * 3;
triangles = core::Tensor({triangle_count, 3}, core::Dtype::Int64,
block_values.GetDevice());
NDArrayIndexer triangle_indexer(triangles, 1);
#if defined(__CUDACC__)
count = core::Tensor(std::vector<int>{0}, {}, core::Dtype::Int32,
block_values.GetDevice());
count_ptr = count.GetDataPtr<int>();
#else
(*count_ptr) = 0;
#endif
launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t widx) {
// Natural index (0, N) -> (block_idx, voxel_idx)
int64_t workload_block_idx = widx / resolution3;
int64_t voxel_idx = widx % resolution3;
// voxel_idx -> (x_voxel, y_voxel, z_voxel)
int64_t xv, yv, zv;
voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
// Obtain voxel's mesh struct ptr
int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>(
xv, yv, zv, workload_block_idx);
int table_idx = mesh_struct_ptr[3];
if (tri_count[table_idx] == 0) return;
// tri_table rows are -1-terminated lists of up to 5 triangles.
for (size_t tri = 0; tri < 16; tri += 3) {
if (tri_table[table_idx][tri] == -1) return;
int tri_idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
for (size_t vertex = 0; vertex < 3; ++vertex) {
int edge = tri_table[table_idx][tri + vertex];
int64_t xv_i = xv + edge_shifts[edge][0];
int64_t yv_i = yv + edge_shifts[edge][1];
int64_t zv_i = zv + edge_shifts[edge][2];
int64_t edge_i = edge_shifts[edge][3];
int dxb = static_cast<int>(xv_i / resolution);
int dyb = static_cast<int>(yv_i / resolution);
int dzb = static_cast<int>(zv_i / resolution);
int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;
int64_t block_idx_i =
*nb_block_indices_indexer.GetDataPtr<int64_t>(
workload_block_idx, nb_idx);
int* mesh_struct_ptr_i = mesh_structure_indexer.GetDataPtr<int>(
xv_i - dxb * resolution, yv_i - dyb * resolution,
zv_i - dzb * resolution, inv_indices_ptr[block_idx_i]);
int64_t* triangle_ptr =
triangle_indexer.GetDataPtr<int64_t>(tri_idx);
// Vertices stored in reverse order (2 - vertex) — presumably
// to fix the triangle winding/orientation; confirm upstream.
triangle_ptr[2 - vertex] = mesh_struct_ptr_i[edge_i];
}
}
});
#if defined(__CUDACC__)
triangle_count = count.Item<int>();
#else
triangle_count = (*count_ptr).load();
#endif
utility::LogInfo("Total triangle count = {}", triangle_count);
// Trim the over-allocated triangle buffer to the actual count.
triangles = triangles.Slice(0, 0, triangle_count);
}
/// Estimate a per-pixel depth range map (min, max) for ray casting by
/// projecting every active voxel block's bounding box into a down-sampled
/// image. Compiles as EstimateRangeCUDA under NVCC, EstimateRangeCPU
/// otherwise.
///
/// block_keys       : integer 3D coordinates of the active blocks
/// range_minmax_map : output (h/down_factor, w/down_factor, 2) Float32 map
/// intrinsics / extrinsics : world-to-camera projection model
/// h, w             : full-resolution image size; down_factor: subsampling
/// block_resolution / voxel_size : grid geometry
/// depth_min / depth_max : clamp values used to initialize the range map
#if defined(__CUDACC__)
void EstimateRangeCUDA
#else
void EstimateRangeCPU
#endif
(const core::Tensor& block_keys,
 core::Tensor& range_minmax_map,
 const core::Tensor& intrinsics,
 const core::Tensor& extrinsics,
 int h,
 int w,
 int down_factor,
 int64_t block_resolution,
 float voxel_size,
 float depth_min,
 float depth_max) {
    // TODO(wei): reserve it in a reusable buffer
    // Every 2 channels: (min, max)
    int h_down = h / down_factor;
    int w_down = w / down_factor;
    range_minmax_map = core::Tensor({h_down, w_down, 2}, core::Dtype::Float32,
                                    block_keys.GetDevice());
    NDArrayIndexer range_map_indexer(range_minmax_map, 2);

    // Every 6 channels: (z_min, z_max, v_min, u_min, v_max, u_max)
    // (the original comment listed the order wrong; channels 0-1 are depth,
    // see the Pass 0 writes below).
    const int fragment_size = 16;
    const int frag_buffer_size = 65535;

    // TODO(wei): explicit buffer
    core::Tensor fragment_buffer =
            core::Tensor({frag_buffer_size, 6}, core::Dtype::Float32,
                         block_keys.GetDevice());

    NDArrayIndexer frag_buffer_indexer(fragment_buffer, 1);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    TransformIndexer w2c_transform_indexer(intrinsics, extrinsics);

    // Shared fragment counter: device tensor on CUDA, std::atomic on CPU.
#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Dtype::Int32,
                       block_keys.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

#if defined(__CUDACC__)
    namespace launcher = core::kernel::cuda_launcher;
#else
    namespace launcher = core::kernel::cpu_launcher;
    using std::max;
    using std::min;
#endif

    // Pass 0: iterate over blocks, fill-in a rendering fragment array.
    launcher::ParallelFor(
            block_keys.GetLength(), [=] OPEN3D_DEVICE(int64_t workload_idx) {
                int* key = block_keys_indexer.GetDataPtr<int>(workload_idx);

                int u_min = w_down - 1, v_min = h_down - 1, u_max = 0,
                    v_max = 0;
                float z_min = depth_max, z_max = depth_min;

                float xc, yc, zc, u, v;

                // Project the 8 block corners to the low-res image and
                // accumulate their bounding rectangle and depth range.
                for (int i = 0; i < 8; ++i) {
                    float xw = (key[0] + ((i & 1) > 0)) * block_resolution *
                               voxel_size;
                    float yw = (key[1] + ((i & 2) > 0)) * block_resolution *
                               voxel_size;
                    float zw = (key[2] + ((i & 4) > 0)) * block_resolution *
                               voxel_size;

                    w2c_transform_indexer.RigidTransform(xw, yw, zw, &xc, &yc,
                                                         &zc);
                    if (zc <= 0) continue;

                    // Project to the down sampled image buffer
                    w2c_transform_indexer.Project(xc, yc, zc, &u, &v);
                    u /= down_factor;
                    v /= down_factor;

                    v_min = min(static_cast<int>(floorf(v)), v_min);
                    v_max = max(static_cast<int>(ceilf(v)), v_max);

                    u_min = min(static_cast<int>(floorf(u)), u_min);
                    u_max = max(static_cast<int>(ceilf(u)), u_max);

                    z_min = min(z_min, zc);
                    z_max = max(z_max, zc);
                }

                v_min = max(0, v_min);
                v_max = min(h_down - 1, v_max);

                u_min = max(0, u_min);
                u_max = min(w_down - 1, u_max);

                // Degenerate or fully clipped rectangle: nothing to render.
                if (v_min >= v_max || u_min >= u_max || z_min >= z_max) return;

                // Divide the rectangle into small 16x16 fragments
                int frag_v_count =
                        ceilf(float(v_max - v_min + 1) / float(fragment_size));
                int frag_u_count =
                        ceilf(float(u_max - u_min + 1) / float(fragment_size));

                int frag_count = frag_v_count * frag_u_count;
                // BUGFIX: reserve frag_count slots, not 1. The original
                // OPEN3D_ATOMIC_ADD(count_ptr, 1) made concurrent blocks
                // overwrite each other's fragments and left the final
                // counter equal to the block count instead of the fragment
                // count consumed by Pass 1.
                int frag_count_start =
                        OPEN3D_ATOMIC_ADD(count_ptr, frag_count);
                int frag_count_end = frag_count_start + frag_count;
                if (frag_count_end >= frag_buffer_size) {
                    printf("Fragment count exceeding buffer size, abort!\n");
                    // Skip this block instead of writing past the buffer.
                    return;
                }

                int offset = 0;
                for (int frag_v = 0; frag_v < frag_v_count; ++frag_v) {
                    for (int frag_u = 0; frag_u < frag_u_count;
                         ++frag_u, ++offset) {
                        float* frag_ptr = frag_buffer_indexer.GetDataPtr<float>(
                                frag_count_start + offset);
                        // zmin, zmax
                        frag_ptr[0] = z_min;
                        frag_ptr[1] = z_max;
                        // vmin, umin
                        frag_ptr[2] = v_min + frag_v * fragment_size;
                        frag_ptr[3] = u_min + frag_u * fragment_size;
                        // vmax, umax (clamped to the block's rectangle)
                        frag_ptr[4] = min(frag_ptr[2] + fragment_size - 1,
                                          static_cast<float>(v_max));
                        frag_ptr[5] = min(frag_ptr[3] + fragment_size - 1,
                                          static_cast<float>(u_max));
                    }
                }
            });
#if defined(__CUDACC__)
    int frag_count = count[0].Item<int>();
#else
    int frag_count = (*count_ptr).load();
#endif

    // Pass 0.5: Fill in range map to prepare for atomic min/max
    launcher::ParallelFor(
            h_down * w_down, [=] OPEN3D_DEVICE(int64_t workload_idx) {
                int v = workload_idx / w_down;
                int u = workload_idx % w_down;
                float* range_ptr = range_map_indexer.GetDataPtr<float>(u, v);
                range_ptr[0] = depth_max;
                range_ptr[1] = depth_min;
            });

    // Pass 1: iterate over rendering fragment array, fill-in range
    launcher::ParallelFor(
            frag_count * fragment_size * fragment_size,
            [=] OPEN3D_DEVICE(int64_t workload_idx) {
                int frag_idx = workload_idx / (fragment_size * fragment_size);
                int local_idx = workload_idx % (fragment_size * fragment_size);
                int dv = local_idx / fragment_size;
                int du = local_idx % fragment_size;

                float* frag_ptr =
                        frag_buffer_indexer.GetDataPtr<float>(frag_idx);
                int v_min = static_cast<int>(frag_ptr[2]);
                int u_min = static_cast<int>(frag_ptr[3]);
                int v_max = static_cast<int>(frag_ptr[4]);
                int u_max = static_cast<int>(frag_ptr[5]);

                int v = v_min + dv;
                int u = u_min + du;
                if (v > v_max || u > u_max) return;

                float z_min = frag_ptr[0];
                float z_max = frag_ptr[1];
                float* range_ptr = range_map_indexer.GetDataPtr<float>(u, v);
                // Widen the per-pixel (min, max) range atomically (CUDA) or
                // under a critical section (OpenMP CPU backend).
#ifdef __CUDACC__
                atomicMinf(&(range_ptr[0]), z_min);
                atomicMaxf(&(range_ptr[1]), z_max);
#else
#pragma omp critical(EstimateRangeCPU)
                {
                    range_ptr[0] = min(z_min, range_ptr[0]);
                    range_ptr[1] = max(z_max, range_ptr[1]);
                }
#endif
            });
#if defined(__CUDACC__)
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
#endif
}
// One-entry cache mapping a 3D block coordinate to its address in the block
// value buffer. Consecutive lookups along a ray usually hit the same block,
// so checking the cache first avoids redundant hash-map queries.
struct BlockCache {
    int x;
    int y;
    int z;
    int block_idx;

    // Return the cached address when (xin, yin, zin) matches the stored
    // coordinate; -1 signals a cache miss.
    inline int OPEN3D_DEVICE Check(int xin, int yin, int zin) {
        if (xin != x || yin != y || zin != z) {
            return -1;
        }
        return block_idx;
    }

    // Overwrite the cache with a new coordinate/address pair.
    inline void OPEN3D_DEVICE Update(int xin,
                                     int yin,
                                     int zin,
                                     int block_idx_in) {
        x = xin;
        y = yin;
        z = zin;
        block_idx = block_idx_in;
    }
};
#if defined(__CUDACC__)
void RayCastCUDA
#else
void RayCastCPU
#endif
// Renders a TSDF voxel-block volume by per-pixel ray marching.
//
// For each of the h x w pixels, a ray is cast from the camera origin through
// the pixel; the march is bounded by the per-pixel [min, max] depth interval
// read from range_map. When a +/- zero crossing of the TSDF is found (with
// sufficient integration weight), the enabled outputs are written:
//  - depth_map:  scaled ray depth at the crossing
//  - vertex_map: intersection point transformed into camera coordinates
//  - color_map:  trilinearly interpolated voxel color, normalized to [0, 1]
//  - normal_map: TSDF gradient (central differences), normalized and rotated
//                into camera coordinates
// An output is enabled iff its tensor is non-empty; if none is enabled the
// function warns and returns.
//
// hashmap maps integer 3D block coordinates to block addresses inside
// block_values. CUDA builds require the stdgpu backend; CPU builds use TBB.
// extrinsics is the world->camera pose; it is inverted to obtain the
// camera->world transform used for ray generation.
(std::shared_ptr<core::DeviceHashmap>& hashmap,
 const core::Tensor& block_values,
 const core::Tensor& range_map,
 core::Tensor& vertex_map,
 core::Tensor& depth_map,
 core::Tensor& color_map,
 core::Tensor& normal_map,
 const core::Tensor& intrinsics,
 const core::Tensor& extrinsics,
 int h,
 int w,
 int64_t block_resolution,
 float voxel_size,
 float sdf_trunc,
 float depth_scale,
 float depth_min,
 float depth_max,
 float weight_threshold) {
    using Key = core::Block<int, 3>;
    using Hash = core::BlockHash<int, 3>;
#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
    // CUDA path: only the stdgpu-backed hash map is supported.
    auto cuda_hashmap =
            std::dynamic_pointer_cast<core::StdGPUHashmap<Key, Hash>>(hashmap);
    if (cuda_hashmap == nullptr) {
        utility::LogError(
                "Unsupported backend: CUDA raycasting only supports STDGPU.");
    }
    auto hashmap_impl = cuda_hashmap->GetImpl();
#else
    // CPU path: TBB-backed hash map; dereference to get the underlying map.
    auto cpu_hashmap =
            std::dynamic_pointer_cast<core::TBBHashmap<Key, Hash>>(hashmap);
    auto hashmap_impl = *cpu_hashmap->GetImpl();
#endif

    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer range_map_indexer(range_map, 2);
    NDArrayIndexer vertex_map_indexer;
    NDArrayIndexer depth_map_indexer;
    NDArrayIndexer color_map_indexer;
    NDArrayIndexer normal_map_indexer;

    // An output is requested iff its tensor is non-empty.
    bool enable_vertex = (vertex_map.GetLength() != 0);
    bool enable_depth = (depth_map.GetLength() != 0);
    bool enable_color = (color_map.GetLength() != 0);
    bool enable_normal = (normal_map.GetLength() != 0);

    if (!enable_vertex && !enable_depth && !enable_color && !enable_normal) {
        utility::LogWarning("No output specified for ray casting, exit.");
        return;
    }

    if (enable_vertex) {
        vertex_map_indexer = NDArrayIndexer(vertex_map, 2);
    }
    if (enable_depth) {
        depth_map_indexer = NDArrayIndexer(depth_map, 2);
    }
    if (enable_color) {
        color_map_indexer = NDArrayIndexer(color_map, 2);
    }
    if (enable_normal) {
        normal_map_indexer = NDArrayIndexer(normal_map, 2);
    }

    // camera->world for ray generation, world->camera for writing outputs in
    // camera coordinates.
    TransformIndexer c2w_transform_indexer(
            intrinsics, t::geometry::InverseTransformation(extrinsics));
    TransformIndexer w2c_transform_indexer(intrinsics, extrinsics);

    int64_t rows = h;
    int64_t cols = w;
    float block_size = voxel_size * block_resolution;

#if defined(__CUDACC__)
    namespace launcher = core::kernel::cuda_launcher;
#else
    namespace launcher = core::kernel::cpu_launcher;
    using std::max;
#endif

    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                launcher::ParallelFor(rows * cols, [=] OPEN3D_DEVICE(
                                                           int64_t workload_idx) {
                    // Fetch the voxel at voxel coordinate (x_v, y_v, z_v)
                    // relative to block (x_b, y_b, z_b). The coordinate may
                    // step one voxel outside the block; then the neighboring
                    // block is resolved (cache first, hash map on miss).
                    // Returns nullptr when the neighbor block is missing.
                    auto GetVoxelAtP = [&] OPEN3D_DEVICE(
                                               int x_b, int y_b, int z_b,
                                               int x_v, int y_v, int z_v,
                                               core::addr_t block_addr,
                                               BlockCache& cache) -> voxel_t* {
                        // Wrap the voxel coordinate into [0, block_resolution).
                        int x_vn = (x_v + block_resolution) % block_resolution;
                        int y_vn = (y_v + block_resolution) % block_resolution;
                        int z_vn = (z_v + block_resolution) % block_resolution;

                        // Sign of the overflow tells which neighbor block
                        // (if any) the voxel actually lives in.
                        int dx_b = Sign(x_v - x_vn);
                        int dy_b = Sign(y_v - y_vn);
                        int dz_b = Sign(z_v - z_vn);

                        if (dx_b == 0 && dy_b == 0 && dz_b == 0) {
                            // Same block: use the address passed in.
                            return voxel_block_buffer_indexer
                                    .GetDataPtr<voxel_t>(x_v, y_v, z_v,
                                                         block_addr);
                        } else {
                            Key key;
                            key.Set(0, x_b + dx_b);
                            key.Set(1, y_b + dy_b);
                            key.Set(2, z_b + dz_b);

                            // NOTE(review): this local intentionally shadows
                            // the block_addr parameter for the neighbor case.
                            int block_addr = cache.Check(key.Get(0), key.Get(1),
                                                         key.Get(2));
                            if (block_addr < 0) {
                                auto iter = hashmap_impl.find(key);
                                if (iter == hashmap_impl.end()) return nullptr;
                                block_addr = iter->second;
                                cache.Update(key.Get(0), key.Get(1), key.Get(2),
                                             block_addr);
                            }

                            return voxel_block_buffer_indexer
                                    .GetDataPtr<voxel_t>(x_vn, y_vn, z_vn,
                                                         block_addr);
                        }
                    };

                    // Fetch the voxel at ray parameter t for the ray with
                    // origin (x_o, y_o, z_o) and direction (x_d, y_d, z_d).
                    // Returns nullptr when the containing block is missing.
                    auto GetVoxelAtT = [&] OPEN3D_DEVICE(
                                               float x_o, float y_o, float z_o,
                                               float x_d, float y_d, float z_d,
                                               float t,
                                               BlockCache& cache) -> voxel_t* {
                        float x_g = x_o + t * x_d;
                        float y_g = y_o + t * y_d;
                        float z_g = z_o + t * z_d;

                        // Block coordinate and look up
                        int x_b = static_cast<int>(floorf(x_g / block_size));
                        int y_b = static_cast<int>(floorf(y_g / block_size));
                        int z_b = static_cast<int>(floorf(z_g / block_size));

                        Key key;
                        key.Set(0, x_b);
                        key.Set(1, y_b);
                        key.Set(2, z_b);

                        int block_addr = cache.Check(x_b, y_b, z_b);
                        if (block_addr < 0) {
                            auto iter = hashmap_impl.find(key);
                            if (iter == hashmap_impl.end()) return nullptr;
                            block_addr = iter->second;
                            cache.Update(x_b, y_b, z_b, block_addr);
                        }

                        // Voxel coordinate and look up
                        int x_v = int((x_g - x_b * block_size) / voxel_size);
                        int y_v = int((y_g - y_b * block_size) / voxel_size);
                        int z_v = int((z_g - z_b * block_size) / voxel_size);

                        return voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                                x_v, y_v, z_v, block_addr);
                    };

                    // Pixel coordinates for this workload item.
                    int64_t y = workload_idx / cols;
                    int64_t x = workload_idx % cols;

                    float *depth_ptr = nullptr, *vertex_ptr = nullptr,
                          *normal_ptr = nullptr, *color_ptr = nullptr;

                    // Zero-initialize all enabled outputs so pixels without a
                    // surface hit are well-defined.
                    if (enable_depth) {
                        depth_ptr = depth_map_indexer.GetDataPtr<float>(x, y);
                        *depth_ptr = 0;
                    }
                    if (enable_vertex) {
                        vertex_ptr = vertex_map_indexer.GetDataPtr<float>(x, y);
                        vertex_ptr[0] = 0;
                        vertex_ptr[1] = 0;
                        vertex_ptr[2] = 0;
                    }
                    if (enable_color) {
                        color_ptr = color_map_indexer.GetDataPtr<float>(x, y);
                        color_ptr[0] = 0;
                        color_ptr[1] = 0;
                        color_ptr[2] = 0;
                    }
                    if (enable_normal) {
                        normal_ptr = normal_map_indexer.GetDataPtr<float>(x, y);
                        normal_ptr[0] = 0;
                        normal_ptr[1] = 0;
                        normal_ptr[2] = 0;
                    }

                    // Per-pixel march interval from the range map. The /8
                    // matches the range map's reduced resolution -- assumes
                    // the range map was estimated with a down factor of 8;
                    // TODO confirm against the range-estimation kernel.
                    const float* range =
                            range_map_indexer.GetDataPtr<float>(x / 8, y / 8);
                    float t = range[0];
                    const float t_max = range[1];
                    if (t >= t_max) return;

                    // Coordinates in camera and global
                    float x_c = 0, y_c = 0, z_c = 0;
                    float x_g = 0, y_g = 0, z_g = 0;
                    float x_o = 0, y_o = 0, z_o = 0;

                    // Iterative ray intersection check
                    float t_prev = t;
                    float tsdf_prev = -1.0f;
                    float tsdf = 1.0;
                    // NOTE(review): shadows the image-width parameter `w`;
                    // here it holds the voxel integration weight.
                    float w = 0.0;

                    // Camera origin
                    c2w_transform_indexer.RigidTransform(0, 0, 0, &x_o, &y_o,
                                                         &z_o);

                    // Direction: unproject the pixel at depth 1 and subtract
                    // the origin (not normalized, so t is in depth units).
                    c2w_transform_indexer.Unproject(static_cast<float>(x),
                                                    static_cast<float>(y), 1.0f,
                                                    &x_c, &y_c, &z_c);
                    c2w_transform_indexer.RigidTransform(x_c, y_c, z_c, &x_g,
                                                         &y_g, &z_g);
                    float x_d = (x_g - x_o);
                    float y_d = (y_g - y_o);
                    float z_d = (z_g - z_o);

                    BlockCache cache{0, 0, 0, -1};
                    bool surface_found = false;

                    // March along the ray: skip a whole block when no data is
                    // allocated, otherwise advance proportionally to the TSDF
                    // (sphere-tracing style), stopping at a +/- crossing.
                    while (t < t_max) {
                        voxel_t* voxel_ptr = GetVoxelAtT(x_o, y_o, z_o, x_d,
                                                         y_d, z_d, t, cache);
                        if (!voxel_ptr) {
                            t_prev = t;
                            t += block_size;
                        } else {
                            tsdf_prev = tsdf;
                            tsdf = voxel_ptr->GetTSDF();
                            w = voxel_ptr->GetWeight();
                            if (tsdf_prev > 0 && w >= weight_threshold &&
                                tsdf <= 0) {
                                surface_found = true;
                                break;
                            }
                            t_prev = t;
                            float delta = tsdf * sdf_trunc;
                            // Advance at least one voxel per step.
                            t += delta < voxel_size ? voxel_size : delta;
                        }
                    }

                    if (surface_found) {
                        // Linear interpolation of the zero crossing between
                        // the last two samples.
                        float t_intersect = (t * tsdf_prev - t_prev * tsdf) /
                                            (tsdf_prev - tsdf);
                        x_g = x_o + t_intersect * x_d;
                        y_g = y_o + t_intersect * y_d;
                        z_g = z_o + t_intersect * z_d;

                        // Trivial vertex assignment
                        if (enable_depth) {
                            *depth_ptr = t_intersect * depth_scale;
                        }
                        if (enable_vertex) {
                            w2c_transform_indexer.RigidTransform(
                                    x_g, y_g, z_g, vertex_ptr + 0,
                                    vertex_ptr + 1, vertex_ptr + 2);
                        }

                        // Trilinear interpolation
                        // TODO(wei): simplify the flow by splitting the
                        // functions given what is enabled
                        if (enable_color || enable_normal) {
                            int x_b =
                                    static_cast<int>(floorf(x_g / block_size));
                            int y_b =
                                    static_cast<int>(floorf(y_g / block_size));
                            int z_b =
                                    static_cast<int>(floorf(z_g / block_size));
                            float x_v = (x_g - float(x_b) * block_size) /
                                        voxel_size;
                            float y_v = (y_g - float(y_b) * block_size) /
                                        voxel_size;
                            float z_v = (z_g - float(z_b) * block_size) /
                                        voxel_size;

                            Key key;
                            key.Set(0, x_b);
                            key.Set(1, y_b);
                            key.Set(2, z_b);

                            int block_addr = cache.Check(x_b, y_b, z_b);
                            if (block_addr < 0) {
                                auto iter = hashmap_impl.find(key);
                                if (iter == hashmap_impl.end()) return;
                                block_addr = iter->second;
                                cache.Update(x_b, y_b, z_b, block_addr);
                            }

                            int x_v_floor = static_cast<int>(floorf(x_v));
                            int y_v_floor = static_cast<int>(floorf(y_v));
                            int z_v_floor = static_cast<int>(floorf(z_v));

                            float ratio_x = x_v - float(x_v_floor);
                            float ratio_y = y_v - float(y_v_floor);
                            float ratio_z = z_v - float(z_v_floor);

                            float sum_weight_color = 0.0;
                            float sum_weight_normal = 0.0;

                            // Visit the 8 corners of the enclosing voxel cell;
                            // `ratio` is the trilinear weight of each corner.
                            for (int k = 0; k < 8; ++k) {
                                int dx_v = (k & 1) > 0 ? 1 : 0;
                                int dy_v = (k & 2) > 0 ? 1 : 0;
                                int dz_v = (k & 4) > 0 ? 1 : 0;
                                float ratio = (dx_v * (ratio_x) +
                                               (1 - dx_v) * (1 - ratio_x)) *
                                              (dy_v * (ratio_y) +
                                               (1 - dy_v) * (1 - ratio_y)) *
                                              (dz_v * (ratio_z) +
                                               (1 - dz_v) * (1 - ratio_z));

                                voxel_t* voxel_ptr_k = GetVoxelAtP(
                                        x_b, y_b, z_b, x_v_floor + dx_v,
                                        y_v_floor + dy_v, z_v_floor + dz_v,
                                        block_addr, cache);

                                if (enable_color && voxel_ptr_k &&
                                    voxel_ptr_k->GetWeight() > 0) {
                                    sum_weight_color += ratio;
                                    color_ptr[0] += ratio * voxel_ptr_k->GetR();
                                    color_ptr[1] += ratio * voxel_ptr_k->GetG();
                                    color_ptr[2] += ratio * voxel_ptr_k->GetB();
                                }

                                if (enable_normal) {
                                    // Central differences of the TSDF around
                                    // this corner, accumulated per axis.
                                    for (int dim = 0; dim < 3; ++dim) {
                                        voxel_t* voxel_ptr_k_plus = GetVoxelAtP(
                                                x_b, y_b, z_b,
                                                x_v_floor + dx_v + (dim == 0),
                                                y_v_floor + dy_v + (dim == 1),
                                                z_v_floor + dz_v + (dim == 2),
                                                block_addr, cache);
                                        voxel_t* voxel_ptr_k_minus =
                                                GetVoxelAtP(x_b, y_b, z_b,
                                                            x_v_floor + dx_v -
                                                                    (dim == 0),
                                                            y_v_floor + dy_v -
                                                                    (dim == 1),
                                                            z_v_floor + dz_v -
                                                                    (dim == 2),
                                                            block_addr, cache);

                                        bool valid = false;
                                        if (voxel_ptr_k_plus &&
                                            voxel_ptr_k_plus->GetWeight() > 0) {
                                            normal_ptr[dim] +=
                                                    ratio *
                                                    voxel_ptr_k_plus
                                                            ->GetTSDF() /
                                                    (2 * voxel_size);
                                            valid = true;
                                        }

                                        if (voxel_ptr_k_minus &&
                                            voxel_ptr_k_minus->GetWeight() >
                                                    0) {
                                            normal_ptr[dim] -=
                                                    ratio *
                                                    voxel_ptr_k_minus
                                                            ->GetTSDF() /
                                                    (2 * voxel_size);
                                            valid = true;
                                        }
                                        sum_weight_normal += valid ? ratio : 0;
                                    }
                                }  // if (enable_normal)
                            }      // loop over 8 neighbors

                            // Normalize color; the 255 factor maps stored
                            // 8-bit-range channels into [0, 1].
                            if (enable_color && sum_weight_color > 0) {
                                sum_weight_color *= 255.0;
                                color_ptr[0] /= sum_weight_color;
                                color_ptr[1] /= sum_weight_color;
                                color_ptr[2] /= sum_weight_color;
                            }
                            // Normalize the gradient and rotate it into the
                            // camera frame.
                            if (enable_normal && sum_weight_normal > 0) {
                                normal_ptr[0] /= sum_weight_normal;
                                normal_ptr[1] /= sum_weight_normal;
                                normal_ptr[2] /= sum_weight_normal;
                                float norm =
                                        sqrt(normal_ptr[0] * normal_ptr[0] +
                                             normal_ptr[1] * normal_ptr[1] +
                                             normal_ptr[2] * normal_ptr[2]);
                                w2c_transform_indexer.Rotate(
                                        normal_ptr[0] / norm,
                                        normal_ptr[1] / norm,
                                        normal_ptr[2] / norm, normal_ptr + 0,
                                        normal_ptr + 1, normal_ptr + 2);
                            }
                        }  // if (color or normal)
                    }      // if (tsdf < 0)
                });
            });
#if defined(__CUDACC__)
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
#endif
}
} // namespace tsdf
} // namespace kernel
} // namespace geometry
} // namespace t
} // namespace open3d
|
Scan.h | /*
This file is part of the implementation for the technical paper
Field-Aligned Online Surface Reconstruction
Nico Schertler, Marco Tarini, Wenzel Jakob, Misha Kazhdan, Stefan Gumhold, Daniele Panozzo
ACM TOG 36, 4, July 2017 (Proceedings of SIGGRAPH 2017)
Use of this source code is granted via a BSD-style license, which can be found
in License.txt in the repository root.
@author Nico Schertler
*/
#pragma once
#include "osr/common.h"
#include "osr/INeighborQueryable.h"
#include "osr/HierarchyDecl.h"
#include "osr/nanoflannForwardDeclare.h"
#include "3rd/ICP.h"
#include <nsessentials/math/Morton.h>
#include <nsessentials/math/BoundingBox.h>
#include <nsessentials/gui/GLBuffer.h>
#include <nsessentials/gui/GLVertexArray.h>
#include <nsessentials/util/TimedBlock.h>
#include <random>
#include <iostream>
#include <memory>
#include <map>
namespace osr
{
class Scan;
//Interface for objects that visualize a Scan.
class OSR_EXPORT IScanRenderer
{
public:
	//Prepares the renderer for the given scan.
	virtual void initialize(Scan& scan) = 0;
	//Re-reads the scan's data after it has changed.
	virtual void updateData(const Scan& scan) = 0;
	//Draws the scan using the view matrix v and projection matrix proj.
	virtual void draw(const Scan& scan, const Eigen::Matrix4f & v, const Eigen::Matrix4f & proj) const = 0;

	//Display toggles read by implementations.
	bool showInput;
	bool showNormals;
};
//Represents data of a single scan
class OSR_EXPORT Scan : public IPointQueryable<size_t>
{
public:
	//V: per-vertex positions, N: per-vertex normals, C: per-vertex colors,
	//F: optional faces. All matrices are stored column-per-element.
	Scan(const Matrix3Xf& V = Matrix3Xf(), const Matrix3Xf& N = Matrix3Xf(), const Matrix3Xus& C = Matrix3Xus(), const MatrixXu& F = MatrixXu(), const std::string& name = "unnamed", const Eigen::Affine3f& transform = Eigen::Affine3f::Identity());
	~Scan();

	void initialize();

	//Calculates the vertex normals if not already present.
	//If there are faces in the data set, uses averaged face normals.
	//Otherwise, uses PCA. PCA assumes normals to point towards the origin.
	void calculateNormals();

	//Access to transformed attributes
	Vector3f p(size_t idx) const; //vertex position
	Vector3f n(size_t idx) const; //normal

	const std::string& getName() { return name; }

	//Bounding box of the untransformed data.
	const nse::math::BoundingBox<float, 3> boundingBox() const { return bbox; }
	//Bounding box after applying the scan transform.
	nse::math::BoundingBox<float, 3> getTransformedBoundingBox() const;

	void updateData();

	//Raw (untransformed) attribute accessors.
	const Matrix3Xf& V() const { return mV; }
	Matrix3Xf& V() { return mV; }
	const Matrix3Xf& N() const { return mN; }
	Matrix3Xf& N() { return mN; }
	const Matrix3Xus& C() const { return mC; }
	Matrix3Xus& C() { return mC; }
	const MatrixXu& F() const { return mF; }
	MatrixXu& F() { return mF; }

	//Modifies the scan transform via ICP so as to register to other.
	template <typename Index>
	void alignTo(const IPointQueryable<Index>& other, int iterations = 20, double subsample = 0.1);

	//Removes all points that overlap the hierarchy (i.e. there is a point in the hierarchy with a distance of at most "distance").
	void cleanOverlap(const THierarchy& hierarchy, float distance);

	//Rigid transform from scan-local to world coordinates.
	const Eigen::Affine3f& transform() const { return mTransform; }
	Eigen::Affine3f& transform() { return mTransform; }

	std::shared_ptr<IScanRenderer> renderer;

	// ---------- nanoflann interface ----------
	typedef nanoflann::KDTreeSingleIndexAdaptor< nanoflann::L2_Adaptor<float, Scan, float>, Scan, 3, size_t> KdTreeType;
	inline size_t kdtree_get_point_count() const { return mV.cols(); }
	//Squared Euclidean distance between query point p1 and stored point idx_p2.
	inline float kdtree_distance(const float *p1, const size_t idx_p2, size_t size) const
	{
		float s = 0;
		for (size_t i = 0; i < size; ++i)
		{
			const float d = p1[i] - mV.coeff(i, idx_p2);
			s += d*d;
		}
		return s;
	}
	inline float kdtree_get_pt(const size_t idx, int dim) const { return mV.coeff(dim, idx); }
	template <class BBOX>
	bool kdtree_get_bbox(BBOX& bb) const
	{
		//Report the precomputed (untransformed) bounding box.
		for (int i = 0; i < 3; ++i)
		{
			bb[i].low = bbox.min(i);
			bb[i].high = bbox.max(i);
		}
		return true;
	}
	// ---------- end nanoflann interface ----------

	void buildTree();

	Vector3f neighborP(const size_t& i) const { return mV.col(i); } //access to point position
	Vector3f neighborN(const size_t& i) const { return mN.col(i); }; //access to point normal
	bool isIndexValid(const size_t& idx) const { return idx < mV.cols(); }

	//Finds the closest point that has a similar normal as the provided one
	size_t findClosestCompatiblePoint(const Vector3f& p, const Vector3f& n) const;

	//Search radius used when looking for compatible points.
	float closestPointRadius = 30;

#ifdef USE_DAVIDVIVE
	struct
	{
		Eigen::Affine3f transformUncalibrated; //turntable + controller transform
		Eigen::Affine3f turntableRotation;
		Eigen::Affine3f davidToVive;
	} davidViveData;
#endif

private:
	KdTreeType* kdTree = nullptr;

private:
	void calculateNormalsFromFaces();
	void calculateNormalsPCA();

	Matrix3Xf mV; //positions
	Matrix3Xf mN; //normals
	Matrix3Xus mC; //colors
	MatrixXu mF; //faces

	std::string name;
	nse::math::BoundingBox<float, 3> bbox;

	Eigen::Affine3f mTransform;
};
//Registers this scan to `other` by estimating correspondences, normal-space
//subsampling, and running sparse ICP; the result is folded into mTransform.
//  other      - target point cloud (queried for closest compatible points)
//  iterations - maximum ICP iterations
//  subsample  - fraction of corresponded points used for ICP (1 = all)
//Prints a message and returns without modifying the transform when fewer
//than 10 overlapping points are found.
template <typename Index>
void Scan::alignTo(const IPointQueryable<Index>& other, int iterations, double subsample)
{
	nse::util::TimedBlock b("Registering scan ..");

	//For each point, find the corresponding point in the other point cloud.
	std::vector<Index> correspondences(mV.cols());
#pragma omp parallel for
	for (int i = 0; i < mV.cols(); ++i)
	{
		if (std::isnan(mV.col(i).x()))
			continue;
		correspondences[i] = other.findClosestCompatiblePoint(mTransform * mV.col(i), mTransform.linear() * mN.col(i));
	}

	//Distribute the points with a correspondence into normal buckets
	//(normals quantized to a 0.1 grid, keyed by Morton code).
	std::map<nse::math::MortonCode64, std::vector<size_t>> normalBucketsMap;
	for (int i = 0; i < mV.cols(); ++i)
	{
		if (!std::isnan(mV.col(i).x()) && other.isIndexValid(correspondences[i]))
		{
			Vector3i discrete = (mN.col(i) * 10).cast<int>();
			nse::math::MortonCode64 code(discrete.x(), discrete.y(), discrete.z());
			normalBucketsMap[code].push_back(i);
		}
	}
	std::vector<std::vector<size_t>> normalBuckets;
	int potentialSamples = 0;
	for (auto& entry : normalBucketsMap)
	{
		potentialSamples += entry.second.size();
		normalBuckets.push_back(std::move(entry.second));
	}
	normalBucketsMap.clear();

	if (potentialSamples < 10)
	{
		std::cout << "Could not find enough overlap. Registration will abort." << std::endl;
		return;
	}

	int samples = (int)(potentialSamples * subsample);
	std::uniform_int_distribution<size_t> bucketDist(0, normalBuckets.size() - 1);
	std::mt19937 rnd;
	Matrix3Xf X(3, samples), N(3, samples);
	//subsample the point cloud for ICP
	for (int i = 0; i < samples; ++i)
	{
		//Initialized so the fallback below stays well-defined; previously
		//`sample` could be read uninitialized when all attempts failed
		//(e.g. a bucketed point with a NaN normal).
		size_t sample = 0;
		if (subsample == 1)
			sample = i;
		else
		{
			//normal space sampling
			bool sampleOk = false;
			int attempt = 0;
			//Also stop when every bucket has been exhausted; indexing an
			//empty bucket list was previously out of bounds.
			while (!sampleOk && attempt++ < 10 && !normalBuckets.empty())
			{
				auto bucketIdx = bucketDist(rnd);
				auto& bucket = normalBuckets[bucketIdx];
				std::uniform_int_distribution<size_t> sampleDist(0, bucket.size() - 1);
				auto sampleIdx = sampleDist(rnd);
				sample = bucket[sampleIdx];
				if (std::isnan(mV.coeff(0, sample)) || std::isnan(mN.coeff(0, sample)))
					continue;
				sampleOk = true;
				//Remove the sampled point so it cannot be drawn again.
				bucket.erase(bucket.begin() + sampleIdx);
				if (bucket.empty())
				{
					normalBuckets.erase(normalBuckets.begin() + bucketIdx);
					//Only rebuild the distribution over a non-empty list;
					//size() - 1 underflows to SIZE_MAX when it is empty.
					if (!normalBuckets.empty())
						bucketDist = std::uniform_int_distribution<size_t>(0, normalBuckets.size() - 1);
				}
			}
		}
		//Transformed position and normal of the chosen sample.
		X.col(i) = mTransform * mV.col(sample);
		N.col(i) = mTransform.linear() * mN.col(sample);
	}

	//Run ICP (sparse point-to-plane) and accumulate the result.
	SICP::Parameters params;
	params.p = 1.5;
	params.max_icp = iterations;
	params.max_outer = 10;
	params.max_inner = 1;
	Eigen::setNbThreads(0);
	mTransform = SICP::point_to_plane(X, N, other, params) * mTransform;
	Eigen::setNbThreads(1);
}
} |
GB_binop__bshift_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__bshift_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__bshift_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_int8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_int8)
// C=scalar+B GB (_bind1st__bshift_int8)
// C=scalar+B' GB (_bind1st_tran__bshift_int8)
// C=A+scalar GB (_bind2nd__bshift_int8)
// C=A'+scalar GB (_bind2nd_tran__bshift_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_bitshift_int8 (aij, bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_bitshift_int8 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_INT8 || GxB_NO_BSHIFT_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator.
// The loop body comes from the included template (auto-generated kernel;
// do not hand-edit the logic).
void GB (_Cdense_ewise3_noaccum__bshift_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__bshift_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    // pre-sliced tasks over the entries of B
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed as untyped pointer) into dense C.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumb__bshift_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the inner block already returned (generated-code pattern)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B for the BSHIFT int8 operator.
// When is_eWiseUnion is true, alpha/beta scalars substitute for entries
// present in only one of A or B.
GrB_Info GB (_AaddB__bshift_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // typed copies of the union fill-in scalars
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where C is sparse or hypersparse.
GrB_Info GB (_AemultB_08__bshift_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hypersparse and B is bitmap/full.
// flipxy selects fmult(y,x) instead of fmult(x,y); BSHIFT is not commutative,
// so the flip is handled explicitly via GB_FLIPPED.
GrB_Info GB (_AemultB_02__bshift_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hypersparse and both A and B are
// bitmap/full.
GrB_Info GB (_AemultB_04__bshift_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where C is bitmap.
GrB_Info GB (_AemultB_bitmap__bshift_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the binary operator with the scalar bound to the
// first argument: Cx [p] = bitshift (x, Bx [p]) for every present entry.
// Bb is the bitmap of B (may be NULL for full matrices).
GrB_Info GB (_bind1st__bshift_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t   x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_bitshift_int8 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the binary operator with the scalar bound to the
// second argument: Cx [p] = bitshift (Ax [p], y) for every present entry.
// Ab is the bitmap of A (may be NULL for full matrices).
GrB_Info GB (_bind2nd__bshift_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t   y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_bitshift_int8 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int8 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar bound
// to the first argument; the per-entry work is GB_CAST_OP defined above.
GrB_Info GB (_bind1st_tran__bshift_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent kernels (generated-code pattern)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int8 (aij, y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar bound
// to the second argument; the per-entry work is GB_CAST_OP defined above.
GrB_Info GB (_bind2nd_tran__bshift_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
vlad.c | /** @file vlad.c
** @brief VLAD - Declaration
** @author David Novotny
** @author Andrea Vedaldi
**/
/*
Copyright (C) 2013 David Novotny and Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page vlad Vector of Locally Aggregated Descriptors (VLAD) encoding
@author David Novotny
@author Andrea Vedaldi
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@ref vlad.h implements the *Vector of Locally Aggregated Descriptors*
(VLAD) image representation @cite{jegou10aggregating}
@cite{arandjelovic13all-about}.
@ref vlad-starting demonstrates how to use the C API to compute the
VLAD representation of an image. For further details on the VLAD image
representation refer to:
- @subpage vlad-fundamentals - VLAD definition and computation.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section vlad-starting Getting started with VLAD
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The VLAD encoding of a set of features is obtained by using the
function ::vl_vlad_encode. The function can be applied to both @c
float or @c double data types.
::vl_vlad_encode requires a visual dictionary, for example obtained by
using @ref kmeans. Furthermore, the assignments of features to
dictionary elements must be pre-computed, for example by using @ref
kdtree.
In the following example code, the vocabulary is first created using
the KMeans clustering, then the points, that are to be encoded are
assigned to its corresponding nearest vocabulary words, after that the
original vlad encoding routine without any normalization option takes place.
At the end of the process the encoding is stored in the @c enc variable.
@code
vl_uint32 * indexes;
float * assignments;
float * enc;
int i;
// create a KMeans object and run clustering to get vocabulary words (centers)
kmeans = vl_kmeans_new (VLDistanceL2, VL_TYPE_FLOAT) ;
vl_kmeans_cluster (kmeans,
data,
dimension,
numData,
numCenters) ;
// find nearest cluster centers for the data that should be encoded
indexes = vl_malloc(sizeof(vl_uint32) * numDataToEncode);
vl_kmeans_quantize(kmeans,indexes,dataToEncode,numDataToEncode);
// convert indexes array to assignments array,
// which can be processed by vl_vlad_encode
assignments = vl_malloc(sizeof(float) * numDataToEncode * numCenters);
memset(assignments, 0, sizeof(float) * numDataToEncode * numCenters);
for(i = 0; i < numDataToEncode; i++) {
assignments[i * numCenters + indexes[i]] = 1.;
}
// allocate space for vlad encoding
enc = vl_malloc(sizeof(TYPE) * dimension * numCenters);
// do the encoding job
vl_vlad_encode (enc, VL_F_TYPE,
vl_kmeans_get_centers(kmeans), dimension, numCenters,
data, numData,
assignments,
0) ;
@endcode
Various @ref vlad-normalization normalizations can be applied to the
VLAD vectors. These are controlled by the parameter @a flag of
::vl_vlad_encode.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page vlad-fundamentals VLAD fundamentals
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
This page describes the *Vector of Locally Aggregated Descriptors*
(VLAD) image encoding of @cite{jegou10aggregating}. See @ref vlad for
an overview of the C API.
VLAD is a *feature encoding and pooling* method, similar to @ref
fisher "Fisher vectors". VLAD encodes a set of local feature
descriptors $I=(\bx_1,\dots,\bx_n)$ extracted from an image using a
dictionary built using a clustering method such as @ref gmm or @ref
kmeans. Let $q_{ik}$ be the strength of the association of data vector
$\bx_i$ to cluster $\mu_k$, such that $q_{ik} \geq 0$ and
$\sum_{k=1}^K q_{ik} = 1$. The association may be either soft
(e.g. obtained as the posterior probabilities of the GMM clusters) or
hard (e.g. obtained by vector quantization with K-means).
$\mu_k$ are the cluster *means*, vectors of the same dimension as the
data $\bx_i$. VLAD encodes feature $\bx$ by considering the *residuals*
\[
\bv_k = \sum_{i=1}^{N} q_{ik} (\bx_{i} - \mu_k).
\]
The residuals are stacked together to obtain the vector
\[
\hat\Phi(I) =
\begin{bmatrix}
\vdots \\
\bv_k \\
\vdots
\end{bmatrix}
\]
Before the VLAD encoding is used it is usually normalized, as
explained @ref vlad-normalization next.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section vlad-normalization VLAD normalization
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
VLFeat VLAD implementation supports a number of different
normalization strategies. These are optionally applied in this order:
- **Component-wise mass normalization.** Each vector $\bv_k$ is
divided by the total mass of features associated to it $\sum_{i=1}^N
q_{ik}$.
- **Square-rooting.** The function $\sign(z)\sqrt{|z|}$ is applied to
all scalar components of the VLAD descriptor.
- **Component-wise $l^2$ normalization.** The vectors $\bv_k$ are
divided by their norm $\|\bv_k\|_2$.
- **Global $l^2$ normalization.** The VLAD descriptor $\hat\Phi(I)$ is
divided by its norm $\|\hat\Phi(I)\|_2$.
*/
#include "vlad.h"
#include "mathop.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
/* ================================================================ */
#ifdef VL_VLAD_INSTANTIATING
/** @internal
 ** @brief Type-generic body of ::vl_vlad_encode.
 **
 ** TYPE and SFX are set by the instantiation section at the bottom of
 ** this file (float/f and double/d). The function accumulates, for each
 ** cluster, the assignment-weighted sum of the data vectors, subtracts
 ** the (mass-weighted) cluster means, and applies the normalizations
 ** selected by @a flags. See ::vl_vlad_encode for parameter semantics.
 **
 ** Change vs. original: removed a dead "dim = 0 ;" statement that was
 ** immediately overwritten by the following for-loop initializer.
 **/
static void
VL_XCAT(_vl_vlad_encode_, SFX)
(TYPE * enc,
 TYPE const * means, vl_size dimension, vl_size numClusters,
 TYPE const * data, vl_size numData,
 TYPE const * assignments,
 int flags)
{
  vl_uindex dim ;
  vl_index i_cl, i_d ;

  /* enc is numClusters consecutive blocks of `dimension` components */
  memset(enc, 0, sizeof(TYPE) * dimension * numClusters) ;

#if defined(_OPENMP)
#pragma omp parallel for default(shared) private(i_cl,i_d,dim) num_threads(vl_get_max_threads())
#endif
  for (i_cl = 0; i_cl < (signed)numClusters; i_cl++) {
    double clusterMass = 0 ;
    /* v_k = sum_i q_ik x_i ; clusterMass = sum_i q_ik */
    for (i_d = 0; i_d < (signed)numData; i_d++) {
      if (assignments[i_d*numClusters + i_cl] > 0) {
        double q = assignments[i_d*numClusters+i_cl] ;
        clusterMass += q ;
        for(dim = 0; dim < dimension; dim++) {
          enc [i_cl * dimension + dim] += q * data [i_d * dimension + dim] ;
        }
      }
    }

    /* subtract the mean contribution; with NORMALIZE_MASS the block is
       divided by the cluster mass first (so the mean is subtracted once),
       otherwise mass * mean is subtracted from the raw sum */
    if (clusterMass > 0) {
      if (flags & VL_VLAD_FLAG_NORMALIZE_MASS) {
        for(dim = 0; dim < dimension; dim++) {
          enc[i_cl*dimension + dim] /= clusterMass ;
          enc[i_cl*dimension + dim] -= means[i_cl*dimension+dim];
        }
      } else {
        for(dim = 0; dim < dimension; dim++) {
          enc[i_cl*dimension + dim] -= clusterMass * means[i_cl*dimension+dim];
        }
      }
    }

    /* signed square root: sign(z) * sqrt(|z|) */
    if (flags & VL_VLAD_FLAG_SQUARE_ROOT) {
      for(dim = 0; dim < dimension; dim++) {
        TYPE z = enc[i_cl*dimension + dim] ;
        if (z >= 0) {
          enc[i_cl*dimension + dim] = VL_XCAT(vl_sqrt_, SFX)(z) ;
        } else {
          enc[i_cl*dimension + dim] = - VL_XCAT(vl_sqrt_, SFX)(- z) ;
        }
      }
    }

    /* per-cluster l2 normalization; norm clamped to avoid division by 0 */
    if (flags & VL_VLAD_FLAG_NORMALIZE_COMPONENTS) {
      TYPE n = 0 ;
      for(dim = 0; dim < dimension; dim++) {
        TYPE z = enc[i_cl*dimension + dim] ;
        n += z * z ;
      }
      n = VL_XCAT(vl_sqrt_, SFX)(n) ;
      n = VL_MAX(n, 1e-12) ;
      for(dim = 0; dim < dimension; dim++) {
        enc[i_cl*dimension + dim] /= n ;
      }
    }
  }

  /* global l2 normalization of the whole descriptor unless disabled */
  if (! (flags & VL_VLAD_FLAG_UNNORMALIZED)) {
    TYPE n = 0 ;
    for(dim = 0 ; dim < dimension * numClusters ; dim++) {
      TYPE z = enc [dim] ;
      n += z * z ;
    }
    n = VL_XCAT(vl_sqrt_, SFX)(n) ;
    n = VL_MAX(n, 1e-12) ;
    for(dim = 0 ; dim < dimension * numClusters ; dim++) {
      enc[dim] /= n ;
    }
  }
}
/* VL_FISHER_INSTANTIATING */
#else
#ifndef __DOXYGEN__
#define FLT VL_TYPE_FLOAT
#define TYPE float
#define SFX f
#define VL_VLAD_INSTANTIATING
#include "vlad.c"
#define FLT VL_TYPE_DOUBLE
#define TYPE double
#define SFX d
#define VL_VLAD_INSTANTIATING
#include "vlad.c"
#endif
/* VL_VLAD_INSTANTIATING */
#endif
/* ================================================================ */
#ifndef VL_VLAD_INSTANTIATING
/** @brief VLAD encoding of a set of vectors.
** @param enc output VLAD encoding (out).
** @param dataType the type of the input data (::VL_TYPE_DOUBLE or ::VL_TYPE_FLOAT).
** @param numData number of data vectors to encode.
** @param means cluster means.
** @param numClusters number of clusters.
** @param data the data vectors to encode.
** @param dimension dimensionality of the data.
** @param assignments data to cluster soft assignments.
** @param flags options.
**
** @a enc is the VLAD vector of size @a numClusters by
** @a dimension. @a means is a matrix with @a numClusters columns and
** @a dimension rows. @a data is the matrix of vectors to be encoded,
** with @a dimension rows and @a numData columns. @a assignments is a
** matrix with @a numClusters rows and @a numData columns.
** All the matrices should be stored in column-major order.
**
** @a flag allows controlling further options:
** ::VL_VLAD_FLAG_NORMALIZE_COMPONENTS, ::VL_VLAD_FLAG_SQUARE_ROOT,
** ::VL_VLAD_FLAG_UNNORMALIZED, and ::VL_VLAD_FLAG_NORMALIZE_MASS.
**
** @sa @ref vlad
**/
/* Dispatch to the float or double instantiation of the VLAD encoder
 * according to dataType; any other type is a programming error and
 * aborts the process. See the doxygen block above for the parameter
 * layout (all matrices column-major). */
void
vl_vlad_encode (void * enc, vl_type dataType,
                void const * means, vl_size dimension, vl_size numClusters,
                void const * data, vl_size numData,
                void const * assignments,
                int flags)
{
  if (dataType == VL_TYPE_FLOAT) {
    _vl_vlad_encode_f ((float *) enc,
                       (float const *) means, dimension, numClusters,
                       (float const *) data, numData,
                       (float const *) assignments, flags) ;
  } else if (dataType == VL_TYPE_DOUBLE) {
    _vl_vlad_encode_d ((double *) enc,
                       (double const *) means, dimension, numClusters,
                       (double const *) data, numData,
                       (double const *) assignments, flags) ;
  } else {
    abort() ;
  }
}
/* ! VL_VLAD_INSTANTIATING */
#endif
#undef SFX
#undef TYPE
#undef FLT
#undef VL_VLAD_INSTANTIATING
|
Matrix.h | #pragma once
#include <algorithm>
#include <exception>
#include <functional>
#include <iostream>
#include <omp.h>
#include <stdexcept>
#include <type_traits>
#include <vector>
namespace cppmath
{
// Dense matrix of a floating-point element type T, stored as a
// vector-of-row-vectors. Supports element-wise +/-, scalar */, and
// matrix multiplication (serial or OpenMP kernel chosen by size).
template <typename T> class Matrix
{
static_assert(std::is_floating_point<T>::value,
"An specialization of the matrix class has to be of a "
"floating point type!");
public:
// Outer vector = rows, inner vectors = columns (row-major layout).
using MatrixDataType = std::vector<std::vector<T>>;
// A matrix always needs explicit dimensions; no default construction.
Matrix() = delete;
// rows x cols matrix, zero-initialized.
Matrix(std::size_t rows, std::size_t cols);
// rows x cols matrix with every element set to `value`.
Matrix(std::size_t rows, std::size_t cols, const T &value);
~Matrix() noexcept = default;
Matrix(const Matrix &other) = default;
Matrix &operator=(const Matrix &other) = default;
Matrix(Matrix &&other) noexcept = default;
Matrix &operator=(Matrix &&other) noexcept = default;
// Element-wise addition/subtraction; throw std::invalid_argument when
// the operand dimensions do not match.
Matrix operator+(const Matrix &rhs);
Matrix &operator+=(const Matrix &rhs);
Matrix operator-(const Matrix &rhs);
Matrix &operator-=(const Matrix &rhs);
// Scalar multiply/divide; operator/ throws std::overflow_error on zero.
Matrix operator*(const T &scalar);
Matrix &operator*=(const T &scalar);
Matrix operator/(const T &scalar);
Matrix &operator/=(const T &scalar);
// Matrix product; throws std::invalid_argument on inner-dim mismatch.
Matrix operator*(const Matrix &rhs);
Matrix &operator*=(const Matrix &rhs);
// Accumulate matrixA * matrixB into result (result is expected to
// start zeroed, as produced by the 2-arg constructor).
void dot(const Matrix &matrixA, const Matrix &matrixB, Matrix &result);
// OpenMP-parallel variant of dot().
void parallel_dot(const Matrix &matrixA, const Matrix &matrixB, Matrix &result);
// Write the matrix to std::cout, row per line.
void print_matrix() const;
std::size_t num_rows() const;
std::size_t num_cols() const;
private:
std::size_t m_rows;
std::size_t m_cols;
MatrixDataType m_data;
};
// Constructs a rows x cols matrix with every element zero-initialized.
template <typename T>
Matrix<T>::Matrix(std::size_t rows, std::size_t cols)
: m_rows(rows), m_cols(cols), m_data(m_rows, std::vector<T>(m_cols, 0))
{
}
// Constructs a rows x cols matrix with every element set to `value`.
template <typename T>
Matrix<T>::Matrix(std::size_t rows, std::size_t cols, const T &value)
: m_rows(rows), m_cols(cols), m_data(m_rows, std::vector<T>(m_cols, value))
{
}
// Returns the element-wise sum of *this and rhs.
// Throws std::invalid_argument when the dimensions differ.
template <typename T> Matrix<T> Matrix<T>::operator+(const Matrix<T> &rhs)
{
    if (m_rows != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of rows are not equal!"));
    }
    if (m_cols != rhs.m_cols)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }
    Matrix<T> sum(m_rows, m_cols);
    for (std::size_t row = 0; row < m_rows; ++row)
    {
        for (std::size_t col = 0; col < m_cols; ++col)
        {
            sum.m_data[row][col] = m_data[row][col] + rhs.m_data[row][col];
        }
    }
    return sum;
}
// Adds rhs element-wise into *this and returns *this.
// Throws std::invalid_argument when the dimensions differ.
template <typename T> Matrix<T> &Matrix<T>::operator+=(const Matrix<T> &rhs)
{
    if (m_rows != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of rows are not equal!"));
    }
    if (m_cols != rhs.m_cols)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }
    for (std::size_t row = 0; row < m_rows; ++row)
    {
        for (std::size_t col = 0; col < m_cols; ++col)
        {
            m_data[row][col] += rhs.m_data[row][col];
        }
    }
    return *this;
}
// Returns the element-wise difference *this - rhs.
// Throws std::invalid_argument when the dimensions differ.
template <typename T> Matrix<T> Matrix<T>::operator-(const Matrix<T> &rhs)
{
    if (m_rows != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of rows are not equal!"));
    }
    if (m_cols != rhs.m_cols)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }
    Matrix<T> difference(m_rows, m_cols);
    for (std::size_t row = 0; row < m_rows; ++row)
    {
        for (std::size_t col = 0; col < m_cols; ++col)
        {
            difference.m_data[row][col] = m_data[row][col] - rhs.m_data[row][col];
        }
    }
    return difference;
}
// Subtracts rhs element-wise from *this and returns *this.
// Throws std::invalid_argument when the dimensions differ.
template <typename T> Matrix<T> &Matrix<T>::operator-=(const Matrix<T> &rhs)
{
    if (m_rows != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of rows are not equal!"));
    }
    if (m_cols != rhs.m_cols)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }
    for (std::size_t row = 0; row < m_rows; ++row)
    {
        for (std::size_t col = 0; col < m_cols; ++col)
        {
            m_data[row][col] -= rhs.m_data[row][col];
        }
    }
    return *this;
}
// Returns a copy of *this with every element multiplied by `scalar`.
template <typename T> Matrix<T> Matrix<T>::operator*(const T &scalar)
{
    Matrix<T> scaled(m_rows, m_cols);
    for (std::size_t row = 0; row < m_rows; ++row)
    {
        for (std::size_t col = 0; col < m_cols; ++col)
        {
            scaled.m_data[row][col] = m_data[row][col] * scalar;
        }
    }
    return scaled;
}
// Multiplies every element of *this by `scalar` in place.
template <typename T> Matrix<T> &Matrix<T>::operator*=(const T &scalar)
{
    for (std::size_t row = 0; row < m_rows; ++row)
    {
        for (std::size_t col = 0; col < m_cols; ++col)
        {
            m_data[row][col] *= scalar;
        }
    }
    return *this;
}
// Returns a copy of *this with every element divided by `scalar`.
// Throws std::overflow_error when `scalar` is zero.
template <typename T> Matrix<T> Matrix<T>::operator/(const T &scalar)
{
    if (scalar == 0)
    {
        throw(std::overflow_error("You cannot divide by a scalar value of zero!"));
    }
    Matrix<T> quotient(m_rows, m_cols);
    for (std::size_t row = 0; row < m_rows; ++row)
    {
        for (std::size_t col = 0; col < m_cols; ++col)
        {
            quotient.m_data[row][col] = m_data[row][col] / scalar;
        }
    }
    return quotient;
}
// Divides every element of *this by `scalar` in place.
// Throws std::overflow_error when `scalar` is zero — added for
// consistency with operator/, which already performs this check.
template <typename T> Matrix<T> &Matrix<T>::operator/=(const T &scalar)
{
    if (scalar == 0)
    {
        throw(std::overflow_error("You cannot divide by a scalar value of zero!"));
    }
    for (std::size_t i = 0; i != m_rows; ++i)
    {
        std::transform(m_data[i].begin(), m_data[i].end(), m_data[i].begin(), [scalar](const T val) -> T {
            return val / scalar;
        });
    }
    return *this;
}
// Matrix product *this * rhs. Small operands (both dims < 250) use the
// serial kernel; larger ones use the OpenMP kernel.
// Throws std::invalid_argument when the inner dimensions do not match.
template <typename T> Matrix<T> Matrix<T>::operator*(const Matrix<T> &rhs)
{
    if (m_cols != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }
    Matrix<T> product(m_rows, rhs.m_cols);
    const bool use_serial = (m_rows < 250) && (m_cols < 250);
    if (use_serial)
    {
        dot(*this, rhs, product);
    }
    else
    {
        parallel_dot(*this, rhs, product);
    }
    return product;
}
// In-place matrix product: replaces *this with *this * rhs.
// Throws std::invalid_argument when the inner dimensions do not match.
template <typename T> Matrix<T> &Matrix<T>::operator*=(const Matrix<T> &rhs)
{
    if (m_cols != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }
    // Delegate to operator*, which selects the serial/parallel kernel.
    Matrix<T> product = (*this) * rhs;
    *this = product;
    return *this;
}
// Serial kernel: accumulates matrixA * matrixB into result
// (classic i-j-k triple loop; result keeps any prior contents).
template <typename T> void Matrix<T>::dot(const Matrix<T> &matrixA, const Matrix<T> &matrixB, Matrix<T> &result)
{
    const std::size_t rows = matrixA.m_rows;
    const std::size_t cols = matrixB.m_cols;
    const std::size_t inner = matrixB.m_rows;
    for (std::size_t i = 0; i < rows; ++i)
    {
        for (std::size_t j = 0; j < cols; ++j)
        {
            T acc = result.m_data[i][j];
            for (std::size_t k = 0; k < inner; ++k)
            {
                acc += matrixA.m_data[i][k] * matrixB.m_data[k][j];
            }
            result.m_data[i][j] = acc;
        }
    }
}
// OpenMP kernel: accumulates matrixA * matrixB into result, parallelized
// over the rows of matrixA.
// Fix vs. original: the loop tests used `!=`, which is not an allowed
// relational operator in the OpenMP canonical loop form (the parallelized
// loop requires <, <=, > or >=); they are now `<`. Loop indices are
// declared in the for-init, making them private without a clause.
// NOTE(review): the thread count 12 is hard-coded from the original;
// consider omp_get_max_threads() instead.
template <typename T>
void Matrix<T>::parallel_dot(const Matrix<T> &matrixA, const Matrix<T> &matrixB, Matrix<T> &result)
{
#pragma omp parallel for shared(result) num_threads(12)
    for (std::size_t i = 0; i < matrixA.m_rows; ++i)
    {
        for (std::size_t j = 0; j < matrixB.m_cols; ++j)
        {
            T acc = result.m_data[i][j];
            for (std::size_t k = 0; k < matrixB.m_rows; ++k)
            {
                acc += matrixA.m_data[i][k] * matrixB.m_data[k][j];
            }
            result.m_data[i][j] = acc;
        }
    }
}
// Writes the matrix to std::cout, one row per line, elements separated
// by a space, followed by a trailing blank line.
template <typename T> void Matrix<T>::print_matrix() const
{
    for (const auto &row : m_data)
    {
        for (const auto &entry : row)
        {
            std::cout << entry << " ";
        }
        std::cout << std::endl;
    }
    std::cout << std::endl;
}
// Number of rows in the matrix.
template <typename T> std::size_t Matrix<T>::num_rows() const
{
return m_rows;
}
// Number of columns in the matrix.
template <typename T> std::size_t Matrix<T>::num_cols() const
{
return m_cols;
}
} // namespace cppmath
|
reductions.c | /*
Here we will investigate the performance of reductions using omp.
We will take different reductions on consecutive numbers.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "omp.h"
#include "./fasttime.h"
#define NUM_THREADS 4
void reduce_ADD(uint64_t);
void reduce_MUL(uint64_t);
void reduce_AND(uint64_t);
void reduce_OR_(uint64_t);
/* Driver: run the ADD and MUL reduction benchmarks on one billion terms.
 * (reduce_AND / reduce_OR_ are declared above but not exercised here.) */
int main(int argc, char* argv[]) {
  (void)argc;
  (void)argv;
  const uint64_t N = 1000000000;
  reduce_ADD(N);
  reduce_MUL(N);
  return 0;
}
/* Benchmarks an OpenMP '+' reduction and then a '-' reduction against the
 * equivalent serial loops, printing elapsed times and reporting any
 * parallel/serial mismatch.
 * Fixes vs. the original:
 *  - the inner index was a single function-scope variable shared by all
 *    threads inside the parallel regions (a data race); indices are now
 *    declared per-loop, so each thread gets its own copy;
 *  - printf arguments are cast to unsigned long long to match %llu
 *    (uint64_t is unsigned long on LP64, so the specifier mismatched);
 *  - the final condition uses logical || instead of bitwise |. */
void reduce_ADD(uint64_t N) {
  uint64_t parall_result = 0;
  uint64_t serial_result = 0;
  fasttime_t start_time, endin_time;

  /* --- parallel ADD --- */
  start_time = gettime();
#pragma omp parallel for reduction(+ : parall_result) num_threads(NUM_THREADS)
  for (size_t i = 0; i < N; i++) {
    for (size_t j = 0; j < N; j++) {
      parall_result += j;
    }
    parall_result += i;
  }
  endin_time = gettime();
  printf("%s: parall ADD in %f secs\n", __func__, tdiff(start_time, endin_time));

  /* --- serial ADD --- */
  start_time = gettime();
  for (size_t i = 0; i < N; i++) {
    for (size_t j = 0; j < N; j++) {
      serial_result += j;
    }
    serial_result += i;
  }
  endin_time = gettime();
  printf("%s: serial ADD in %f secs\n", __func__, tdiff(start_time, endin_time));

  /* Check results */
  if (parall_result != serial_result)
    printf("Error in function %s, parall_result = %llu and serial_result = %llu.\n",
           __func__, (unsigned long long) parall_result, (unsigned long long) serial_result);

  /* --- parallel SUB (the '-' reduction combines partials like '+') --- */
  start_time = gettime();
#pragma omp parallel for reduction(- : parall_result) num_threads(NUM_THREADS)
  for (size_t i = 0; i < N; i++) {
    for (size_t j = 0; j < N; j++) {
      parall_result -= j;
    }
    parall_result -= i;
  }
  endin_time = gettime();
  printf("%s: parall SUB in %f secs\n", __func__, tdiff(start_time, endin_time));

  /* --- serial SUB --- */
  start_time = gettime();
  for (size_t i = 0; i < N; i++) {
    for (size_t j = 0; j < N; j++) {
      serial_result -= j;
    }
    serial_result -= i;
  }
  endin_time = gettime();
  printf("%s: serial SUB in %f secs\n", __func__, tdiff(start_time, endin_time));

  /* ADD followed by SUB of the same terms must cancel back to zero. */
  if (parall_result != 0 || serial_result != 0 || parall_result != serial_result)
    printf("Error in function %s, parall_result = %llu and serial_result = %llu.\n",
           __func__, (unsigned long long) parall_result, (unsigned long long) serial_result);
  return;
}
/* Benchmarks an OpenMP '*' reduction against the serial equivalent.
 * NOTE: both accumulators start at 0 and the inner loop multiplies by
 * j == 0 on its first iteration, so both results are trivially 0; the
 * function effectively measures loop overhead only.
 * Fixes vs. the original:
 *  - the inner index was a function-scope variable shared by all threads
 *    inside the parallel region (a data race); indices are now declared
 *    per-loop, so each thread gets its own copy;
 *  - the dead "j -= 15;" statements (j was immediately re-initialized by
 *    the next loop) were removed. */
void reduce_MUL(uint64_t N) {
  uint64_t parall_result = 0;
  uint64_t serial_result = 0;
  fasttime_t start_time, endin_time;

  /* --- parallel MUL --- */
  start_time = gettime();
#pragma omp parallel for reduction(* : parall_result) num_threads(NUM_THREADS)
  for (size_t i = 0; i < N; i++) {
    for (size_t j = 0; j < N; j++) {
      parall_result *= j;
    }
    parall_result *= i;
  }
  endin_time = gettime();
  printf("%s: parall MUL in %f secs\n", __func__, tdiff(start_time, endin_time));

  /* --- serial MUL --- */
  start_time = gettime();
  for (size_t i = 0; i < N; i++) {
    for (size_t j = 0; j < N; j++) {
      serial_result *= j;
    }
    serial_result *= i;
  }
  endin_time = gettime();
  printf("%s: serial MUL in %f secs\n", __func__, tdiff(start_time, endin_time));

  /* Check results */
  if (parall_result != serial_result) printf("Error in function %s, result is not correct.\n", __func__);
  return;
}
|
sgemm-openmp-kevin.c | #include <stdio.h>
#include <stdlib.h>
#include <emmintrin.h>
#include <math.h>
#include <float.h>
#include <string.h>
#include <omp.h>
#define NUM_THREADS 16
void mm_28( int m, int n, int ind1, int ind2, float *An, float *Cn);
void p_tail_4( int m, int r, int a, int x, int ind1, int ind2, float *An, float *Cn);
void tail(int r, int x, int a, int ind1, int ind2, float *An, float *Cn);
void p_tail_4_side(int m, int r, int a, int x, float *An, float *Cn);
void tail_side(int r, int x, int a, float *An, float *Cn);
/* Computes C += A * A^T for an m x n column-major matrix A (leading
 * dimension m), dividing work over NUM_THREADS OpenMP threads by chunks
 * of columns: mm_28 handles the 28-aligned row blocks, p_tail_4 the
 * remaining 4-aligned rows [r, a), and tail() the final scalar rows.
 * Chunks are assigned round-robin by thread id; the last thread also
 * picks up the remainder chunk. Unused locals (`x`, `b`) and dead
 * commented-out scheduling experiments from the original were removed. */
void sgemm( int m, int n, float *A, float *C )
{
    int BLOCKSIZE = 28;
    int r = ( m / BLOCKSIZE ) * BLOCKSIZE;  /* rows covered by the 28-wide kernel */
    int a = r + (m-r)/4*4;                  /* rows covered by the 4-wide tail */
    int num_chunks = 16;
    /* empirically tuned chunking by problem size */
    if(m > 5000)
        num_chunks = 32;
    if(m > 7000)
        num_chunks = 16;
    omp_set_num_threads(NUM_THREADS);
#pragma omp parallel
    {
        int id = omp_get_thread_num();
        int chunk = m/num_chunks;
        /* 28-aligned row blocks */
        for(int j = 0; j < num_chunks-1; j++)
        {
            if(id == j % NUM_THREADS)
                mm_28(m,n,(j*chunk),(j+1)*chunk,A,C);
        }
        if(id == NUM_THREADS-1)
        {
            /* last thread takes the final chunk plus the division remainder */
            mm_28(m,n,((num_chunks-1)*chunk),m,A,C);
        }
        /* 4-aligned leftover rows [r, a) */
        if(r != m)
        {
            for( int j = 0; j < num_chunks-1; j++)
            {
                if(id == j % NUM_THREADS)
                    p_tail_4(r,m,a,n,j*chunk,(j+1)*chunk,A,C);
            }
            if(id == NUM_THREADS-1)
            {
                p_tail_4(r,m,a,n,(num_chunks-1)*chunk,m,A,C);
            }
        }
        /* scalar remainder rows [a, m) */
        if(m % 4 !=0)
        {
            for(int j = 0; j < num_chunks-1; j++)
            {
                if(id == j % NUM_THREADS)
                    tail(m,n,a,j*chunk,(j+1)*chunk,A,C);
            }
            if(id == NUM_THREADS-1)
            {
                tail(m,n,a,(num_chunks-1)*chunk,m,A,C);
            }
        }
    }
}
/* SSE kernel: for each column j in [ind1, ind2), accumulate
 *   Cn[i + j*r] += sum_{k < x} An[i + k*r] * An[j + k*r]
 * over the 28-aligned rows i in [0, (r/28)*28) — i.e. a slice of
 * C += A*A^T with A column-major, leading dimension r. Each 28-row strip
 * of C is kept in seven 4-float SSE registers across the k loop.
 * The orphaned "#pragma omp nowait" / "#pragma omp private(...)" lines
 * of the original are not valid stand-alone OpenMP directives (rejected
 * under -fopenmp, ignored otherwise); parallelism is handled by the
 * caller (sgemm), so they and the unused `i` were removed. */
void mm_28( int r, int x, int ind1, int ind2, float *An, float *Cn)
{
    int BLOCKSIZE = 28;
    int m = ( r / BLOCKSIZE ) * BLOCKSIZE;  /* 28-aligned row bound */
    int j, k, blockInd1;
    __m128 c0,c1,c2,c3,c4,c5,c6,a0,a1,a2,a3,a4,a5,a6,a0T;
    for( j = ind1; j < ind2; j++)
    {
        for(blockInd1 = 0; blockInd1 < m; blockInd1 += BLOCKSIZE)
        {
            /* Load the 28-row strip of column j of C */
            c0 = _mm_loadu_ps(Cn+blockInd1+j*r);
            c1 = _mm_loadu_ps(Cn+blockInd1+4+j*r);
            c2 = _mm_loadu_ps(Cn+blockInd1+8+j*r);
            c3 = _mm_loadu_ps(Cn+blockInd1+12+j*r);
            c4 = _mm_loadu_ps(Cn+blockInd1+16+j*r);
            c5 = _mm_loadu_ps(Cn+blockInd1+20+j*r);
            c6 = _mm_loadu_ps(Cn+blockInd1+24+j*r);
            for(k = 0; k < x; k++)
            {
                /* Broadcast A[j,k], then multiply-accumulate the strip */
                a0T = _mm_load1_ps(An+j+k*r);
                a0 = _mm_loadu_ps(An+blockInd1+k*r);
                a1 = _mm_loadu_ps(An+blockInd1+4+k*r);
                a2 = _mm_loadu_ps(An+blockInd1+8+k*r);
                a3 = _mm_loadu_ps(An+blockInd1+12+k*r);
                a4 = _mm_loadu_ps(An+blockInd1+16+k*r);
                a5 = _mm_loadu_ps(An+blockInd1+20+k*r);
                a6 = _mm_loadu_ps(An+blockInd1+24+k*r);
                a0 = _mm_mul_ps(a0, a0T);
                a1 = _mm_mul_ps(a1, a0T);
                a2 = _mm_mul_ps(a2, a0T);
                a3 = _mm_mul_ps(a3, a0T);
                a4 = _mm_mul_ps(a4, a0T);
                a5 = _mm_mul_ps(a5, a0T);
                a6 = _mm_mul_ps(a6, a0T);
                c0 = _mm_add_ps(c0, a0);
                c1 = _mm_add_ps(c1, a1);
                c2 = _mm_add_ps(c2, a2);
                c3 = _mm_add_ps(c3, a3);
                c4 = _mm_add_ps(c4, a4);
                c5 = _mm_add_ps(c5, a5);
                c6 = _mm_add_ps(c6, a6);
            }
            /* Store the updated strip back into C */
            _mm_storeu_ps(Cn+blockInd1+j*r, c0);
            _mm_storeu_ps(Cn+blockInd1+4+j*r, c1);
            _mm_storeu_ps(Cn+blockInd1+8+j*r, c2);
            _mm_storeu_ps(Cn+blockInd1+12+j*r, c3);
            _mm_storeu_ps(Cn+blockInd1+16+j*r, c4);
            _mm_storeu_ps(Cn+blockInd1+20+j*r, c5);
            _mm_storeu_ps(Cn+blockInd1+24+j*r, c6);
        }
    }
}
/* End Program */
/* Scalar cleanup: for columns j in [ind1, ind2), accumulate
 *   Cn[i + j*r] += sum_{k < x} An[i + k*r] * An[j + k*r]
 * for the leftover rows i in [a, r) (pairs first, then a final odd row).
 * The (r-a > 1) / (r-a != 2) guards select which of the partial sums are
 * actually stored for 1- or 2-row remainders.
 * The orphaned "#pragma omp nowait" / "#pragma omp private(...)" lines
 * of the original are not valid stand-alone OpenMP directives; the
 * caller (sgemm) owns the parallelism, so they were removed. */
void tail( int r, int x, int a, int ind1, int ind2, float *An, float *Cn)
{
    int i, j, k;
    for( j = ind1; j < ind2; j++)
    {
        float temp1 = 0;
        float temp2 = 0;
        float temp3 = 0;
        for( k = 0; k < x; k++)
        {
            /* even pairs of remainder rows */
            for( i = a; i < r/2*2; i+=2)
            {
                temp1 += An[i+k*r] * An[j+k*r];
                temp2 += An[i+1+k*r] * An[j+k*r];
            }
            /* final odd row, if any */
            for( i = r/2*2; i < r; i++)
            {
                temp3 += An[i+k*r] * An[j+k*r];
            }
        }
        if(r-a > 1)
        {
            Cn[a+j*r] += temp1;
            Cn[a+1+j*r] += temp2;
        }
        if(r-a != 2)
            Cn[r/2*2+j*r] += temp3;
    }
}
/*
void tail_side( int r, int x, int a, float *An, float *Cn)
{
int i,j,k;
#pragma omp private(i,k,j);
//small side
for( j = a; j < r; j++)
{
for( k = 0; k < x; k++)
{
for( i = 0; i < a/8*8; i+=8)
{
Cn[i+j*r] += An[i+k*r] * An[j+k*r];
Cn[i+1+j*r] += An[i+1+k*r] * An[j+k*r];
Cn[i+2+j*r] += An[i+2+k*r] * An[j+k*r];
Cn[i+3+j*r] += An[i+3+k*r] * An[j+k*r];
Cn[i+4+j*r] += An[i+4+k*r] * An[j+k*r];
Cn[i+5+j*r] += An[i+5+k*r] * An[j+k*r];
Cn[i+6+j*r] += An[i+6+k*r] * An[j+k*r];
Cn[i+7+j*r] += An[i+7+k*r] * An[j+k*r];
}
for( i = a/8*8; i < a; i++) Cn[i+j*r] += An[i+k*r] * An[j+k*r];
}
}
}
*/
/* SSE cleanup for the 4-aligned leftover rows: for columns j in
 * [ind1, ind2), accumulate
 *   Cn[i + j*r] += sum_{k < x} An[i + k*r] * An[j + k*r]
 * over rows i in [m, a), where r is the leading dimension. The branch on
 * (m-a) picks the widest SSE strip (24/20/16/12/8/4 floats) that evenly
 * tiles the remainder; (m-a) % w == 0 iff (a-m) % w == 0, so the sign of
 * m-a does not matter.
 * Fixes vs. the original:
 *  - the (m-a) % 8 == 0 branch advanced i by 20 instead of 8 (a
 *    copy-paste slip from the 20-wide branch), skipping rows whenever
 *    the remainder exceeds one 8-row strip;
 *  - the orphaned "#pragma omp nowait" / "#pragma omp private(...)"
 *    lines are not valid stand-alone OpenMP directives and were removed
 *    (the caller owns the parallelism). */
void p_tail_4(int m, int r, int a, int x, int ind1, int ind2, float *An, float *Cn)
{
    int i,j,k;
    __m128 c0,c1,c2,c3,c4,c5,a0,a1,a2,a3,a4,a5,a0T;
    if((m-a) % 24 == 0)
    {
        for( j = ind1 ; j < ind2; j++)
        {
            for( i = m; i < a; i+=24 )
            {
                c0 = _mm_loadu_ps(Cn+i+j*r);
                c1 = _mm_loadu_ps(Cn+i+4+j*r);
                c2 = _mm_loadu_ps(Cn+i+8+j*r);
                c3 = _mm_loadu_ps(Cn+i+12+j*r);
                c4 = _mm_loadu_ps(Cn+i+16+j*r);
                c5 = _mm_loadu_ps(Cn+i+20+j*r);
                for( k = 0; k < x; k++ )
                {
                    a0T = _mm_load1_ps(An+j+k*r);
                    a0 = _mm_loadu_ps(An+i+k*r);
                    a1 = _mm_loadu_ps(An+i+4+k*r);
                    a2 = _mm_loadu_ps(An+i+8+k*r);
                    a3 = _mm_loadu_ps(An+i+12+k*r);
                    a4 = _mm_loadu_ps(An+i+16+k*r);
                    a5 = _mm_loadu_ps(An+i+20+k*r);
                    a0 = _mm_mul_ps(a0, a0T);
                    a1 = _mm_mul_ps(a1, a0T);
                    a2 = _mm_mul_ps(a2, a0T);
                    a3 = _mm_mul_ps(a3, a0T);
                    a4 = _mm_mul_ps(a4, a0T);
                    a5 = _mm_mul_ps(a5, a0T);
                    c0 = _mm_add_ps(c0, a0);
                    c1 = _mm_add_ps(c1, a1);
                    c2 = _mm_add_ps(c2, a2);
                    c3 = _mm_add_ps(c3, a3);
                    c4 = _mm_add_ps(c4, a4);
                    c5 = _mm_add_ps(c5, a5);
                }
                _mm_storeu_ps(Cn+i+j*r, c0);
                _mm_storeu_ps(Cn+i+4+j*r, c1);
                _mm_storeu_ps(Cn+i+8+j*r, c2);
                _mm_storeu_ps(Cn+i+12+j*r, c3);
                _mm_storeu_ps(Cn+i+16+j*r, c4);
                _mm_storeu_ps(Cn+i+20+j*r, c5);
            }
        }
    }else if((m-a) % 20 == 0){
        for( j = ind1 ; j < ind2; j++)
        {
            for( i = m; i < a; i+=20 )
            {
                c0 = _mm_loadu_ps(Cn+i+j*r);
                c1 = _mm_loadu_ps(Cn+i+4+j*r);
                c2 = _mm_loadu_ps(Cn+i+8+j*r);
                c3 = _mm_loadu_ps(Cn+i+12+j*r);
                c4 = _mm_loadu_ps(Cn+i+16+j*r);
                for( k = 0; k < x; k++ )
                {
                    a0T = _mm_load1_ps(An+j+k*r);
                    a0 = _mm_loadu_ps(An+i+k*r);
                    a1 = _mm_loadu_ps(An+i+4+k*r);
                    a2 = _mm_loadu_ps(An+i+8+k*r);
                    a3 = _mm_loadu_ps(An+i+12+k*r);
                    a4 = _mm_loadu_ps(An+i+16+k*r);
                    a0 = _mm_mul_ps(a0, a0T);
                    a1 = _mm_mul_ps(a1, a0T);
                    a2 = _mm_mul_ps(a2, a0T);
                    a3 = _mm_mul_ps(a3, a0T);
                    a4 = _mm_mul_ps(a4, a0T);
                    c0 = _mm_add_ps(c0, a0);
                    c1 = _mm_add_ps(c1, a1);
                    c2 = _mm_add_ps(c2, a2);
                    c3 = _mm_add_ps(c3, a3);
                    c4 = _mm_add_ps(c4, a4);
                }
                _mm_storeu_ps(Cn+i+j*r, c0);
                _mm_storeu_ps(Cn+i+4+j*r, c1);
                _mm_storeu_ps(Cn+i+8+j*r, c2);
                _mm_storeu_ps(Cn+i+12+j*r, c3);
                _mm_storeu_ps(Cn+i+16+j*r, c4);
            }
        }
    }else if((m-a) % 16 == 0){
        for( j = ind1 ; j < ind2; j++)
        {
            for( i = m; i < a; i+=16 )
            {
                c0 = _mm_loadu_ps(Cn+i+j*r);
                c1 = _mm_loadu_ps(Cn+i+4+j*r);
                c2 = _mm_loadu_ps(Cn+i+8+j*r);
                c3 = _mm_loadu_ps(Cn+i+12+j*r);
                for( k = 0; k < x; k++ )
                {
                    a0T = _mm_load1_ps(An+j+k*r);
                    a0 = _mm_loadu_ps(An+i+k*r);
                    a1 = _mm_loadu_ps(An+i+4+k*r);
                    a2 = _mm_loadu_ps(An+i+8+k*r);
                    a3 = _mm_loadu_ps(An+i+12+k*r);
                    a0 = _mm_mul_ps(a0, a0T);
                    a1 = _mm_mul_ps(a1, a0T);
                    a2 = _mm_mul_ps(a2, a0T);
                    a3 = _mm_mul_ps(a3, a0T);
                    c0 = _mm_add_ps(c0, a0);
                    c1 = _mm_add_ps(c1, a1);
                    c2 = _mm_add_ps(c2, a2);
                    c3 = _mm_add_ps(c3, a3);
                }
                _mm_storeu_ps(Cn+i+j*r, c0);
                _mm_storeu_ps(Cn+i+4+j*r, c1);
                _mm_storeu_ps(Cn+i+8+j*r, c2);
                _mm_storeu_ps(Cn+i+12+j*r, c3);
            }
        }
    }else if((m-a) % 12 == 0){
        for( j = ind1 ; j < ind2; j++)
        {
            for( i = m; i < a; i+=12 )
            {
                c0 = _mm_loadu_ps(Cn+i+j*r);
                c1 = _mm_loadu_ps(Cn+i+4+j*r);
                c2 = _mm_loadu_ps(Cn+i+8+j*r);
                for( k = 0; k < x; k++ )
                {
                    a0T = _mm_load1_ps(An+j+k*r);
                    a0 = _mm_loadu_ps(An+i+k*r);
                    a1 = _mm_loadu_ps(An+i+4+k*r);
                    a2 = _mm_loadu_ps(An+i+8+k*r);
                    a0 = _mm_mul_ps(a0, a0T);
                    a1 = _mm_mul_ps(a1, a0T);
                    a2 = _mm_mul_ps(a2, a0T);
                    c0 = _mm_add_ps(c0, a0);
                    c1 = _mm_add_ps(c1, a1);
                    c2 = _mm_add_ps(c2, a2);
                }
                _mm_storeu_ps(Cn+i+j*r, c0);
                _mm_storeu_ps(Cn+i+4+j*r, c1);
                _mm_storeu_ps(Cn+i+8+j*r, c2);
            }
        }
    }else if((m-a) % 8 == 0){
        for( j = ind1 ; j < ind2; j++)
        {
            /* BUGFIX: stride was 20 (copied from the 20-wide branch) */
            for( i = m; i < a; i+=8 )
            {
                c0 = _mm_loadu_ps(Cn+i+j*r);
                c1 = _mm_loadu_ps(Cn+i+4+j*r);
                for( k = 0; k < x; k++ )
                {
                    a0T = _mm_load1_ps(An+j+k*r);
                    a0 = _mm_loadu_ps(An+i+k*r);
                    a1 = _mm_loadu_ps(An+i+4+k*r);
                    a0 = _mm_mul_ps(a0, a0T);
                    a1 = _mm_mul_ps(a1, a0T);
                    c0 = _mm_add_ps(c0, a0);
                    c1 = _mm_add_ps(c1, a1);
                }
                _mm_storeu_ps(Cn+i+j*r, c0);
                _mm_storeu_ps(Cn+i+4+j*r, c1);
            }
        }
    }else{
        for( j = ind1 ; j < ind2; j++)
        {
            for( i = m; i < a; i+=4 )
            {
                c0 = _mm_loadu_ps(Cn+i+j*r);
                for( k = 0; k < x; k++ )
                {
                    a0T = _mm_load1_ps(An+j+k*r);
                    a0 = _mm_loadu_ps(An+i+k*r);
                    a0 = _mm_mul_ps(a0, a0T);
                    c0 = _mm_add_ps(c0, a0);
                }
                _mm_storeu_ps(Cn+i+j*r, c0);
            }
        }
    }
}
/*
void p_tail_4_side(int m, int r, int a, int x, float *An, float *Cn)
{
int i,j,k;
__m128 c0,a0,a0T;
#pragma omp private(i,j,k,c0,a0,a0T)
//parallel sides
for( j = m; j < a; j++)
{
for( i = 0; i < m; i+=4)
{
c0 = _mm_loadu_ps(Cn+i+j*r);
for( k = 0; k < x; k++)
{
a0T = _mm_load1_ps(An+j+k*r);
a0 = _mm_loadu_ps(An+i+k*r);
a0 = _mm_mul_ps(a0, a0T);
c0 = _mm_add_ps(c0, a0);
}
_mm_storeu_ps(Cn+i+j*r, c0);
}
}
}
*/
|
irbuilder_for_unsigned_dynamic_chunked.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK-LABEL: define {{.*}}@workshareloop_unsigned_dynamic_chunked(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
// CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
// CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8
// CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8
// CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8
// CHECK-NEXT: store i32 33, i32* %[[I]], align 4
// CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
// CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER]]:
// CHECK-NEXT: store i32 1, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: store i32 %[[DOTCOUNT]], i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_dispatch_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 1073741859, i32 1, i32 %[[DOTCOUNT]], i32 1, i32 5)
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER_OUTER_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_HEADER:.*]]:
// CHECK-NEXT: %[[OMP_LOOP_IV:.+]] = phi i32 [ %[[LB:.+]], %[[OMP_LOOP_PREHEADER_OUTER_COND]] ], [ %[[OMP_LOOP_NEXT:.+]], %[[OMP_LOOP_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_LOOP_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_COND]]:
// CHECK-NEXT: %[[UB:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: %[[OMP_LOOP_CMP:.+]] = icmp ult i32 %[[OMP_LOOP_IV]], %[[UB]]
// CHECK-NEXT: br i1 %[[OMP_LOOP_CMP]], label %[[OMP_LOOP_BODY:.+]], label %[[OMP_LOOP_PREHEADER_OUTER_COND]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_BODY]]:
// CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[OMP_LOOP_IV]], %struct.anon.0* %[[AGG_CAPTURED1]])
// CHECK-NEXT: %[[TMP3:.+]] = load float*, float** %[[B_ADDR]], align 8
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = zext i32 %[[TMP4]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP3]], i64 %[[IDXPROM]]
// CHECK-NEXT: %[[TMP5:.+]] = load float, float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: %[[TMP6:.+]] = load float*, float** %[[C_ADDR]], align 8
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM2:.+]] = zext i32 %[[TMP7]] to i64
// CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP6]], i64 %[[IDXPROM2]]
// CHECK-NEXT: %[[TMP8:.+]] = load float, float* %[[ARRAYIDX3]], align 4
// CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP5]], %[[TMP8]]
// CHECK-NEXT: %[[TMP9:.+]] = load float*, float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM4:.+]] = zext i32 %[[TMP10]] to i64
// CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP9]], i64 %[[IDXPROM4]]
// CHECK-NEXT: %[[TMP11:.+]] = load float, float* %[[ARRAYIDX5]], align 4
// CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP11]]
// CHECK-NEXT: %[[TMP12:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP13:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM7:.+]] = zext i32 %[[TMP13]] to i64
// CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP12]], i64 %[[IDXPROM7]]
// CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_INC]]:
// CHECK-NEXT: %[[OMP_LOOP_NEXT]] = add nuw i32 %[[OMP_LOOP_IV]], 1
// CHECK-NEXT: br label %[[OMP_LOOP_HEADER]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_EXIT:.*]]:
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM9:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM9]])
// CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER_OUTER_COND]]:
// CHECK-NEXT: %[[TMP14:.+]] = call i32 @__kmpc_dispatch_next_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]])
// CHECK-NEXT: %[[TMP15:.+]] = icmp ne i32 %[[TMP14]], 0
// CHECK-NEXT: %[[TMP16:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[LB]] = sub i32 %[[TMP16]], 1
// CHECK-NEXT: br i1 %[[TMP15]], label %[[OMP_LOOP_HEADER]], label %[[OMP_LOOP_EXIT]]
// CHECK-NEXT: }
// Exercises OpenMPIRBuilder lowering of a worksharing loop with an unsigned
// induction variable, a non-unit step, and schedule(dynamic, 5).  The CHECK
// lines above pin the exact LLVM IR produced (dispatch_init/dispatch_next
// with chunk 5), so the bounds, step, and body here must stay in sync with
// those expectations — do not change this function without regenerating them.
extern "C" void workshareloop_unsigned_dynamic_chunked(float *a, float *b, float *c, float *d) {
#pragma omp for schedule(dynamic, 5)
  for (unsigned i = 33; i < 32000000; i += 7) {
    a[i] = b[i] * c[i] * d[i];
  }
}
#endif // HEADER
// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: store i32 32000000, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 7, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp ult i32 %[[TMP4]], %[[TMP5]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub i32 %[[TMP6]], %[[TMP7]]
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP8]], 1
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]]
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]]
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
// CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LABEL: define {{.*}}@__captured_stmt.1(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: %[[MUL:.+]] = mul i32 7, %[[TMP3]]
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
// CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 45}
// CHECK: ![[META2:[0-9]+]] =
|
naive_math_impl.h | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <cstring>
// Re-pack a row-major (M x K, row stride ldin) matrix into the "c4"
// interleaved layout: rows are taken in bundles of four and their elements
// written alternately (r0,r1,r2,r3,r0,...).  Rows past M read from a zero
// buffer; when pack_k is set, the K dimension is zero-padded up to a
// multiple of 4.
template <typename type>
static void basic_trans_mat_to_c4(const type* input,
                                  type* output,
                                  const int ldin,
                                  const int M,
                                  const int K,
                                  bool pack_k) {
  const int m_round = (M + 3) / 4 * 4;
  const int k_round = pack_k ? (K + 3) / 4 * 4 : K;
  const int m_loop = m_round / 4;
  type* zero_buf = new type[K];
  memset(zero_buf, 0, K * sizeof(type));
  for (int i = 0; i < m_loop; ++i) {
    const type* row[4];
    for (int r = 0; r < 4; ++r) {
      const int m_idx = i * 4 + r;
      // Row 0 of a bundle is always inside the matrix (m_loop = ceil(M/4));
      // later rows that fall past the bottom are replaced by zeros.
      row[r] = (r == 0 || m_idx < M) ? input + m_idx * ldin : zero_buf;
    }
    for (int j = 0; j < K; ++j) {
      for (int r = 0; r < 4; ++r) {
        *output++ = *row[r]++;
      }
    }
    // Zero-pad the K dimension when packing (no-op when k_round == K).
    for (int j = K; j < k_round; ++j) {
      for (int r = 0; r < 4; ++r) {
        *output++ = static_cast<type>(0);
      }
    }
  }
  delete[] zero_buf;
}
// Re-pack a row-major (M x K, row stride ldin) matrix into the "c8"
// interleaved layout: rows are taken in bundles of eight and their elements
// written alternately (r0,...,r7,r0,...).  Rows past M read from a zero
// buffer; when pack_k is set, the K dimension is zero-padded up to a
// multiple of 8.
template <typename type>
static void basic_trans_mat_to_c8(const type* input,
                                  type* output,
                                  const int ldin,
                                  const int M,
                                  const int K,
                                  bool pack_k) {
  const int m_round = (M + 7) / 8 * 8;
  int k_round = (K + 7) / 8 * 8;
  if (!pack_k) {
    k_round = K;
  }
  const int m_loop = m_round / 8;
  // Heap-allocate the zero row.  The previous `type zero_buf[K]` was a
  // runtime-sized VLA — a non-standard C++ extension and a stack-overflow
  // risk for large K; this also matches basic_trans_mat_to_c4.
  type* zero_buf = new type[K];
  memset(zero_buf, 0, K * sizeof(type));
  for (int i = 0; i < m_loop; ++i) {
    const type* in[8];
    in[0] = input + i * 8 * ldin;
    for (int r = 1; r < 8; ++r) {
      in[r] = in[r - 1] + ldin;
    }
    // Rows of this bundle that fall past row M-1 read zeros instead
    // (equivalent to the original fallthrough switch on 8*(i+1)-M).
    const int overshoot = 8 * (i + 1) - M;
    if (overshoot > 0) {
      for (int r = 1; r < 8; ++r) {
        if (r >= 8 - overshoot) {
          in[r] = zero_buf;
        }
      }
    }
    for (int j = 0; j < K; ++j) {
      for (int r = 0; r < 8; ++r) {
        *output++ = *in[r]++;
      }
    }
    // Zero-pad the K dimension when packing (no-op when k_round == K).
    for (int j = K; j < k_round; ++j) {
      for (int r = 0; r < 8; ++r) {
        *output++ = static_cast<type>(0);
      }
    }
  }
  delete[] zero_buf;
}
//! Reference GEMM whose result is emitted in the c4 interleaved layout:
//! computes tmp = alpha * op(a) * op(b) + bias (op = transpose when the
//! corresponding trans flag is set), optionally applies ReLU, then re-packs
//! the dense scratch result into `c` with basic_trans_mat_to_c4.
//! \param m,n,k        GEMM dimensions.
//! \param lda,ldb,ldc  leading dimensions of a, b and the scratch/output.
//! \param beta         multiplies the scratch accumulator, which is zeroed
//!                     first, so it effectively contributes nothing.
//! \param bias         per-row bias, read only when flag_bias is true.
template <typename type, typename type2>
static void basic_gemm_c4(bool trans_a,
                          bool trans_b,
                          int m,
                          int n,
                          int k,
                          type2 alpha,
                          const type* a,
                          int lda,
                          const type* b,
                          int ldb,
                          type2 beta,
                          type2* c,
                          int ldc,
                          const type2* bias,
                          bool flag_bias = false,
                          bool flag_relu = false) {
  type2* tmp_c = static_cast<type2*>(malloc(m * ldc * sizeof(type2)));
  if (tmp_c == nullptr) {
    return;  // OOM: previously the null pointer went straight into memset.
  }
  memset(tmp_c, 0, m * ldc * sizeof(type2));
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];
    }
    for (int j = 0; j < n; ++j) {
      auto sum = static_cast<type2>(0);
      for (int l = 0; l < k; ++l) {
        type av;
        type bv;
        if (trans_a) {
          av = a[l * lda + i];
        } else {
          av = a[i * lda + l];
        }
        if (trans_b) {
          bv = b[j * ldb + l];
        } else {
          bv = b[l * ldb + j];
        }
        sum += av * bv;
      }
      type2 tmp = alpha * sum + beta * tmp_c[i * ldc + j] + bias_data;
      if (flag_relu) {
        tmp_c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
      } else {
        tmp_c[i * ldc + j] = tmp;
      }
    }
  }
  //! trans c to c4
  basic_trans_mat_to_c4(tmp_c, c, ldc, m, n, false);
  free(tmp_c);
}
//! Reference GEMM whose result is emitted in the c8 interleaved layout:
//! computes tmp = alpha * op(a) * op(b) + bias (op = transpose when the
//! corresponding trans flag is set), optionally applies ReLU, then re-packs
//! the dense scratch result into `c` with basic_trans_mat_to_c8.
//! \param m,n,k        GEMM dimensions.
//! \param lda,ldb,ldc  leading dimensions of a, b and the scratch/output.
//! \param beta         multiplies the scratch accumulator, which is zeroed
//!                     first, so it effectively contributes nothing.
//! \param bias         per-row bias, read only when flag_bias is true.
template <typename type, typename type2>
static void basic_gemm_c8(bool trans_a,
                          bool trans_b,
                          int m,
                          int n,
                          int k,
                          type2 alpha,
                          const type* a,
                          int lda,
                          const type* b,
                          int ldb,
                          type2 beta,
                          type2* c,
                          int ldc,
                          const type2* bias,
                          bool flag_bias = false,
                          bool flag_relu = false) {
  type2* tmp_c = static_cast<type2*>(malloc(m * ldc * sizeof(type2)));
  if (tmp_c == nullptr) {
    return;  // OOM: previously the null pointer went straight into memset.
  }
  memset(tmp_c, 0, m * ldc * sizeof(type2));
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];
    }
    for (int j = 0; j < n; ++j) {
      auto sum = static_cast<type2>(0);
      for (int l = 0; l < k; ++l) {
        type av;
        type bv;
        if (trans_a) {
          av = a[l * lda + i];
        } else {
          av = a[i * lda + l];
        }
        if (trans_b) {
          bv = b[j * ldb + l];
        } else {
          bv = b[l * ldb + j];
        }
        sum += av * bv;
      }
      type2 tmp = alpha * sum + beta * tmp_c[i * ldc + j] + bias_data;
      if (flag_relu) {
        tmp_c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
      } else {
        tmp_c[i * ldc + j] = tmp;
      }
    }
  }
  //! trans c to c8 (the previous comment said c4 but the call is c8)
  basic_trans_mat_to_c8(tmp_c, c, ldc, m, n, false);
  free(tmp_c);
}
//! Plain reference GEMM: c = alpha * op(a) * op(b) + beta * c + bias,
//! followed by an optional activation selected by `flag_act`:
//!   0: none, 1: ReLU, 2: ReLU6 (clamped at `six`),
//!   4: leaky ReLU with slope `leakey_relu_alpha`,
//!   10: hard-swish built from `offset`, `threshold` and `scale`.
//! `flag_act` is declared `int` but defaulted with `false` (i.e. 0).
//! \param lda,ldb,ldc  leading dimensions of a, b and c.
//! \param bias         per-output-row bias, read only when flag_bias is true.
template <typename type, typename type2>
static void basic_gemm(bool trans_a,
                       bool trans_b,
                       int m,
                       int n,
                       int k,
                       type2 alpha,
                       const type* a,
                       int lda,
                       const type* b,
                       int ldb,
                       type2 beta,
                       type2* c,
                       int ldc,
                       const type2* bias,
                       bool flag_bias = false,
                       int flag_act = false,
                       float six = 6.f,
                       float leakey_relu_alpha = 1.f,
                       float scale = 6.f,
                       float offset = 3.f,
                       float threshold = 6.f) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];
    }
    for (int j = 0; j < n; ++j) {
      // Inner product of row/col i of op(a) with row/col j of op(b).
      auto sum = static_cast<type2>(0);
      for (int l = 0; l < k; ++l) {
        type av;
        type bv;
        if (trans_a) {
          av = a[l * lda + i];
        } else {
          av = a[i * lda + l];
        }
        if (trans_b) {
          bv = b[j * ldb + l];
        } else {
          bv = b[l * ldb + j];
        }
        sum += av * bv;
      }
      type2 tmp = alpha * sum + beta * c[i * ldc + j] + bias_data;
      if (flag_act > 0) {
        if (flag_act == 1) {  // relu
          c[i * ldc + j] =
              tmp > static_cast<type2>(0) ? tmp : static_cast<type2>(0);
        } else if (flag_act == 2) {  // relu 6: clamp to [0, six]
          c[i * ldc + j] =
              tmp > static_cast<type2>(0) ? tmp : static_cast<type2>(0);
          c[i * ldc + j] = c[i * ldc + j] < static_cast<type2>(six)
                               ? c[i * ldc + j]
                               : static_cast<type2>(six);
        } else if (flag_act == 4) {  // leaky relu
          c[i * ldc + j] = tmp < static_cast<type2>(0)
                               ? static_cast<type2>(tmp * leakey_relu_alpha)
                               : tmp;
        } else if (flag_act == 10) {  // hard swish:
          // tmp * clamp(tmp + offset, 0, threshold) / scale, written out
          // branch by branch.
          auto tmp1 = tmp + offset;
          if (tmp1 > 0) {
            if (tmp1 < threshold) {
              c[i * ldc + j] = static_cast<type2>(tmp1 * tmp * 1.0 / scale);
            } else {
              c[i * ldc + j] =
                  static_cast<type2>(threshold * tmp * 1.0 / scale);
            }
          } else {
            if (threshold > 0) {
              c[i * ldc + j] = static_cast<type2>(0);
            } else {
              c[i * ldc + j] =
                  static_cast<type2>(threshold * tmp * 1.0 / scale);
            }
          }
        }
        // NOTE(review): unrecognized positive flag_act values leave
        // c[i * ldc + j] untouched (tmp is discarded) — confirm intended.
      } else {
        c[i * ldc + j] = tmp;
      }
    }
  }
}
//! Reference GEMV: c = alpha * op(a) * b + beta * c + bias, followed by an
//! optional activation selected by `flag_act` (same codes as basic_gemm:
//! 1 ReLU, 2 ReLU6 via `six`, 4 leaky ReLU via `leakey_relu_alpha`,
//! 10 hard-swish via `offset`/`threshold`/`scale`).
//! \param m,k      a is m x k (or k x m when trans_a), b has k elements.
//! \param bias     per-row bias, read only when flag_bias is true.
template <typename type, typename type2>
static void basic_gemv(int m,
                       int k,
                       const type* a,
                       const type* b,
                       const type2* bias,
                       type2* c,
                       type2 alpha,
                       type2 beta,
                       bool trans_a = false,
                       bool flag_bias = false,
                       int flag_act = false,
                       float six = 6.f,
                       float leakey_relu_alpha = 1.f,
                       float scale = 6.f,
                       float offset = 3.f,
                       float threshold = 6.f) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];
    }
    // Dot product of row i of op(a) with vector b.
    auto sum = static_cast<type2>(0);
    for (int j = 0; j < k; ++j) {
      type av;
      if (trans_a) {
        av = a[j * m + i];
      } else {
        av = a[i * k + j];
      }
      sum += av * b[j];
    }
    type2 tmp = alpha * sum + beta * c[i] + bias_data;
    if (flag_act > 0) {
      if (flag_act == 1) {  // relu
        c[i] = tmp > (type2)0 ? tmp : (type2)0;
      } else if (flag_act == 2) {  // relu 6
        c[i] = tmp > (type2)0 ? tmp : (type2)0;
        c[i] = c[i] < six ? c[i] : six;  // ut compute
      } else if (flag_act == 4) {  // leakey relu
        c[i] = tmp < (type2)0 ? (type2)(tmp * leakey_relu_alpha) : tmp;
      } else if (flag_act == 10) {  // hard_swish
        // clamp(tmp + offset, 0, threshold) * tmp / scale
        c[i] = std::min(static_cast<type2>(threshold),
                        std::max(static_cast<type2>(0),
                                 static_cast<type2>(tmp + offset))) *
               static_cast<type2>(tmp * 1.0 / scale);
      }
      // NOTE(review): other positive flag_act values leave c[i] untouched.
    } else {
      c[i] = tmp;
    }
  }
}
/**
* \brief basic direct convolution function
*/
//! for float, dtype1 and type2 is float
//! for int8, dytpe1 is char, dtype2 is int
//! Naive reference implementation of grouped, dilated, padded 2-D
//! convolution (NCHW), used as ground truth in tests.  The output is
//! overwritten: dout = bias + dout * beta with beta fixed to 0, then
//! accumulated, then the activation `act_type` is applied:
//!   0 none, 1 ReLU, 2 ReLU6 (clamped at `six`),
//!   4 leaky ReLU — NOTE(review): the negative slope is taken from `scale`
//!   (default 1.f), not from a dedicated parameter — confirm callers,
//!   10 hard-swish using `offset`, `threshold` and `hard_scale`.
template <typename Dtype1, typename Dtype2>
static void conv_basic(const Dtype1* din,
                       Dtype2* dout,
                       int num,
                       int chout,
                       int hout,
                       int wout,
                       int chin,
                       int hin,
                       int win,
                       const Dtype1* weights,
                       const Dtype2* bias,
                       int group,
                       int kernel_w,
                       int kernel_h,
                       int stride_w,
                       int stride_h,
                       int dila_w,
                       int dila_h,
                       int pad_w,
                       int pad_h,
                       bool flag_bias,
                       int act_type,
                       float six = 6.f,
                       float scale = 1.f,
                       const float hard_scale = 6.f,
                       const float offset = 3.f,
                       const float threshold = 6.f) {
  // beta is fixed to 0, so prior contents of dout are discarded below.
  Dtype2 beta = 0;
  auto src_data = din;
  auto dst_data_ref = dout;
  auto weights_data = weights;
  auto with_bias = flag_bias;
  auto bias_data = bias;
  int in_num = num;
  int out_channels = chout;
  int out_h = hout;
  int out_w = wout;
  int in_channel = chin;
  int in_h = hin;
  int in_w = win;
  int out_c_group = out_channels / group;
  int in_c_group = in_channel / group;
  for (int n = 0; n < in_num; ++n) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
    for (int g = 0; g < group; ++g) {
      for (int oc = 0; oc < out_c_group; ++oc) {
        for (int oh = 0; oh < out_h; ++oh) {
          for (int ow = 0; ow < out_w; ++ow) {
            // Flat NCHW index of the output element being produced.
            int out_idx = n * group * out_c_group * out_h * out_w +
                          g * out_c_group * out_h * out_w + oc * out_h * out_w +
                          oh * out_w + ow;
            Dtype2 bias_d = with_bias ? (bias_data[g * out_c_group + oc]) : 0;
            dst_data_ref[out_idx] = bias_d + dst_data_ref[out_idx] * beta;
            for (int ic = 0; ic < in_c_group; ++ic) {
              for (int kh = 0; kh < kernel_h; ++kh) {
                for (int kw = 0; kw < kernel_w; ++kw) {
                  // Input coordinates for this tap; taps that land in the
                  // padding region are skipped (implicit zero padding).
                  int iw = ow * stride_w - pad_w + kw * (dila_w);
                  int ih = oh * stride_h - pad_h + kh * (dila_h);
                  if (iw < 0 || iw >= in_w) continue;
                  if (ih < 0 || ih >= in_h) continue;
                  int iidx = n * in_channel * in_h * in_w +
                             g * in_c_group * in_h * in_w + ic * in_h * in_w +
                             ih * in_w + iw;
                  int widx =
                      g * out_c_group * in_c_group * kernel_h * kernel_w +
                      oc * in_c_group * kernel_h * kernel_w +
                      ic * kernel_h * kernel_w + kh * kernel_w + kw;
                  dst_data_ref[out_idx] += src_data[iidx] * weights_data[widx];
                }
              }
            }
            if (act_type > 0) {
              // 1-relu 2-relu6 4-leakyrelu
              if (act_type == 1) {
                dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)0;
              } else if (act_type == 2) {
                dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)0;
                dst_data_ref[out_idx] = dst_data_ref[out_idx] < (Dtype2)six
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)six;
              } else if (act_type == 4) {
                // Leaky ReLU; `scale` plays the role of the negative slope.
                dst_data_ref[out_idx] =
                    dst_data_ref[out_idx] > (Dtype2)0
                        ? dst_data_ref[out_idx]
                        : (Dtype2)(dst_data_ref[out_idx] * scale);
              } else if (act_type == 10) {
                // Hard-swish: x * clamp(x + offset, 0, threshold) / hard_scale
                auto tmp = dst_data_ref[out_idx] + offset;
                auto tmp1 = dst_data_ref[out_idx] * 1.0 / hard_scale;
                if (tmp > 0) {
                  if (tmp < threshold) {
                    dst_data_ref[out_idx] = static_cast<Dtype2>(tmp * tmp1);
                  } else {
                    dst_data_ref[out_idx] =
                        static_cast<Dtype2>(threshold * tmp1);
                  }
                } else {
                  if (threshold > 0) {
                    dst_data_ref[out_idx] = static_cast<Dtype2>(0);
                  } else {
                    dst_data_ref[out_idx] =
                        static_cast<Dtype2>(threshold * tmp1);
                  }
                }
              } else {
                printf("this act type: %d does not support \n", act_type);
              }
            }
          }
        }
      }
    }
  }
}
// Add a per-channel bias to a CHW tensor in place and, when flag_relu is
// set, clamp negative results to zero.  `tensor` holds `channel` planes of
// `channel_size` contiguous elements each.
template <typename Dtype>
static void fill_bias_relu(Dtype* tensor,
                           const Dtype* bias,
                           int channel,
                           int channel_size,
                           bool flag_bias,
                           bool flag_relu) {
  for (int ch = 0; ch < channel; ++ch) {
    const Dtype bias_v = flag_bias ? bias[ch] : 0;
    Dtype* plane = tensor + ch * channel_size;
    for (int idx = 0; idx < channel_size; ++idx) {
      Dtype v = plane[idx] + bias_v;
      if (flag_relu && !(v > 0)) {
        v = 0.f;
      }
      plane[idx] = v;
    }
  }
}
// In-place ReLU: every element of `tensor` that is not positive is set to
// zero; positive elements are left unchanged.
template <typename Dtype>
static void do_relu(Dtype* tensor, int size) {
  for (int idx = 0; idx < size; ++idx) {
    if (!(tensor[idx] > 0)) {
      tensor[idx] = (Dtype)0;
    }
  }
}
// True iff 0 <= a < b, done with a single unsigned comparison: a negative
// `a` wraps to a huge unsigned value and therefore fails the `< b` test.
inline bool is_a_ge_zero_and_a_lt_b(int a, int b) {
  const unsigned ua = static_cast<unsigned>(a);
  const unsigned ub = static_cast<unsigned>(b);
  return ua < ub;
}
// Inverse of im2col: scatter-adds the column buffer `data_col` (laid out as
// channels x kernel_h x kernel_w x output_h x output_w) back into the image
// `data_im` (channels x height x width), accumulating where kernel taps
// overlap.  Supports asymmetric padding (h0/h1, w0/w1), strides and
// dilation.  `data_im` is zeroed first.
template <typename Dtype>
static void col2im(const Dtype* data_col,
                   const int channels,
                   const int height,
                   const int width,
                   const int kernel_h,
                   const int kernel_w,
                   const int pad_h0,
                   const int pad_h1,
                   const int pad_w0,
                   const int pad_w1,
                   const int stride_h,
                   const int stride_w,
                   const int dilation_h,
                   const int dilation_w,
                   Dtype* data_im) {
  memset(data_im, 0, height * width * channels * sizeof(Dtype));
  // Output spatial extent implied by the convolution parameters.
  const int output_h =
      (height + pad_h0 + pad_h1 - (dilation_h * (kernel_h - 1) + 1)) /
          stride_h +
      1;
  const int output_w =
      (width + pad_w0 + pad_w1 - (dilation_w * (kernel_w - 1) + 1)) / stride_w +
      1;
  const int channel_size = height * width;
  // data_col is consumed strictly sequentially; data_im advances one
  // channel plane per outer iteration.
  for (int channel = channels; channel--; data_im += channel_size) {
    for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
      for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
        int input_row = -pad_h0 + kernel_row * dilation_h;
        for (int output_rows = output_h; output_rows; output_rows--) {
          if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
            // Whole output row maps into padding: skip it in the column
            // buffer without touching the image.
            data_col += output_w;
          } else {
            int input_col = -pad_w0 + kernel_col * dilation_w;
            for (int output_col = output_w; output_col; output_col--) {
              if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                data_im[input_row * width + input_col] += *data_col;
              }
              data_col++;
              input_col += stride_w;
            }
          }
          input_row += stride_h;
        }
      }
    }
  }
}
//! for float, dtype1 and type2 is float
//! for int8, dytpe1 is char, dtype2 is int
//! Reference grouped transposed convolution (deconvolution): per batch it
//! runs a GEMM of the transposed weights against the input (producing a
//! column buffer), then scatters it back to the output image with col2im.
//! The 1x1/stride-1/no-pad/no-dilation case writes the GEMM result straight
//! into the output.  `flag_relu` without a bias is folded into the GEMM's
//! activation; with a bias it is applied together with the bias add.
template <typename Dtype1, typename Dtype2>
void deconv_basic(const Dtype1* din,
                  Dtype2* dout,
                  int num,
                  int chout,
                  int hout,
                  int wout,
                  int chin,
                  int hin,
                  int win,
                  const Dtype1* weights,
                  const Dtype2* bias,
                  int group,
                  int kernel_w,
                  int kernel_h,
                  int stride_w,
                  int stride_h,
                  int dila_w,
                  int dila_h,
                  int pad_w0,
                  int pad_w1,
                  int pad_h0,
                  int pad_h1,
                  bool flag_bias,
                  bool flag_relu) {
  int m = chout * kernel_w * kernel_h / group;
  int n = hin * win;
  int k = chin / group;
  int group_size_in = win * hin * chin / group;
  int group_size_coldata = m * n;
  int group_size_weights = chin * chout * kernel_w * kernel_h / (group * group);
  bool flag_1x1s1p1 = (kernel_w == 1) && (kernel_h == 1) && (stride_h == 1) &&
                      (stride_w == 1) && (pad_w0 == 0) && (pad_h0 == 0) &&
                      (pad_w1 == 0) && (pad_h1 == 0) && (dila_w == 1) &&
                      (dila_h == 1);
  // Size the workspace by the element type actually stored in it.  The
  // previous sizeof(float) under-allocated whenever sizeof(Dtype2) >
  // sizeof(float) (e.g. double), corrupting the heap.
  Dtype2* workspace_ptr =
      static_cast<Dtype2*>(malloc(sizeof(Dtype2) * m * n * group));
  if (workspace_ptr == nullptr) {
    return;  // OOM: previously the null pointer was used unchecked.
  }
  for (int i = 0; i < num; ++i) {
    const Dtype1* din_batch = din + i * chin * hin * win;
    Dtype2* dout_batch = dout + i * chout * hout * wout;
    Dtype2* col_data = workspace_ptr;
    if (flag_1x1s1p1) {
      // GEMM output already has the output layout; skip col2im.
      col_data = dout_batch;
    }
    memset(col_data, 0, sizeof(Dtype2) * group_size_coldata * group);
    for (int g = 0; g < group; ++g) {
      const Dtype1* din_group = din_batch + g * group_size_in;
      const Dtype1* weights_group = weights + g * group_size_weights;
      Dtype2* coldata_group = col_data + g * group_size_coldata;
      // col = 1 * W^T * in; ReLU is fused here only when no bias follows.
      basic_gemm<Dtype1, Dtype2>(true,
                                 false,
                                 m,
                                 n,
                                 k,
                                 1,
                                 weights_group,
                                 m,
                                 din_group,
                                 n,
                                 0,
                                 coldata_group,
                                 n,
                                 nullptr,
                                 false,
                                 (!flag_bias && flag_relu));
    }
    if (!flag_1x1s1p1) {
      col2im(col_data,
             chout,
             hout,
             wout,
             kernel_h,
             kernel_w,
             pad_h0,
             pad_h1,
             pad_w0,
             pad_w1,
             stride_h,
             stride_w,
             dila_h,
             dila_w,
             dout_batch);
    }
    //! add bias
    if (flag_bias) {
      fill_bias_relu(
          dout_batch, bias, chout, wout * hout, flag_bias, flag_relu);
    }
  }
  free(workspace_ptr);
}
// Bilinearly sample a height x width map (row stride data_width) at the
// fractional coordinates (h, w).  Coordinates at or beyond the last row or
// column are clamped onto the border sample.
float deformable_bilinear(const float* bottom_data,
                          const int data_width,
                          const int height,
                          const int width,
                          float h,
                          float w) {
  int h_low = floor(h);
  int w_low = floor(w);
  int h_high;
  int w_high;
  if (h_low >= height - 1) {
    h_low = height - 1;
    h_high = h_low;
    h = (float)h_low;
  } else {
    h_high = h_low + 1;
  }
  if (w_low >= width - 1) {
    w_low = width - 1;
    w_high = w_low;
    w = (float)w_low;
  } else {
    w_high = w_low + 1;
  }
  // Fractional distances from the low corner, and their complements.
  const float lh = h - h_low;
  const float lw = w - w_low;
  const float hh = 1 - lh;
  const float hw = 1 - lw;
  // The four neighboring samples: top-left/right, bottom-left/right.
  const float tl = bottom_data[h_low * data_width + w_low];
  const float tr = bottom_data[h_low * data_width + w_high];
  const float bl = bottom_data[h_high * data_width + w_low];
  const float br = bottom_data[h_high * data_width + w_high];
  return hh * hw * tl + hh * lw * tr + lh * hw * bl + lh * lw * br;
}
//! for float, dtype1 and type2 is float
//! for int8, dytpe1 is char, dtype2 is int
//! Reference modulated deformable convolution: each kernel tap samples the
//! input at a learned fractional offset (bilinear interpolation) and, when
//! `modulated` is set, scales the sample by a learned mask.  The output is
//! accumulated on top of its existing contents plus the optional bias.
//! NOTE(review): the sampling position below adds `kernel_w * dila_w` /
//! `kernel_h * dila_h` for every tap instead of the per-tap `fw * dila_w` /
//! `fh * dila_h` used in standard deformable conv — confirm this matches
//! the producing kernel before relying on it.
template <typename Dtype1, typename Dtype2>
void deformable_conv_basic(const Dtype1* in_data,
                           const float* offset_data,
                           const float* mask_data,
                           Dtype2* out_data,
                           int num,
                           int chout,
                           int hout,
                           int wout,
                           int chin,
                           int hin,
                           int win,
                           const Dtype1* weights,
                           const Dtype2* bias,
                           int group,
                           int kernel_w,
                           int kernel_h,
                           int stride_w,
                           int stride_h,
                           int dila_w,
                           int dila_h,
                           int pad_w,
                           int pad_h,
                           bool flag_bias,
                           bool flag_relu,
                           bool modulated) {
  int out_c_group = chout / group;
  int in_c_group = chin / group;
  int in_size = hin * win;
  int out_size = hout * wout;
  int c_in_size = chin * in_size;
  int c_out_size = chout * out_size;
  int kernel_size = kernel_w * kernel_h;
  for (int n = 0; n < num; n++) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
    for (int g = 0; g < group; ++g) {
      for (int oc = 0; oc < out_c_group; ++oc) {
        for (int oh = 0; oh < hout; oh++) {
          for (int ow = 0; ow < wout; ow++) {
            int out_idx = n * c_out_size + g * out_c_group * out_size +
                          oc * out_size + oh * wout + ow;
            Dtype2 bias_d = flag_bias ? bias[g * out_c_group + oc] : 0;
            // Accumulates on top of whatever is already in out_data.
            out_data[out_idx] = bias_d + out_data[out_idx];
            for (int ic = 0; ic < in_c_group; ++ic) {
              for (int fh = 0; fh < kernel_h; fh++) {
                for (int fw = 0; fw < kernel_w; fw++) {
                  // Offsets are stored as (2 * kernel_size) planes per
                  // group: an h-offset and a w-offset plane per tap.
                  const float* offset_data_ptr =
                      offset_data + n * group * 2 * kernel_size * out_size +
                      g * 2 * kernel_size * out_size;
                  const int data_offset_h_ptr =
                      ((2 * (fh * kernel_w + fw)) * hout + oh) * wout + ow;
                  const int data_offset_w_ptr =
                      ((2 * (fh * kernel_w + fw) + 1) * hout + oh) * wout + ow;
                  const float offset_h = offset_data_ptr[data_offset_h_ptr];
                  const float offset_w = offset_data_ptr[data_offset_w_ptr];
                  // Fractional sampling position (see NOTE(review) above
                  // about kernel_w/kernel_h vs fw/fh here).
                  const float iw =
                      ow * stride_w - pad_w + kernel_w * dila_w + offset_w;
                  const float ih =
                      oh * stride_h - pad_h + kernel_h * dila_h + offset_h;
                  if (ih >= 0 && ih < hin && iw >= 0 && iw < win) {
                    const float map_h = kernel_h * dila_h + offset_h;
                    const float map_w = kernel_w * dila_w + offset_w;
                    // Sample relative to the tap's un-offset anchor.
                    const int cur_height = hin - (oh * stride_h - pad_h);
                    const int cur_width = win - (ow * stride_w - pad_w);
                    const float* in_data_offset =
                        in_data + n * c_in_size +
                        (g * in_c_group + ic) * in_size +
                        (oh * stride_h - pad_h) * win + (ow * stride_w - pad_w);
                    float val = deformable_bilinear(in_data_offset,
                                                    win,
                                                    cur_height,
                                                    cur_width,
                                                    map_h,
                                                    map_w);
                    if (modulated) {
                      // use mask: one kernel_size plane of scales per group
                      const float* mask_ptr =
                          mask_data + n * group * kernel_size * out_size +
                          g * kernel_size * out_size +
                          (fh * kernel_w + fw) * hout * wout + oh * wout + ow;
                      val *= mask_ptr[0];
                    }
                    int widx = g * out_c_group * in_c_group * kernel_size +
                               oc * in_c_group * kernel_size +
                               ic * kernel_size + fh * kernel_w + fw;
                    out_data[out_idx] += val * weights[widx];
                  }
                }
              }
            }
            if (flag_relu) {
              out_data[out_idx] = out_data[out_idx] > 0 ? out_data[out_idx] : 0;
            }
          }
        }
      }
    }
  }
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  /// NOTE(review): intentionally left uninitialized here — presumably callers
  /// set it together with PointerLoc; confirm before reading it standalone.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// Per-file nullability records, keyed by file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache keyed on the file ID, sitting in front of Map.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  /// Retrieve (creating on first access) the nullability record for \p file.
  FileNullability &operator[](FileID file) {
    if (file != Cache.File) {
      // Cache miss: write any previously cached entry back into the map,
      // then load the requested file's record into the cache.
      if (!Cache.File.isInvalid())
        Map[Cache.File] = Cache.Nullability;
      Cache.File = file;
      Cache.Nullability = Map[file];
    }
    return Cache.Nullability;
  }
};
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Returns the expected type for the token at \p Tok, or a null QualType
  /// when \p Tok is not the token this builder was primed for. A concrete
  /// Type wins over the lazily computed one.
  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                  const NamedDecl *New) {
  // A visible previous declaration always links with the new one.
  if (isVisible(Old))
    return true;

  // Otherwise the two link only if the new declaration could be declared
  // externally. See comment in the LookupResult overload for why it's safe
  // to compute the linkage of the new declaration here.
  if (!New->isExternallyDeclarable())
    return false;

  assert(Old->isExternallyDeclarable() &&
         "should not have found a non-externally-declarable previous decl");
  return true;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
api_notes::APINotesManager APINotes;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
  std::string SectionName;       // Section name given by the pragma, if any.
  bool Valid = false;            // Whether a section override is in effect.
  SourceLocation PragmaLocation; // Location of the controlling pragma.

  /// Set or clear this section state per \p Action; \p Name is the section
  /// name literal supplied by the pragma (used when setting).
  void Act(SourceLocation PragmaLocation,
           PragmaClangSectionAction Action,
           StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
  /// One pushed #pragma state: its label (possibly empty), value, and the
  /// locations of the pragma itself and of the push that created the slot.
  struct Slot {
    llvm::StringRef StackSlotLabel;
    ValueType Value;
    SourceLocation PragmaLocation;
    SourceLocation PragmaPushLocation;
    Slot(llvm::StringRef StackSlotLabel, ValueType Value,
         SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
        : StackSlotLabel(StackSlotLabel), Value(Value),
          PragmaLocation(PragmaLocation),
          PragmaPushLocation(PragmaPushLocation) {}
  };

  /// Apply \p Action (reset/set/push/pop combinations from
  /// PragmaMsStackAction) at \p PragmaLocation, matching push/pop slots by
  /// \p StackSlotLabel.
  void Act(SourceLocation PragmaLocation,
           PragmaMsStackAction Action,
           llvm::StringRef StackSlotLabel,
           ValueType Value);

  // MSVC seems to add artificial slots to #pragma stacks on entering a C++
  // method body to restore the stacks on exit, so it works like this:
  //
  //   struct S {
  //     #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
  //     void Method {}
  //     #pragma <name>(pop, InternalPragmaSlot)
  //   };
  //
  // It works even with #pragma vtordisp, although MSVC doesn't support
  //   #pragma vtordisp(push [, id], n)
  // syntax.
  //
  // Push / pop a named sentinel slot.
  void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
    assert((Action == PSK_Push || Action == PSK_Pop) &&
           "Can only push / pop #pragma stack sentinels!");
    Act(CurrentPragmaLocation, Action, Label, CurrentValue);
  }

  // Constructors.
  explicit PragmaStack(const ValueType &Default)
      : DefaultValue(Default), CurrentValue(Default) {}

  /// Whether some pragma has taken effect, i.e. the current value differs
  /// from the default.
  bool hasValue() const { return CurrentValue != DefaultValue; }

  SmallVector<Slot, 2> Stack;
  ValueType DefaultValue; // Value used for PSK_Reset action.
  ValueType CurrentValue;
  SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
  unsigned CurrentValue;                // #pragma pack value at the #include.
  SourceLocation CurrentPragmaLocation; // Location of the controlling pragma.
  // Whether the value differed from the default at the #include, and whether
  // to warn about it when processing that include.
  bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
  /// When \p ShouldAct is true, pushes a sentinel slot labeled \p SlotLabel
  /// onto the MS #pragma stacks; the destructor pops it again. When false,
  /// both constructor and destructor are no-ops.
  PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
  ~PragmaStackSentinelRAII();

private:
  Sema &S;
  StringRef SlotLabel;
  bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
ParseTypeFromStringCallback;
class DelayedDiagnostics;
class DelayedDiagnosticsState {
  // The pool that was active before the push; restored on the matching pop.
  // Only Sema::DelayedDiagnostics may read or write it.
  sema::DelayedDiagnosticPool *SavedPool;
  friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
  Sema &S;
  DeclContext *SavedContext;        // Context to restore; null once popped.
  ProcessingContextState SavedContextState;
  QualType SavedCXXThisTypeOverride;

public:
  /// Makes \p ContextToPush the current context, suspends delayed
  /// diagnostics, and (when \p NewThisContext) clears the 'this' type
  /// override for the duration of the scope.
  ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
    : S(S), SavedContext(S.CurContext),
      SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
      SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
  {
    assert(ContextToPush && "pushing null context");
    S.CurContext = ContextToPush;
    if (NewThisContext)
      S.CXXThisTypeOverride = QualType();
  }

  /// Restore the saved state early; idempotent, so calling it before
  /// destruction is safe.
  void pop() {
    if (!SavedContext) return;
    S.CurContext = SavedContext;
    S.DelayedDiagnostics.popUndelayed(SavedContextState);
    S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
    SavedContext = nullptr;
  }

  ~ContextRAII() {
    pop();
  }
};
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;

/// Whether expressions being parsed right now are constant-evaluated: true
/// when either the innermost evaluation context says so or the override
/// flag above is set.
bool isConstantEvaluated() {
  return ExprEvalContexts.back().isConstantEvaluated() ||
         isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
  Sema &S;
  Sema::ContextRAII SavedContext;
  bool PushedCodeSynthesisContext = false;

public:
  /// Enters \p DC (a FunctionDecl or ObjCMethodDecl), pushes a function
  /// scope, and opens a potentially-evaluated expression context for
  /// synthesizing the body.
  SynthesizedFunctionScope(Sema &S, DeclContext *DC)
      : S(S), SavedContext(S, DC) {
    S.PushFunctionScope();
    S.PushExpressionEvaluationContext(
        Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
    if (auto *FD = dyn_cast<FunctionDecl>(DC))
      FD->setWillHaveBody(true);
    else
      assert(isa<ObjCMethodDecl>(DC));
  }

  /// Pushes a DefiningSynthesizedFunction code-synthesis context pointing at
  /// \p UseLoc; may be called at most once per scope (popped in the dtor).
  void addContextNote(SourceLocation UseLoc) {
    assert(!PushedCodeSynthesisContext);

    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
    Ctx.PointOfInstantiation = UseLoc;
    Ctx.Entity = cast<Decl>(S.CurContext);
    S.pushCodeSynthesisContext(Ctx);

    PushedCodeSynthesisContext = true;
  }

  /// Unwinds everything the constructor (and addContextNote) set up, in
  /// reverse order.
  ~SynthesizedFunctionScope() {
    if (PushedCodeSynthesisContext)
      S.popCodeSynthesisContext();
    if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
      FD->setWillHaveBody(false);
    S.PopExpressionEvaluationContext();
    S.PopFunctionScopeInfo();
  }
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields, such as the operand of a SIZE operator in MS-style
/// inline assembly; see ExpressionEvaluationContext::UnevaluatedAbstract.
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// Whether we are in a decltype expression.
  bool IsDecltype;

  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  /// Expressions whose odr-use status is not yet known in this context;
  /// saved/restored as contexts nest.
  MaybeODRUseExprSet SavedMaybeODRUseExprs;

  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  /// Expressions we may warn about dereferencing when this context is popped.
  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

  /// Expressions appearing as the LHS of a volatile assignment in this
  /// context. We produce a warning for these when popping the context if
  /// they are not discarded-value expressions nor unevaluated operands.
  SmallVector<Expr*, 2> VolatileAssignmentLHSs;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}

  /// True for any of the unevaluated-operand contexts.
  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }

  /// True only for the ConstantEvaluated context.
  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated;
  }
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  /// Outcome of overload resolution for a special member function.
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  // The resolved method plus the Kind packed into the low 2 bits.
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  /// Wraps \p MD directly; a deleted method is recorded as NoMemberOrDeleted.
  SpecialMemberOverloadResult(CXXMethodDecl *MD)
      : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};
/// A SpecialMemberOverloadResult cached in a FoldingSet, keyed by \p ID.
class SpecialMemberOverloadResultEntry
  : public llvm::FastFoldingSetNode,
    public SpecialMemberOverloadResult {
public:
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
    : FastFoldingSetNode(ID)
  {}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
/// Arena allocator for Sema-owned objects.
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
// NOTE(review): presumably the pair holds (instance methods, factory
// methods) for a selector — confirm against the .cpp implementation.
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
/// Sentinel value: not a special member.
CXXInvalid
};
/// A C++ record paired with one of its special-member kinds (3 bits hold
/// the CXXSpecialMember value).
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
// NOTE(review): appears to pull method-pool entries for \p Sel from an
// external AST source into MethodPool — confirm.
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
///
/// RAII guard: the constructor snapshots S.FPFeatures and the destructor
/// restores the saved value unconditionally.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
// The FPOptions value captured at construction time.
FPOptions OldFPFeaturesState;
};
/// Declare an implicit typedef with the given name and type at translation
/// unit scope.
void addImplicitTypedef(StringRef Name, QualType T);
/// Whether a near-stack-exhaustion warning has already been emitted
/// (used to avoid repeating it; see warnStackExhausted).
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
// Accessors for the objects this Sema collaborates with.
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
/// Print out statistics about Sema (for -print-stats style output).
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
// The Sema instance whose EmitCurrentDiagnostic performs the actual emit.
Sema &SemaRef;
// The diagnostic ID passed to Diags.Report; forwarded on destruction.
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anwyay.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
// Streams through the DiagnosticBuilder base, then returns the derived
// reference so further << calls keep the SemaDiagnosticBuilder type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
/// Reports \p DiagID at \p Loc through the DiagnosticsEngine and wraps the
/// result so that ~SemaDiagnosticBuilder dispatches back to this Sema.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
/// Find the spelling location of the given name as a macro, updating
/// \p loc on success; returns false if no spelling was found.
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Flush and reset any pending -Wunused-local-typedef warnings.
void emitAndClearUnusedLocalTypedefWarnings();
/// The fragment of a translation unit currently being acted upon
/// (see ActOnEndOfTranslationUnitFragment).
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
// Parser callbacks invoked at translation-unit boundaries.
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
/// Diagnose cycles among delegating constructors.
void CheckDelegatingCtorCycles();
/// Retrieve the parser Scope corresponding to a DeclContext, if any.
Scope *getScopeForContext(DeclContext *Ctx);
// Push a new semantic scope of the corresponding kind.
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
/// Pop the current function scope, returning ownership of it to the caller
/// via the custom deleter above.
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
/// Retrieve the innermost function scope, or null when not inside one.
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
// Record properties of the current function needed for later analyses.
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
// Compound-statement scope management.
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Parser callback for a comment in the source.
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
// The Build*Type family checks and constructs a type of the given form,
// diagnosing invalid combinations; a null QualType signals failure.
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
/// Convert a parsed Declarator to a TypeSourceInfo.
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
/// Determine whether evaluating \p E can throw.
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
// Exception-specification compatibility checks between redeclarations.
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
///
/// Implementations override diagnose() to emit a diagnostic for the
/// incomplete type \p T at \p Loc. See BoundTypeDiagnoser below for the
/// standard implementation used by the RequireCompleteType family.
struct TypeDiagnoser {
  // Defaulted instead of an empty user-provided body: same behavior,
  // matches the file's existing use of "= default" (C++11).
  TypeDiagnoser() = default;

  /// Emit the diagnostic for the incomplete type \p T at \p Loc.
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() = default;
};
// getPrintable - overload set that normalizes various entities into values
// suitable for streaming into a SemaDiagnosticBuilder (used by
// BoundTypeDiagnoser below). Most overloads are identity; source entities
// are reduced to a SourceRange.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
// A single location converts to a one-point range.
static SourceRange getPrintable(SourceLocation L) { return L; }
// Note: assumes E is non-null.
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
/// TypeDiagnoser that captures a diagnostic ID plus a pack of extra
/// arguments; diagnose() emits the diagnostic with the bound arguments
/// followed by the offending type.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
// References to the caller's arguments; the caller must keep them alive
// for the lifetime of this diagnoser.
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
// (Array-initialization trick: guarantees left-to-right evaluation
// without requiring C++17 fold expressions.)
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
// The incomplete type itself is always the last diagnostic argument.
DB << T;
}
};
/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and the function will output the number of parameter names, and whether
/// this is a single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name,
SourceLocation ArgLoc,
const IdentifierInfo *AttrName);
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
/// Worker for the public RequireCompleteType/isCompleteType entry points;
/// a null \p Diagnoser suppresses diagnostics.
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
/// State for one module currently being parsed (an entry in ModuleScopes).
struct ModuleScope {
// Location where this module scope began.
SourceLocation BeginLoc;
// The module itself; null until known.
clang::Module *Module = nullptr;
// Whether this is a module interface (as opposed to an implementation).
bool ModuleInterface = false;
// Whether an implicit global module fragment was created for this scope.
bool ImplicitGlobalModuleFragment = false;
// Visible-module set of the enclosing scope, restored on exit.
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
/// The set of modules currently visible for name lookup.
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
// Fast path: non-hidden declarations are always visible; otherwise fall
// back to the slow module-visibility computation.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
// Convenience overload that discards the suggested declaration.
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
/// Determine whether \p T is complete at \p Loc, without diagnosing.
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
// Require that a type be complete, diagnosing if it is not. The variadic
// template overload binds extra diagnostic arguments via BoundTypeDiagnoser.
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
void completeExprArrayBound(Expr *E);
// Require that an expression's type be complete; same pattern as above.
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
// Require that a type be a literal type; same pattern as above.
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// Describes whether (and how) a definition's body may be skipped, and
/// which prior declaration it should be checked against.
struct SkipBodyInfo {
  // In-class member initializers (C++11 NSDMI) replace the hand-written
  // constructor init-list: identical behavior, and new members cannot be
  // forgotten in a constructor.
  SkipBodyInfo() = default;
  /// Whether the body should be skipped entirely.
  bool ShouldSkip = false;
  /// Whether the new definition must be checked against \c Previous.
  bool CheckSameAsPrevious = false;
  /// The previous declaration this definition duplicates, if any.
  NamedDecl *Previous = nullptr;
  /// The new declaration, if any.
  NamedDecl *New = nullptr;
};
/// Wrap a single Decl (plus an optional owned type decl) into a DeclGroup.
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
/// Whether \p Kind is a token that spells a simple type specifier.
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
/// Look up \p II as a type name in scope \p S, returning the type or null.
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
/// Diagnose a name that failed to resolve as a type, possibly suggesting a
/// corrected type via \p SuggestedType.
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
};
/// The result of ClassifyName: a NameClassificationKind plus, for kinds
/// that carry a payload, the corresponding entity.
///
/// This is a discriminated union: which union member is active is
/// determined solely by \c Kind, and the accessors assert the match.
class NameClassification {
NameClassificationKind Kind;
union {
// Active for NC_ContextIndependentExpr.
ExprResult Expr;
// Active for NC_NonType.
NamedDecl *NonTypeDecl;
// Active for NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate,
// NC_UndeclaredTemplate.
TemplateName Template;
// Active for NC_Type.
ParsedType Type;
};
// Private: payload-carrying kinds must go through the named factories
// below so the matching union member is set.
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
// The keyword itself is not stored; only the kind matters.
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification ContextIndependentExpr(ExprResult E) {
NameClassification Result(NC_ContextIndependentExpr);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
// Payload accessors: each asserts that Kind matches the active member.
ExprResult getExpression() const {
assert(Kind == NC_ContextIndependentExpr);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
return Template;
}
/// Map the template classification kinds onto TemplateNameKind; only
/// valid for the four template kinds.
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name, and report (via \p Dependent) whether the reference
/// found is dependent.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  if (!getLangOpts().CPlusPlus || E.isInvalid())
    return false;
  Dependent = false;
  Expr *Referenced = E.get();
  // Non-dependent references are plausible template-names unless template
  // arguments were already written explicitly.
  if (auto *Ref = dyn_cast<DeclRefExpr>(Referenced))
    return !Ref->hasExplicitTemplateArgs();
  if (auto *Member = dyn_cast<MemberExpr>(Referenced))
    return !Member->hasExplicitTemplateArgs();
  // The remaining recognized forms are dependent references; flag that for
  // the caller before testing them.
  Dependent = true;
  if (auto *Ref = dyn_cast<DependentScopeDeclRefExpr>(Referenced))
    return !Ref->hasExplicitTemplateArgs();
  if (auto *Member = dyn_cast<CXXDependentScopeMemberExpr>(Referenced))
    return !Member->hasExplicitTemplateArgs();
  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range);
bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key);
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
/// How a constexpr-function check should report its findings.
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
SourceLocation NameLoc,
TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
/// Returns true when \p D is a non-null Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
  return D != nullptr && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
// Convenience overload: delegates to the static form using this Sema's
// ASTContext and Preprocessor.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
/// For a defaulted function, the kind of defaulted function that it is.
///
/// At most one of the two fields carries a meaningful value; the other is
/// left at its "none" sentinel (CXXInvalid / DefaultedComparisonKind::None).
class DefaultedFunctionKind {
// Which C++ special member this is, or CXXInvalid if none.
CXXSpecialMember SpecialMember : 8;
// Which defaulted comparison this is, or None if none.
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
///
/// The sum works because only one field is ever set: special members
/// occupy indices [0, CXXInvalid) and comparisons continue from
/// CXXInvalid upward (None == 0 contributes nothing), as the
/// static_asserts below verify.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
/// Which C++ special member \p MD is, per getDefaultedFunctionKind
/// (CXXInvalid if it is not one).
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
/// Which defaulted comparison \p FD is, per getDefaultedFunctionKind
/// (None if it is not one).
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
// Attribute-merging helpers. Each takes a declaration plus the data of a
// newly seen attribute and reconciles it with any same-kind attribute already
// on the declaration chain, returning the attribute to attach (or null —
// presumably on conflict; verify against the definitions).
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
                        TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
                                    VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
                        StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase,
                       MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
                            IdentifierInfo *Format, int FormatIdx,
                            int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
                              StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
                              StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
                                        const AttributeCommonInfo &CI,
                                        const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
                                    const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
                                  const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
                                        const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI,
                                  StringRef Name, bool Override);
// Two spellings each: one driven by a parsed attribute, one by an existing
// semantic attribute from a prior declaration.
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
                                              const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
// Declaration-merging entry points, invoked when a redeclaration is seen:
// reconcile the new declaration with the previous one (attributes, types,
// exception specifications, ...). The bool-returning forms follow the
// "true on error" convention used elsewhere here — confirm per definition.
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                         AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
                          LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                       bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
                                  Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation, so diagnostics can say
// "assigning", "passing", "returning", etc. rather than a generic message.
enum AssignmentAction {
  AA_Assigning,
  AA_Passing,
  AA_Returning,
  AA_Converting,
  AA_Initializing,
  AA_Sending,
  AA_Casting,
  AA_Passing_CFAudited // passing within a CF-audited region (name-based; verify)
};
/// C++ Overloading.
/// Classifies the relationship between a new function declaration and an
/// existing set of lookup results (see CheckOverload below).
enum OverloadKind {
  /// This is a legitimate overload: the existing declarations are
  /// functions or function templates with different signatures.
  Ovl_Overload,
  /// This is not an overload because the signature exactly matches
  /// an existing declaration.
  Ovl_Match,
  /// This is not an overload because the lookup results contain a
  /// non-function.
  Ovl_NonFunction
};
// Determine whether New overloads, matches, or conflicts with OldDecls;
// OldDecl receives the matched prior declaration when there is one.
OverloadKind CheckOverload(Scope *S,
                           FunctionDecl *New,
                           const LookupResult &OldDecls,
                           NamedDecl *&OldDecl,
                           bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
                bool ConsiderCudaAttrs = true);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
                      bool SuppressUserConversions,
                      bool AllowExplicit,
                      bool InOverloadResolution,
                      bool CStyle,
                      bool AllowObjCWritebackConversion);
// Predicates for the individual standard-conversion categories; the forms
// taking a QualType& out-parameter also report the converted/adjusted type.
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
                         bool InOverloadResolution,
                         QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
                             QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
                               QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
                              QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
                                const FunctionProtoType *NewType,
                                unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
                                QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
                            CastKind &Kind,
                            CXXCastPath& BasePath,
                            bool IgnoreBaseAccess,
                            bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
                               bool InOverloadResolution,
                               QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
                                  CastKind &Kind,
                                  CXXCastPath &BasePath,
                                  bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
                               bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
                          QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
// Initialization helpers built on top of the conversion machinery above.
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                           const VarDecl *NRVOCandidate,
                                           QualType ResultType,
                                           Expr *Value,
                                           bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
    const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
                                  ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
                                     SourceLocation EqualLoc,
                                     ExprResult Init,
                                     bool TopLevelOfInitList = false,
                                     bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
                                               NestedNameSpecifier *Qualifier,
                                               NamedDecl *FoundDecl,
                                               CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
/// The kind selects the context-appropriate diagnostics.
enum CCEKind {
  CCEK_CaseValue,   ///< Expression in a case label.
  CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
  CCEK_TemplateArg, ///< Value of a non-type template parameter.
  CCEK_NewExpr,     ///< Constant expression in a noptr-new-declarator.
  CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
  CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
// Check that From is a converted constant expression of type T, returning the
// possibly-converted expression. The APSInt overload yields integral values;
// the APValue overload yields arbitrary constant values.
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
///
/// Subclasses implement match() to accept destination types and the
/// diagnose*/note* hooks to produce context-specific diagnostics.
class ContextualImplicitConverter {
public:
  // When set, callers of this converter are expected to suppress the
  // corresponding diagnostics (NOTE(review): enforced by the consumer of
  // these flags, not by this class itself).
  bool Suppress;
  bool SuppressConversion;
  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}
  /// Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;
  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
  /// Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder
  diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
  /// Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
  /// Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder
  noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
  /// Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder
  diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
  /// Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder
  noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
  /// Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
  virtual ~ContextualImplicitConverter() {}
};
/// Partial specialization of the contextual converter for the common case of
/// converting to an integral or enumeration type (ICE contexts). Subclasses
/// only need to provide diagnoseNotInt plus the remaining pure hooks.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
  // Whether scoped enumeration types are also acceptable destinations.
  bool AllowScopedEnumerations;
public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations,
                      bool Suppress, bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}
  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;
  // Adapts the generic "no match" hook to the integral-specific diagnostic.
  SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
    return diagnoseNotInt(S, Loc, T);
  }
  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
// Classification of the container kind for an Objective-C subscript
// expression (array-style vs. dictionary-style indexing).
enum ObjCSubscriptKind {
  OS_Array,
  OS_Dictionary,
  OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this is a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
// Marks whether a call candidate was found via argument-dependent lookup.
using ADLCallKind = CallExpr::ADLCallKind;
// Overload-candidate-set construction. Each Add*Candidate function adds one
// particular flavor of candidate (free function, method, template, conversion
// function, surrogate, builtin operator, ADL result) to CandidateSet for a
// given argument list; overload resolution then picks the best viable one.
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false,
                          bool AllowExplicit = true,
                          bool AllowExplicitConversion = false,
                          ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
                          ConversionSequenceList EarlyConversions = None,
                          OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
                           ArrayRef<Expr *> Args,
                           OverloadCandidateSet &CandidateSet,
                           TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                           bool SuppressUserConversions = false,
                           bool PartialOverloading = false,
                           bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
                        QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet& CandidateSet,
                        bool SuppressUserConversion = false,
                        OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
                        DeclAccessPair FoundDecl,
                        CXXRecordDecl *ActingContext, QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet& CandidateSet,
                        bool SuppressUserConversions = false,
                        bool PartialOverloading = false,
                        ConversionSequenceList EarlyConversions = None,
                        OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
                                DeclAccessPair FoundDecl,
                                CXXRecordDecl *ActingContext,
                                TemplateArgumentListInfo *ExplicitTemplateArgs,
                                QualType ObjectType,
                                Expr::Classification ObjectClassification,
                                ArrayRef<Expr *> Args,
                                OverloadCandidateSet& CandidateSet,
                                bool SuppressUserConversions = false,
                                bool PartialOverloading = false,
                                OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
    FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
    TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
    OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
    bool PartialOverloading = false, bool AllowExplicit = true,
    ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
    OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
    FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
    ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
    ConversionSequenceList &Conversions, bool SuppressUserConversions,
    CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
    Expr::Classification ObjectClassification = {},
    OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
    CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
    CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
    OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
    bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
    FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
    CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
    OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
    bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
                           DeclAccessPair FoundDecl,
                           CXXRecordDecl *ActingContext,
                           const FunctionProtoType *Proto,
                           Expr *Object, ArrayRef<Expr *> Args,
                           OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
    const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
    OverloadCandidateSet &CandidateSet,
    TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
                                 SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                 OverloadCandidateSet &CandidateSet,
                                 OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
                         OverloadCandidateSet& CandidateSet,
                         bool IsAssignmentOperator = false,
                         unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
                                  SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                  OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
                                          SourceLocation Loc,
                                          ArrayRef<Expr *> Args,
                                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                                          OverloadCandidateSet& CandidateSet,
                                          bool PartialOverloading = false);
// Diagnostics and queries on overload candidates: candidate notes,
// enable_if / diagnose_if checking, and address-of-function checks.
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
    NamedDecl *Found, FunctionDecl *Fn,
    OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
    QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
                               bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
                            bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
                                         const Expr *ThisArg,
                                         ArrayRef<const Expr *> Args,
                                         SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
                                           SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
                                       bool Complain = false,
                                       SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType]  -->   [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
// Resolving the address of an overloaded function to a single declaration.
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
                                   QualType TargetType,
                                   bool Complain,
                                   DeclAccessPair &Found,
                                   bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
                                            DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
    ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                            bool Complain = false,
                                            DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
    ExprResult &SrcExpr,
    bool DoFunctionPointerConverion = false,
    bool Complain = false,
    SourceRange OpRangeForComplaining = SourceRange(),
    QualType DestTypeForComplaining = QualType(),
    unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
                                     DeclAccessPair FoundDecl,
                                     FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
                                          DeclAccessPair FoundDecl,
                                          FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                 ArrayRef<Expr *> Args,
                                 OverloadCandidateSet &CandidateSet,
                                 bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
  FRS_Success,
  FRS_NoViableFunction,
  FRS_DiagnosticIssued
};
// Building call expressions after (or during) overload resolution.
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
                                         SourceLocation RangeLoc,
                                         const DeclarationNameInfo &NameInfo,
                                         LookupResult &MemberLookup,
                                         OverloadCandidateSet *CandidateSet,
                                         Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                   UnresolvedLookupExpr *ULE,
                                   SourceLocation LParenLoc,
                                   MultiExprArg Args,
                                   SourceLocation RParenLoc,
                                   Expr *ExecConfig,
                                   bool AllowTypoCorrection=true,
                                   bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                            MultiExprArg Args, SourceLocation RParenLoc,
                            OverloadCandidateSet *CandidateSet,
                            ExprResult *Result);
// Overloaded operator expressions (unary, binary, subscript).
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
                                   UnaryOperatorKind Opc,
                                   const UnresolvedSetImpl &Fns,
                                   Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
                                 BinaryOperatorKind Opc,
                                 const UnresolvedSetImpl &Fns,
                                 Expr *LHS, Expr *RHS,
                                 bool RequiresADL = true,
                                 bool AllowRewrittenCandidates = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                              SourceLocation RLoc,
                                              Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                          SourceLocation LParenLoc,
                          MultiExprArg Args,
                          SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
                             MultiExprArg Args,
                             SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
                                    SourceLocation OpLoc,
                                    bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
                         CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                              bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
/// (The two enums below parameterize the Lookup* entry points that follow.)
enum LookupNameKind {
  /// Ordinary name lookup, which finds ordinary names (functions,
  /// variables, typedefs, etc.) in C and most kinds of names
  /// (functions, variables, members, types, etc.) in C++.
  LookupOrdinaryName = 0,
  /// Tag name lookup, which finds the names of enums, classes,
  /// structs, and unions.
  LookupTagName,
  /// Label name lookup.
  LookupLabel,
  /// Member name lookup, which finds the names of
  /// class/struct/union members.
  LookupMemberName,
  /// Look up of an operator name (e.g., operator+) for use with
  /// operator overloading. This lookup is similar to ordinary name
  /// lookup, but will ignore any declarations that are class members.
  LookupOperatorName,
  /// Look up of a name that precedes the '::' scope resolution
  /// operator in C++. This lookup completely ignores operator, object,
  /// function, and enumerator names (C++ [basic.lookup.qual]p1).
  LookupNestedNameSpecifierName,
  /// Look up a namespace name within a C++ using directive or
  /// namespace alias definition, ignoring non-namespace names (C++
  /// [basic.lookup.udir]p1).
  LookupNamespaceName,
  /// Look up all declarations in a scope with the given name,
  /// including resolved using declarations.  This is appropriate
  /// for checking redeclarations for a using declaration.
  LookupUsingDeclName,
  /// Look up an ordinary name that is going to be redeclared as a
  /// name with linkage. This lookup ignores any declarations that
  /// are outside of the current scope unless they have linkage. See
  /// C99 6.2.2p4-5 and C++ [basic.link]p6.
  LookupRedeclarationWithLinkage,
  /// Look up a friend of a local class. This lookup does not look
  /// outside the innermost non-class scope. See C++11 [class.friend]p11.
  LookupLocalFriendName,
  /// Look up the name of an Objective-C protocol.
  LookupObjCProtocolName,
  /// Look up implicit 'self' parameter of an objective-c method.
  LookupObjCImplicitSelfParam,
  /// Look up the name of an OpenMP user-defined reduction operation.
  LookupOMPReductionName,
  /// Look up the name of an OpenMP user-defined mapper.
  LookupOMPMapperName,
  /// Look up any declaration with any name.
  LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
  /// The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,
  /// The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists and is visible.
  ForVisibleRedeclaration,
  /// The lookup results will be used for redeclaration of a name
  /// with external linkage; non-visible lookup results with external linkage
  /// may also be found.
  ForExternalRedeclaration
};
/// Pick the redeclaration-lookup kind appropriate for the current context:
/// visible-only when the context has an owning module for linkage purposes,
/// external-linkage-inclusive otherwise.
RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  Decl *ContextDecl = cast<Decl>(CurContext);
  bool HasOwningModule =
      ContextDecl->getOwningModuleForLinkage(/*IgnoreLinkage*/true) != nullptr;
  return HasOwningModule ? ForVisibleRedeclaration : ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
  /// The lookup resulted in an error.
  LOLR_Error,
  /// The lookup found no match but no diagnostic was issued.
  LOLR_ErrorNoDiagnostic,
  /// The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,
  /// The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplate
};
// Look up the special member (constructor, assignment, destructor) of D with
// the given qualifiers on the argument and on 'this'.
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                                CXXSpecialMember SM,
                                                bool ConstArg,
                                                bool VolatileArg,
                                                bool RValueThis,
                                                bool ConstThis,
                                                bool VolatileThis);
// Callback types used by delayed typo correction (see CorrectTypoDelayed):
// one to emit the diagnostic for a correction, one to rebuild the expression
// from a chosen correction.
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
    TypoRecoveryCallback;
private:
  // Internal state for name lookup and delayed typo correction.
  bool CppLookupName(LookupResult &R, Scope *S);
  /// Bookkeeping attached to each pending TypoExpr: the consumer that ranks
  /// candidate corrections plus the diagnostic/recovery callbacks to run
  /// once a correction is chosen. Move-only (owns the consumer).
  struct TypoExprState {
    std::unique_ptr<TypoCorrectionConsumer> Consumer;
    TypoDiagnosticGenerator DiagHandler;
    TypoRecoveryCallback RecoveryHandler;
    TypoExprState();
    TypoExprState(TypoExprState &&other) noexcept;
    TypoExprState &operator=(TypoExprState &&other) noexcept;
  };
  /// The set of unhandled TypoExprs and their associated state.
  llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
  /// Creates a new TypoExpr AST node.
  TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                              TypoDiagnosticGenerator TDG,
                              TypoRecoveryCallback TRC);
  // The set of known/encountered (unique, canonicalized) NamespaceDecls.
  //
  // The boolean value will be true to indicate that the namespace was loaded
  // from an AST/PCH file, or false otherwise.
  llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
  /// Whether we have already loaded known namespaces from an external
  /// source.
  bool LoadedExternalKnownNamespaces;
  /// Helper for CorrectTypo and CorrectTypoDelayed used to create and
  /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
  /// should be skipped entirely.
  std::unique_ptr<TypoCorrectionConsumer>
  makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             CorrectionCandidateCallback &CCC,
                             DeclContext *MemberContext, bool EnteringContext,
                             const ObjCObjectPointerType *OPT,
                             bool ErrorRecovery);
public:
  const TypoExprState &getTypoExprState(TypoExpr *TE) const;
  /// Clears the state of the given TypoExpr.
  void clearDelayedTypo(TypoExpr *TE);
  /// Look up a name, looking for a single declaration.  Return
  /// null if the results were absent, ambiguous, or overloaded.
  ///
  /// It is preferable to use the elaborated form and explicitly handle
  /// ambiguity and overloaded.
  NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                              SourceLocation Loc,
                              LookupNameKind NameKind,
                              RedeclarationKind Redecl
                                = NotForRedeclaration);
  bool LookupBuiltin(LookupResult &R);
  // Core lookup entry points; the LookupResult R carries the name, the
  // lookup kind, and (on return) the found declarations.
  bool LookupName(LookupResult &R, Scope *S,
                  bool AllowBuiltinCreation = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                           bool InUnqualifiedLookup = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                           CXXScopeSpec &SS);
  bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                        bool AllowBuiltinCreation = false,
                        bool EnteringContext = false);
  ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                   RedeclarationKind Redecl
                                     = NotForRedeclaration);
  bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
  void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                    QualType T1, QualType T2,
                                    UnresolvedSetImpl &Functions);
  LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                                 SourceLocation GnuLabelLoc = SourceLocation());
  // Lookup of C++ special member functions for a class.
  DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                               unsigned Quals);
  CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                         bool RValueThis, unsigned ThisQuals);
  CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                              unsigned Quals);
  CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                        bool RValueThis, unsigned ThisQuals);
  CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
  bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
  LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
                                                    ArrayRef<QualType> ArgTys,
                                                    bool AllowRaw,
                                                    bool AllowTemplate,
                                                    bool AllowStringTemplate,
                                                    bool DiagnoseMissing);
  bool isKnownName(StringRef name);
  /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
  enum class FunctionEmissionStatus {
    Emitted,
    CUDADiscarded,     // Discarded due to CUDA/HIP hostness
    OMPDiscarded,      // Discarded due to OpenMP hostness
    TemplateDiscarded, // Discarded due to uninstantiated templates
    Unknown,
  };
  FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl);
  // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
  bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
  void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
                               ArrayRef<Expr *> Args, ADLResult &Functions);
  // Enumerate declarations visible from a scope or context (used by code
  // completion and similar consumers).
  void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
                          VisibleDeclConsumer &Consumer,
                          bool IncludeGlobalScope = true,
                          bool LoadExternal = true);
  void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
                          VisibleDeclConsumer &Consumer,
                          bool IncludeGlobalScope = true,
                          bool IncludeDependentBases = false,
                          bool LoadExternal = true);
  enum CorrectTypoKind {
    CTK_NonError,     // CorrectTypo used in a non error recovery situation.
    CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
  };
  // Eager typo correction: returns the best correction immediately.
  TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind,
                             Scope *S, CXXScopeSpec *SS,
                             CorrectionCandidateCallback &CCC,
                             CorrectTypoKind Mode,
                             DeclContext *MemberContext = nullptr,
                             bool EnteringContext = false,
                             const ObjCObjectPointerType *OPT = nullptr,
                             bool RecordFailure = true);
  // Delayed typo correction: returns a placeholder TypoExpr whose correction
  // is resolved later via the TDG/TRC callbacks.
  TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
                               Sema::LookupNameKind LookupKind, Scope *S,
                               CXXScopeSpec *SS,
                               CorrectionCandidateCallback &CCC,
                               TypoDiagnosticGenerator TDG,
                               TypoRecoveryCallback TRC, CorrectTypoKind Mode,
                               DeclContext *MemberContext = nullptr,
                               bool EnteringContext = false,
                               const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
                          llvm::function_ref<ExprResult(Expr *)> Filter =
                              [](Expr *E) -> ExprResult { return E; });
// Convenience overload: explicit Filter, no InitDecl to avoid.
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
/// ExprResult-taking convenience overload of CorrectDelayedTyposInExpr:
/// passes through invalid results unchanged, otherwise corrects typos in the
/// wrapped expression.
///
/// \param ER The (possibly invalid) expression to process.
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
/// \param Filter Acceptance filter for rebuilt expressions (see the Expr*
/// overload above).
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
                          llvm::function_ref<ExprResult(Expr *)> Filter =
                              [](Expr *E) -> ExprResult { return E; }) {
  // Forward InitDecl to the Expr* overload. Previously the call was
  // CorrectDelayedTyposInExpr(ER.get(), Filter), which bound the
  // (Expr*, Filter) overload and silently dropped InitDecl, so typo
  // correction could suggest the very variable being initialized.
  return ER.isInvalid()
             ? ER
             : CorrectDelayedTyposInExpr(ER.get(), InitDecl, Filter);
}
// Convenience overload: ExprResult with explicit Filter, no InitDecl.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
// Emit the diagnostic (and optional fix-it/recovery) for a chosen typo
// correction; the second form adds a note pointing at the previous entity.
void diagnoseTypo(const TypoCorrection &Correction,
                  const PartialDiagnostic &TypoDiag,
                  bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
                  const PartialDiagnostic &TypoDiag,
                  const PartialDiagnostic &PrevNote,
                  bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
// Compute the namespaces and classes associated with the argument types,
// as used by argument-dependent lookup (C++ [basic.lookup.argdep]).
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
                                        ArrayRef<Expr *> Args,
                                        AssociatedNamespaceSet &AssociatedNamespaces,
                                        AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
                          bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
                                        SourceLocation IdLoc,
                                        bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
                               Scope *S, bool ForRedeclaration,
                               SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
                                    Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
                                 const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
                              bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                    const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
NullabilityKind nullability,
SourceLocation diagLoc,
bool allowArrayTypes,
bool overrideExisting);
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// its property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
/// Strategy used by MatchTwoMethodDeclarations (below) when comparing the
/// types of two Objective-C method declarations.
enum MethodMatchStrategy {
  MMS_loose,  ///< Tolerant comparison.
  MMS_strict  ///< Exact comparison (the default for MatchTwoMethodDeclarations).
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in global method pool for the
/// given selector. It checks the desired kind first; if none is found, and
/// the parameter checkTheOther is set, it then checks the other kind. If no
/// such method or only one method is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
///
/// \param Typo The identifier for which correction failed.
/// \param TypoLoc The location at which correction was attempted.
/// \param RecordFailure When true (the default), the location is inserted
/// into the TypoCorrectionFailures map under \p Typo.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  // An empty TypoCorrection signals "no correction found" to callers.
  return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  // Forward to the private worker with instance = true.
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as AddInstanceMethodToGlobalPool,
/// but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  // Forward to the private worker with instance = false.
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  // Forward to the private worker with instance = true.
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  // Forward to the private worker with instance = false.
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// Wrapper around an Expr that has been processed as a full expression.
/// Instances are produced by Sema::MakeFullExpr and
/// Sema::MakeFullDiscardedValueExpr (below); the wrapped expression may be
/// null.
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }

  /// Hand the wrapped expression back as an ExprResult.
  ExprResult release() {
    return E;
  }

  /// Direct access to the wrapped expression (may be null).
  Expr *get() const { return E; }

  Expr *operator->() {
    return E;
  }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  /// Private: only Sema (a friend) may wrap a raw Expr.
  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;  // The wrapped expression.
};
/// Wrap \p Arg as a full expression, using the expression's own location
/// (or an invalid SourceLocation when \p Arg is null).
FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
/// Wrap \p Arg as a full expression at location \p CC; the expression's
/// value is treated as used (DiscardedValue = false).
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
/// Wrap \p Arg as a full expression whose value is discarded
/// (DiscardedValue = true), using the expression's own location (or an
/// invalid SourceLocation when \p Arg is null).
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  ExprResult Full = ActOnFinishFullExpr(Arg, Loc, /*DiscardedValue*/ true);
  return FullExprArg(Full.get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement: calls
/// ActOnStartOfCompoundStmt on construction and ActOnFinishOfCompoundStmt
/// on destruction.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;  // Sema instance whose compound-statement scope is managed.
};
/// An RAII helper that pops a function scope on exit (via
/// PopFunctionScopeInfo), unless disable() was called first.
struct FunctionScopeRAII {
  Sema &S;
  bool Active;  // When false, the destructor does nothing (see disable()).

  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}

  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }

  /// Prevent the destructor from popping the function scope.
  void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
/// Why a C++ for-range statement is being built or rebuilt; passed to
/// ActOnCXXForRangeStmt / BuildCXXForRangeStmt below.
enum BuildForRangeKind {
  /// Initial building of a for-range statement.
  BFRK_Build,

  /// Instantiation or recovery rebuild of a for-range statement. Don't
  /// attempt any typo-correction.
  BFRK_Rebuild,

  /// Determining whether a for-range statement could be built. Avoid any
  /// unnecessary or irreversible actions.
  BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
/// Bitmask flags consumed by getCopyElisionCandidate /
/// isCopyElisionCandidate (below) selecting which entities may be treated
/// as copy-elision candidates.
enum CopyElisionSemanticsKind {
  CES_Strict = 0,
  CES_AllowParameters = 1,
  CES_AllowDifferentTypes = 2,
  CES_AllowExceptionVariables = 4,

  // Named combinations of the flags above.
  CES_FormerDefault = (CES_AllowParameters),
  CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
  CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
                       CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
/// Enter a declaration-parsing context whose delayed diagnostics collect
/// into \p pool; returns the state token to hand back to
/// PopParsingDeclaration.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;

/// Enter a class-parsing context (diagnostics pushed undelayed); returns
/// the state token to hand back to PopParsingClass.
ParsingClassState PushParsingClass() {
  return DelayedDiagnostics.pushUndelayed();
}

/// Leave the class-parsing context entered by PushParsingClass.
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
// The scope in which the original member access was written.
Scope *S;
// The member name as originally written, so it can be re-looked-up.
UnqualifiedId &Id;
// The Objective-C implementation declaration passed to the original
// ActOnMemberAccessExpr call, if any.
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
// Selects how BuildAtomicExpr interprets its argument list: in the order the
// source-level atomic builtin takes them (API) or in the order they are
// stored in the AtomicExpr AST node (AST).
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
// Source range covered by this component.
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
// The component payload, discriminated by isBrackets: the member name
// for '.ident', or the index expression for '[expr]'.
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check, as produced by
/// the CheckMicrosoftIfExistsSymbol overloads below for the Microsoft
/// __if_exists / __if_not_exists extension.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer rather than reference to allow objects of this class to be copied.
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
// Canonical types already recorded in Exceptions, used for deduplication.
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
// The collected dynamic exception types.
SmallVector<QualType, 4> Exceptions;
// Drop all collected exception types (used when the computed specification
// no longer lists individual types).
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
// Start from the most restrictive specification the dialect permits:
// noexcept in C++11 and later, throw() (EST_DynamicNone) otherwise.
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
// C++11 [except.spec]p14:
//   The exception-specification is noexcept(false) if the set of
//   potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
/// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S; // Sema whose CXXThisTypeOverride is temporarily replaced.
QualType OldCXXThisTypeOverride; // Previous override, saved for restoration.
bool Enabled; // False when the RAII object was constructed as a no-op.
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
/// Exits the scope; the class doc above says the override is temporary,
/// so the destructor restores the saved CXXThisTypeOverride.
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions. Passed to
/// FindAllocationFunctions to control where operator new/delete
/// lookup is performed.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
/// Finish a full-expression, using the expression's own source location
/// as the completion point (or an invalid location for a null expression).
/// Delegates to the overload that takes an explicit SourceLocation.
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
  SourceLocation CompletionLoc =
      Expr ? Expr->getExprLoc() : SourceLocation();
  return ActOnFinishFullExpr(Expr, CompletionLoc, DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
/// Creates info object from an already-resolved object type,
/// wrapping the QualType in an opaque ParsedType.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Anything other than plain copy-init ('= expr') is direct-initialization.
  const bool DirectInit = InitKind != LambdaCaptureInitKind::CopyInit;
  // No pack-expansion count is known at this point, hence None.
  QualType CaptureType = buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, /*NumExpansions=*/None, Id, DirectInit, Init);
  return ParsedType::make(CaptureType);
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, and false is returned.
bool CheckConstraintExpression(Expr *CE);
bool CalculateConstraintSatisfaction(ConceptDecl *NamedConcept,
MultiLevelTemplateArgumentList &MLTAL,
Expr *ConstraintExpr,
bool &IsSatisfied);
/// Check that the associated constraints of a template declaration match the
/// associated constraints of an older declaration of which it is a
/// redeclaration.
bool CheckRedeclarationConstraintMatch(TemplateParameterList *Old,
TemplateParameterList *New);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator. \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(FunctionDecl *MD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// Outcome of a C++ access-control check.
enum AccessResult {
/// The entity is accessible from the context of the check.
AR_accessible,
/// The entity is not accessible; a diagnostic has been (or will be) issued.
AR_inaccessible,
/// Accessibility depends on a template parameter and cannot be
/// determined until instantiation.
AR_dependent,
/// The check has been deferred (e.g. until the enclosing class is
/// complete) and will be performed later.
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// Selects which kind of entity illegally has an abstract class type,
/// for use with the abstract-type diagnostics.
/// NOTE(review): the enumerators (other than AbstractNone) appear to line
/// up with a %select in the corresponding diagnostic — confirm against
/// DiagnosticSemaKinds.td before reordering.
enum AbstractDiagSelID {
/// No specific entity kind; suppresses the %select-based note.
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
/// Diagnose if \p T is an abstract class type, forwarding the given
/// diagnostic ID and its arguments.
///
/// Convenience wrapper: packages \p DiagID and \p Args into a
/// BoundTypeDiagnoser and forwards to the TypeDiagnoser overload above.
///
/// \returns true if \p T is abstract (and a diagnostic was emitted),
/// false otherwise — mirroring the overload it forwards to.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
/// The parameter list of a class template.
TPC_ClassTemplate,
/// The parameter list of a variable template.
TPC_VarTemplate,
/// The parameter list of a function template.
TPC_FunctionTemplate,
/// The parameter list of a member of a class template.
TPC_ClassTemplateMember,
/// The parameter list of a friend class template declaration.
TPC_FriendClassTemplate,
/// The parameter list of a friend function template declaration.
TPC_FriendFunctionTemplate,
/// The parameter list of a friend function template definition.
TPC_FriendFunctionTemplateDefinition,
/// The parameter list of an alias template.
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
SourceLocation ConceptNameLoc, NamedDecl *FoundDecl,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
// Concepts
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// A block expression.
UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
  /// Template argument deduction was successful.
  TDK_Success = 0,
  /// The declaration was invalid; do nothing.
  TDK_Invalid,
  /// Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,
  /// Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,
  /// Template argument deduction did not deduce a value for every
  /// expansion of an expanded template parameter pack.
  TDK_IncompletePack,
  /// Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,
  /// Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,
  /// Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,
  /// After substituting deduced template arguments, a dependent
  /// parameter type did not match the corresponding argument.
  TDK_DeducedMismatch,
  /// After substituting deduced template arguments, an element of
  /// a dependent parameter type did not match the corresponding element
  /// of the corresponding argument (when deducing from an initializer list).
  TDK_DeducedMismatchNested,
  /// A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,
  /// When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,
  /// When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,
  /// The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,
  /// Checking non-dependent argument conversions failed.
  TDK_NonDependentConversionFailure,
  /// Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure,
  /// CUDA Target attributes do not match.
  TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
  OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
                  unsigned ArgIdx, QualType OriginalArgType)
      : OriginalParamType(OriginalParamType),
        DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
        OriginalArgType(OriginalArgType) {}

  /// The parameter type from which deduction was performed.
  QualType OriginalParamType;
  /// Whether the parameter was decomposed during deduction.
  /// NOTE(review): inferred from the name; confirm against the uses in
  /// the template-deduction implementation.
  bool DecomposedParam;
  /// Index of the corresponding call argument.
  unsigned ArgIdx;
  /// The type of the call argument as written.
  QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// Convenience overload that forwards to the static
/// MarkDeducedTemplateParameters variant, supplying this semantic-analysis
/// object's ASTContext.
void MarkDeducedTemplateParameters(
    const FunctionTemplateDecl *FunctionTemplate,
    llvm::SmallBitVector &Deduced) {
  MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
  /// The kind of template instantiation we are performing.
  enum SynthesisKind {
    /// We are instantiating a template declaration. The entity is
    /// the declaration we're instantiating (e.g., a CXXRecordDecl).
    TemplateInstantiation,

    /// We are instantiating a default argument for a template
    /// parameter. The Entity is the template parameter whose argument is
    /// being instantiated, the Template is the template, and the
    /// TemplateArgs/NumTemplateArguments provide the template arguments as
    /// specified.
    DefaultTemplateArgumentInstantiation,

    /// We are instantiating a default argument for a function.
    /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
    /// provides the template arguments as specified.
    DefaultFunctionArgumentInstantiation,

    /// We are substituting explicit template arguments provided for
    /// a function template. The entity is a FunctionTemplateDecl.
    ExplicitTemplateArgumentSubstitution,

    /// We are substituting template argument determined as part of
    /// template argument deduction for either a class template
    /// partial specialization or a function template. The
    /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
    /// a TemplateDecl.
    DeducedTemplateArgumentSubstitution,

    /// We are substituting prior template arguments into a new
    /// template parameter. The template parameter itself is either a
    /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
    PriorTemplateArgumentSubstitution,

    /// We are checking the validity of a default template argument that
    /// has been used when naming a template-id.
    DefaultTemplateArgumentChecking,

    /// We are computing the exception specification for a defaulted special
    /// member function.
    ExceptionSpecEvaluation,

    /// We are instantiating the exception specification for a function
    /// template which was deferred until it was needed.
    ExceptionSpecInstantiation,

    /// We are declaring an implicit special member function.
    DeclaringSpecialMember,

    /// We are defining a synthesized function (such as a defaulted special
    /// member).
    DefiningSynthesizedFunction,

    // We are checking the constraints associated with a constrained entity or
    // the constraint expression of a concept. This includes the checks that
    // atomic constraints have the type 'bool' and that they can be constant
    // evaluated.
    ConstraintsCheck,

    // We are substituting template arguments into a constraint expression.
    ConstraintSubstitution,

    /// We are rewriting a comparison operator in terms of an operator<=>.
    RewritingOperatorAsSpaceship,

    /// Added for Template instantiation observation.
    /// Memoization means we are _not_ instantiating a template because
    /// it is already instantiated (but we entered a context where we
    /// would have had to if it was not already instantiated).
    Memoization
  } Kind;

  /// Was the enclosing context a non-instantiation SFINAE context?
  bool SavedInNonInstantiationSFINAEContext;

  /// The point of instantiation or synthesis within the source code.
  SourceLocation PointOfInstantiation;

  /// The entity that is being synthesized.
  Decl *Entity;

  /// The template (or partial specialization) in which we are
  /// performing the instantiation, for substitutions of prior template
  /// arguments.
  NamedDecl *Template;

  /// The list of template arguments we are substituting, if they
  /// are not part of the entity.
  const TemplateArgument *TemplateArgs;

  // FIXME: Wrap this union around more members, or perhaps store the
  // kind-specific members in the RAII object owning the context.
  union {
    /// The number of template arguments in TemplateArgs.
    unsigned NumTemplateArgs;

    /// The special member being declared or defined.
    CXXSpecialMember SpecialMember;
  };

  // Only valid for kinds that carry template arguments; for
  // DeclaringSpecialMember the union slot holds SpecialMember instead,
  // hence the assertion below.
  ArrayRef<TemplateArgument> template_arguments() const {
    assert(Kind != DeclaringSpecialMember);
    return {TemplateArgs, NumTemplateArgs};
  }

  /// The template deduction info object associated with the
  /// substitution or checking of explicit or deduced template arguments.
  sema::TemplateDeductionInfo *DeductionInfo;

  /// The source range that covers the construct that causes
  /// the instantiation, e.g., the template-id that causes a class
  /// template instantiation.
  SourceRange InstantiationRange;

  CodeSynthesisContext()
      : Kind(TemplateInstantiation),
        SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
        Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
        DeductionInfo(nullptr) {}

  /// Determines whether this template is an actual instantiation
  /// that should be counted toward the maximum instantiation depth.
  bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  /// Tag type used to select the exception-specification constructor below.
  struct ExceptionSpecification {};

  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        CodeSynthesisContext::SynthesisKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template declaration.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Tag type used to select the constraints-checking constructor below.
  struct ConstraintsCheck {};

  /// \brief Note that we are checking the constraints associated with some
  /// constrained entity (a concept declaration or a template with associated
  /// constraints).
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintsCheck, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Tag type used to select the constraint-substitution constructor below.
  struct ConstraintSubstitution {};

  /// \brief Note that we are checking a constraint expression associated
  /// with a template declaration or as part of the satisfaction check of a
  /// concept.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintSubstitution, TemplateDecl *Template,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange);

  /// Note that we have finished instantiating this template.
  void Clear();

  // The destructor pops this entry off the instantiation stack via Clear().
  ~InstantiatingTemplate() { Clear(); }

  /// Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

  /// Determine whether we are already instantiating this
  /// specialization in some surrounding active instantiation.
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  // The Sema whose code-synthesis-context stack this object pushes/pops.
  Sema &SemaRef;
  // True when the maximum recursive instantiation depth was exceeded;
  // exposed via isInvalid().
  bool Invalid;
  // True when this specialization is already being instantiated in a
  // surrounding context; exposed via isAlreadyInstantiating().
  bool AlreadyInstantiating;

  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  // General-form private constructor covering all synthesis kinds;
  // presumably the common implementation behind the public constructors
  // above (confirm in SemaTemplateInstantiate.cpp).
  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  // Non-copyable: each object corresponds to exactly one stack entry.
  InstantiatingTemplate(const InstantiatingTemplate&) = delete;
  InstantiatingTemplate&
  operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
  // Entries beyond the non-instantiation prefix of the context stack are
  // actual instantiations.
  return NonInstantiationEntries < CodeSynthesisContexts.size();
}
/// Emit the current instantiation context stack to the diagnostics if it
/// has changed since the last emission, then note the active
/// '#pragma clang attribute' instantiation point when one exists.
void PrintContextStack() {
  const auto Depth = CodeSynthesisContexts.size();
  if (Depth != 0 && Depth != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    // Re-read the size so we record exactly what was printed.
    LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
/// Print the stack of currently-active template instantiations.
void PrintInstantiationStack();
/// Print the instantiation point of the active '#pragma clang attribute'
/// target (invoked from PrintContextStack when
/// PragmaAttributeCurrentTargetDecl is set).
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  const auto &Innermost = ExprEvalContexts.back();
  return Innermost.isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
// Error count when the trap was armed; anything above this occurred
// inside the trap (see hasErrorOccurred()).
unsigned PrevSFINAEErrors;
// State snapshotted by the constructor and restored by the destructor.
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
/// Arm the trap: snapshot the SFINAE-related state and, when not
/// already inside a SFINAE context, mark this as a non-instantiation
/// SFINAE context.
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
/// Disarm the trap: restore every piece of state the constructor
/// saved, discarding any SFINAE errors recorded in between.
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
// Saved value of Sema::DisableTypoCorrection, restored on exit.
bool PrevDisableTypoCorrection;
public:
/// Suppress typo correction (and, via the trap, SFINAE errors) for
/// the lifetime of this object.
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
/// Restore the previous typo-correction setting.
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
// Cache used by the thread-safety analysis; populated and consumed
// elsewhere — see threadSafety::BeforeSet.
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
/// RAII scope that, when enabled, takes over the global queues of
/// pending implicit instantiations and vtable uses so they can be
/// processed eagerly (via perform()), restoring the saved queues on
/// destruction.
class GlobalEagerInstantiationScope {
public:
/// When \p Enabled, swap the global PendingInstantiations and
/// VTableUses lists into this scope, leaving the globals empty.
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
/// Define any used vtables and perform the pending instantiations
/// accumulated while this scope was active (no-op when disabled).
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
/// Restore the saved queues; asserts that everything accumulated in
/// the meantime has already been processed.
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
// Queues swapped out of \c S by the constructor and back by the dtor.
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
/// RAII scope that swaps out PendingLocalImplicitInstantiations so
/// local instantiations queued inside the scope can be performed (via
/// perform()) before the original queue is restored on destruction.
class LocalEagerInstantiationScope {
public:
/// Swap the pending local instantiation queue into this scope,
/// leaving the Sema-level queue empty.
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
/// Perform the local instantiations queued while the scope was live.
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
/// Restore the saved queue; asserts everything queued in between has
/// been performed.
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
// Queue swapped out of \c S by the constructor and back by the dtor.
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
// True once any stored info differs from the default-constructed
// value; while false, getPointerOrNull() returns nullptr.
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
/// Indices must be supplied in strictly increasing order (enforced by
/// the assert); parameters skipped over receive default-constructed
/// infos via the resize.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up, padded with default infos to
/// \p numParams entries — or nullptr if every info is trivial.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
/// Perform deferred template instantiations; when \p LocalOnly is true,
/// only those queued for the current local scope.
void PerformPendingInstantiations(bool LocalOnly = false);
/// Substitute \p TemplateArgs into the type carried by \p T, producing
/// new type-source information; \p Entity names the declaration being
/// formed, for diagnostics.
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
/// Substitute \p TemplateArgs into the bare type \p T.
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
/// Substitute \p TemplateArgs into the type written at \p TL.
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
/// Substitute into a function declaration's type, additionally
/// supplying the enclosing class and 'this' qualifiers for members.
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
/// Substitute \p Args into \p Proto's exception specification and
/// attach the result to \p New.
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
/// Substitute \p Args into the exception specification \p ESI in
/// place, using \p ExceptionStorage to hold substituted types.
/// NOTE(review): bool result presumably signals failure — confirm
/// against the out-of-line definition.
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
/// Substitute into one function parameter, adjusting its index by
/// \p indexAdjustment and optionally expanding a parameter pack.
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
/// Substitute into a whole parameter list, collecting the resulting
/// types in \p ParamTypes and, optionally, the new declarations in
/// \p OutParams.
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
/// Substitute \p TemplateArgs into the expression \p E.
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
/// Substitute \p TemplateArgs into the statement \p S.
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute into a template parameter list.
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute into the declaration \p D, creating the instantiated
/// declaration inside \p Owner.
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute into an initializer expression; \p CXXDirectInit states
/// whether it is a C++ direct-initializer.
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
/// Substitute into \p Pattern's base-class specifiers, attaching the
/// results to \p Instantiation.
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Instantiate the definition of a class from its \p Pattern.
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
/// Instantiate the definition of an enumeration from its \p Pattern.
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
/// Instantiate an in-class member initializer from its \p Pattern.
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
/// An attribute from a template pattern whose instantiation is
/// deferred ("late"): records the attribute, the instantiation scope
/// to use, and the already-created declaration it belongs to.
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
/// Instantiate the attributes of \p Pattern onto \p Inst; attributes
/// that cannot be instantiated yet are collected in \p LateAttrs when
/// it is provided.
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
/// Variant of InstantiateAttrs for use when only the declaration
/// itself (not its definition) is being instantiated.
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
/// Determine whether the given class template specialization would be
/// implemented by a partial or explicit specialization.
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
/// Instantiate the definition of the given class template
/// specialization.
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
/// Instantiate the members of the given, already-instantiated class.
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
/// Instantiate the members of the given class template specialization.
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
/// Substitute template arguments into a nested-name-specifier.
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute template arguments into a declaration name.
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute template arguments into a (possibly qualified) template
/// name.
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute template arguments into \p NumArgs template argument
/// locations from \p Args, accumulating the results into \p Result.
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Instantiate a function's exception specification.
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
/// Produce the declaration of the specialization of \p FTD for the
/// given template arguments.
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
/// Instantiate the definition of the given function from its pattern.
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
/// Build a variable template specialization declaration.
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
/// Complete a variable template specialization begun with
/// BuildVarTemplateInstantiation.
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Fill in \p NewVar (the instantiation) from \p OldVar (the pattern).
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
/// Get (or build) the specialization of the variable template
/// \p VarTempl named by \p MemberNameInfo with \p TemplateArgs.
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
/// Instantiate the initializer of \p Var from that of \p OldVar.
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Instantiate the definition of the given variable from its pattern.
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
/// Instantiate a constructor's member initializers from \p Tmpl.
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Find the declaration that corresponds to \p D within the current
/// instantiation.
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
/// Find the declaration context corresponding to \p DC within the
/// current instantiation.
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
/// The kind of Objective-C container the current context represents
/// (see getObjCContainerKind()).
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build a an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
/// Kinds of special Objective-C methods (the alloc/new/copy/init
/// families); exact semantics live at the use sites.
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
/// Parser-collected information about one keyword argument of an
/// Objective-C method declaration (see ActOnMethodDeclaration).
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
ResultTypeCompatibilityKind
checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
const ObjCInterfaceDecl *CurrentClass);
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// a the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// Called on well-formed '\#pragma clang attribute push' directive that
/// carries an attribute and its subject match rules.
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
/// Called on well-formed '\#pragma clang attribute push' directive that has
/// no attribute (an empty push).
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
/// Diagnoses any '\#pragma clang attribute push' that was never matched by a
/// corresponding pop.
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
/// AddParameterABIAttr - Adds a parameter-ABI attribute \p ABI to a
/// particular declaration.
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
/// Ownership-attribute family: NS (Foundation), CF (CoreFoundation), or
/// OS (libkern) — presumably selects among ns/cf/os_consumed; see uses.
enum class RetainOwnershipKind {NS, CF, OS};
/// AddXConsumedAttr - Adds the consumed-ownership attribute selected by
/// \p K to a particular declaration.
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
/// Set the name of the OpenCL extension currently being processed; read
/// back via getCurrentOpenCLExtension().
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckForDelayedContext = true);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckCaller = true);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis();
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Marks all the functions that might be required for the currently active
/// OpenMP context.
void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
FunctionDecl *Func,
bool MightBeOdrUse);
public:
/// Struct to store the context selectors info for declare variant directive.
using OMPCtxStringType = SmallString<8>;
using OMPCtxSelectorData =
OpenMPCtxSelectorData<OMPCtxStringType, SmallVector<OMPCtxStringType, 4>,
ExprResult>;
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG The function declaration to which the declare variant directive
/// is applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(
DeclGroupPtrTy DG, Expr *VariantRef, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD The function declaration to which the declare variant directive
/// is applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param Data Set of context-specific data for the specified context
/// selector.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
SourceRange SR,
ArrayRef<OMPCtxSelectorData> Data);
/// Called on a well-formed OpenMP clause that carries a single expression
/// argument; \p Kind identifies the specific clause.
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on a well-formed OpenMP clause that takes a simple (enumeration)
/// argument; \p Kind identifies the specific clause.
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on a well-formed OpenMP clause that takes a single expression plus
/// additional enumeration arguments (e.g. 'schedule').
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on a well-formed OpenMP clause that takes no arguments
/// (e.g. 'nowait', 'untied'); \p Kind identifies the specific clause.
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on a well-formed OpenMP clause that carries a list of variables
/// ('private', 'shared', 'map', ...); \p Kind identifies the specific
/// clause, and the remaining parameters carry clause-kind-specific data.
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
/// Determine whether the given checked-conversion kind denotes one of the
/// explicit cast forms (C-style, functional-style, or other cast), as
/// opposed to an implicit conversion or a builtin-overloaded-operator
/// conversion.
static bool isCast(CheckedConversionKind CCK) {
  switch (CCK) {
  case CCK_CStyleCast:
  case CCK_FunctionalCast:
  case CCK_OtherCast:
    return true;
  default:
    return false;
  }
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collector argument expressions for various
/// form of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on it's
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointers types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointers types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
/// Determine whether the conversion of \p From to \p ToType is a
/// string-literal to non-const pointer conversion.
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
/// Check compatibility of the exception specifications involved in
/// converting \p From to \p ToType.
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
/// Perform an implicit conversion of \p From to \p ToType for the given
/// assignment-like \p Action.
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
/// Overload of PerformImplicitConversion taking an explicit implicit
/// conversion sequence \p ICS.
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
/// Overload applying a previously-computed implicit conversion
/// sequence \p ICS.
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// Overload applying a single standard conversion sequence \p SCS.
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
/// Perform a qualification conversion of \p E to type \p Ty.
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
/// Check a comparison of a pointer operand \p E against a null character
/// constant \p NullE.
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
/// Type-check an increment or decrement of a pseudo-object expression.
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
/// Type-check an assignment to a pseudo-object expression.
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
/// Type-check a pseudo-object expression used as an rvalue.
ExprResult checkPseudoObjectRValue(Expr *E);
/// Recreate the syntactic form of the given pseudo-object expression.
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
/// Convenience overload of FindCompositePointerType that operates on
/// ExprResult operands: unwraps them, forwards to the Expr*& overload
/// (which may rewrite the operand expressions when \p ConvertArgs is
/// true), and stores the possibly-updated expressions back into
/// \p E1 and \p E2.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool ConvertArgs = true) {
  Expr *LHS = E1.get();
  Expr *RHS = E2.get();
  QualType Result = FindCompositePointerType(Loc, LHS, RHS, ConvertArgs);
  E1 = LHS;
  E2 = RHS;
  return Result;
}
/// Find the composite Objective-C pointer type for the two operands of a
/// conditional expression.
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
/// Diagnose a conditional expression with a null-pointer operand.
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
/// Diagnose a comparison of an always-non-null pointer \p E against a null
/// pointer constant of kind \p NullType.
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
/// Return the signed vector type corresponding to the vector type \p V.
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Determine whether \p srcType and \p destType are compatible under the
/// lax vector conversion rules.
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
bool &DerivedToBase, bool &ObjCConversion,
bool &ObjCLifetimeConversion,
bool &FunctionConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
/// \param callLoc Location of the call using the argument.
/// \param result The argument expression being checked.
/// \param paramType [in/out] The parameter type involved in the call.
// NOTE: the declaration previously read 'QualType ¶mType' — an
// HTML-entity corruption ('&para;') of 'QualType &paramType'; restored
// the reference parameter so the declaration compiles.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
                              Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
/// The result of checking a conversion involving ARC retainable pointers
/// (see CheckObjCConversion below): valid as-is (ACR_okay), valid but
/// containing an unbridged cast that must later be stripped or diagnosed
/// (ACR_unbridged; see stripARCUnbridgedCast / diagnoseARCUnbridgedCast),
/// or invalid (ACR_error).
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// The result of processing a condition expression, bundling the optional
/// condition variable with the converted condition expression and, for
/// constexpr conditions, its known compile-time value.  Produced by
/// ActOnCondition / ActOnConditionVariable (see ConditionKind for the
/// contexts a condition can appear in).
class ConditionResult {
// The variable declared in the condition, if any (e.g. "if (int x = f())").
// May be null; get() narrows it to VarDecl with cast_or_null.
Decl *ConditionVar;
// The fully-converted condition expression.
FullExprArg Condition;
// True if processing the condition failed.
bool Invalid;
// Whether the condition's value was evaluated at compile time.  Only
// attempted for constexpr, non-value-dependent conditions (see ctor).
bool HasKnownValue;
// The compile-time value; meaningful only when HasKnownValue is true.
bool KnownValue;
friend class Sema;
// NOTE: HasKnownValue is read by KnownValue's initializer, so the member
// declaration order above is significant.
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
// Build a result with no condition at all, optionally marked invalid.
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
// Default: a valid, empty condition.
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
/// Returns the condition variable (may be null) paired with the
/// condition expression.
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
/// Returns the compile-time boolean value of the condition, or None if
/// it was not evaluated (non-constexpr or value-dependent condition).
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
/// Convenience factory for an invalid ConditionResult, used to signal
/// that processing a condition failed.
static ConditionResult ConditionError() {
return ConditionResult(/*Invalid=*/true);
}
/// The syntactic context a condition appears in, which determines the
/// conversion applied to it (boolean vs. integral); see ActOnCondition.
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
// When true, diagnostics should be suppressed.
// NOTE(review): suppression appears to be honored by the callers in
// VerifyIntegerConstantExpression -- confirm against the implementation.
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
/// Emit a diagnostic for an expression that is not an integer constant
/// expression.
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
/// Emit a diagnostic for an expression that is not a formal ICE but can
/// be folded to a constant (cf. the AllowFold parameter of
/// VerifyIntegerConstantExpression); a default implementation is provided.
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before incrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
/// The canonical declaration of the function.
CanonicalDeclPtr<FunctionDecl> FD;
/// A location associated with FD, e.g. the location of a call to it
/// (cf. LocsWithCUDACallDiags below).
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
/// How (or whether) the diagnostic should be emitted.
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
/// Begin a diagnostic of kind K with id DiagID at Loc, attached to
/// function Fn (the function whose emission gates a deferred diag).
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
// NOTE(review): the non-trivial destructor presumably finalizes the
// diagnostic -- confirm in the implementation file.
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
/// Streams Value into whichever diagnostic is active: the immediate
/// SemaDiagnosticBuilder if one exists, otherwise the deferred partial
/// diagnostic stored in Sema::DeviceDeferredDiags under Fn.
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
// Function the (deferred) diagnostic is attached to.
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
// Index of the deferred diagnostic in DeviceDeferredDiags[Fn], if any.
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
/// The CUDA target of a function, as determined by its __host__,
/// __device__, and __global__ attributes; see IdentifyCUDATarget.
enum CUDAFunctionTarget {
CFT_Device, ///< A __device__ function.
CFT_Global, ///< A __global__ (kernel) function.
CFT_Host, ///< A __host__ function (also the result for a null decl).
CFT_HostDevice, ///< A __host__ __device__ function.
CFT_InvalidTarget ///< An invalid combination of target attributes.
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Returns the CUDA target of the context we are currently analyzing.
/// Non-function contexts are classified as host code, since
/// IdentifyCUDATarget returns CFT_Host for a null declaration.
CUDAFunctionTarget CurrentCUDATarget() {
FunctionDecl *CurFD = dyn_cast<FunctionDecl>(CurContext);
return IdentifyCUDATarget(CurFD);
}
// CUDA function call preference. Must be ordered numerically from
// worst to best. See IdentifyCUDAPreference for how a preference is
// computed for a particular caller/callee pair.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given a implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation.
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
// Index of the format-string argument (cf. the format_idx parameter of
// CheckFormatArguments).
unsigned FormatIdx;
// Index of the first data argument to be checked against the format
// string (cf. firstDataArg in CheckFormatArguments).
unsigned FirstDataArg;
// Whether the callee takes a va_list rather than variadic arguments
// (cf. HasVAListArg in CheckFormatArguments).
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
// The recognized flavors of format string (scanf/printf families plus
// platform-specific variants); selected by GetFormatStringType and used to
// drive format-argument checking in CheckFormatArguments.
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
// Type information associated with a registered type-tag magic value
// (see RegisterTypeTagForDatatype).
struct TypeTagData {
// Default constructor leaves the fields uninitialized; it exists only so
// the type can be used as a DenseMap mapped value.
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
/// If true, the argument matched against this tag is expected to be a
/// null pointer (see RegisterTypeTagForDatatype's MustBeNull parameter).
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
// RAII helper that stashes Sema's pending delayed-parse state (delayed
// exception-spec checks and delayed DLL-export classes) on construction and
// restores it on destruction. The destructor asserts that the scope it
// guarded did not leave any new pending work behind before swapping the
// original state back in.
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedDllExportClasses.empty() &&
"there shouldn't be any pending delayed DLL export classes");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
decltype(DelayedDllExportClasses) SavedDllExportClasses;
// Exchanges the saved containers with Sema's live ones; invoked from both
// the constructor and the destructor, so the pair of calls round-trips
// the state.
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
SavedDllExportClasses.swap(S.DelayedDllExportClasses);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
// One recorded access to a potentially misaligned (packed) member, kept
// until diagnostics are emitted at full-expression level.
struct MisalignedMember {
Expr *E;             // the expression designating the member access
RecordDecl *RD;      // the record that contains the member
ValueDecl *MD;       // the member with reduced alignment
CharUnits Alignment; // the member's actual (reduced) alignment
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
// Equality considers only the expression. Declared const so comparisons
// work on const objects and with standard algorithms (the previous
// non-const overload could not be called through a const reference).
bool operator==(const MisalignedMember &m) const { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
// Whether a context was actually pushed (and must be popped in the dtor).
bool Entered = true;
public:
// Pushes NewContext unless ShouldEnter is false.
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
// Variant that reuses the enclosing lambda's context declaration; this
// overload always enters (Entered keeps its default of true).
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
// Variant for braced-init-lists: only enters an UnevaluatedList context
// when currently inside an unevaluated operand under C++11 or later.
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
// The token stream captured for the function body, replayed when the
// template is finally parsed at the end of the translation unit.
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
// Empty/tombstone keys reuse the FunctionDecl base-info sentinels paired
// with a default (invalid) SourceLocation.
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
// Hash combines the canonical FunctionDecl hash with the raw location so
// the same decl at different locations maps to different buckets.
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 *
 * The subtrahend *y is used as scratch space and may be modified.
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize y so that afterwards 0 <= x->tv_usec - y->tv_usec < 1e6
       (borrow seconds into microseconds when needed, and vice versa). */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* The microsecond field of the difference is now certainly positive. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
/* Driver: allocates the two time-step buffers and 13 coefficient arrays,
 * initializes them with a fixed seed, runs the 25-point stencil TESTS
 * times, and reports the best wall-clock time.
 *
 * Fixes vs. the previous version:
 *  - Nx/Ny/Nz/Nt were read uninitialized when command-line arguments were
 *    missing (undefined behavior); sensible defaults are now provided.
 *  - the initialization loops started at index 1, but the stencil reads
 *    halo indices 0..3 (i-4 etc. with i starting at 4), so cells that are
 *    actually read were left uninitialized; loops now start at 0.
 *  - A[1] was never initialized although its halo cells are read from the
 *    second time step on; both buffers now start identical.
 *  - `min(...)` was undefined (the macro defined above is MIN).
 *  - tile_size and the top-level A/coef pointers were leaked.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Grid dimensions include a 4-deep halo on every face (hence the +8).
   * Usage: prog Nx Ny Nz [Nt]; defaults apply when arguments are absent. */
  int Nx = 40, Ny = 40, Nz = 40, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Allocate the two time-step buffers A[0..1][Nz][Ny][Nx]. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for (m=0; m<2; m++) {
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for (i=0; i<Nz; i++) {
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for (j=0; j<Ny; j++) {
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* 13 axis-symmetric coefficient arrays: 1 center + 4 distances x 3 axes. */
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for (m=0; m<13; m++) {
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for (i=0; i<Nz; i++) {
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for (j=0; j<Ny; j++) {
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 8;
  tile_size[3] = 256;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  /* Initialize the full arrays (including the halo, which the stencil
   * reads); both time buffers start identical so every cell read at any
   * time step holds a defined value. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test=0; test<TESTS; test++) {
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
              coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
              coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
              coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
              coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
              coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
              coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
              coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
              coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* MIN is the macro defined at the top of this file; the previous
     * lowercase `min` is not defined anywhere. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (deepest level first).
  for (i=0; i<Nz; i++) {
    for (j=0; j<Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for (m=0; m<13; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
simulation.c | /*******************************************************************************
* Copyright (C) 2015-2019 Commissariat a l'energie atomique et aux energies alternatives (CEA)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
******************************************************************************/
#include <mpi.h>
#include <omp.h>
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <paraconf.h>
// load the PDI header
#include <pdi.h>
/// size of the local data as [HEIGHT, WIDTH] including ghosts & boundary constants
int dsize[2];
/// 2D size of the process grid as [HEIGHT, WIDTH]
int psize[2];
/// 2D rank of the local process in the process grid as [YY, XX]
int pcoord[2];
/** Initialize the data all to 0 except for the left border (XX==0) initialized to 1 million
* \param[out] dat the local data to initialize
*/
/** Initialize the local block: every cell is zeroed, then processes on the
 * left edge of the process grid (pcoord[1]==0) set their first column to
 * 1 million (the hot boundary).
 * \param[out] dat the local data to initialize
 */
void init(double dat[dsize[0]][dsize[1]])
{
	for (int row = 0; row < dsize[0]; ++row) {
		for (int col = 0; col < dsize[1]; ++col) {
			dat[row][col] = 0;
		}
	}
	if (0 == pcoord[1]) {
		for (int row = 0; row < dsize[0]; ++row) {
			dat[row][0] = 1000000;
		}
	}
}
/** Compute the values at the next time-step based on the values at the current time-step
* \param[in] cur the local data at the current time-step
* \param[out] next the local data at the next time-step
*/
/** One smoothing step: each interior cell becomes a weighted average of
 * itself (1/2) and its four direct neighbours (1/8 each); the outermost
 * rows and columns are copied through unchanged.
 * \param[in] dsize local extents as [HEIGHT, WIDTH]
 * \param[in] cur the local data at the current time-step
 * \param[out] next the local data at the next time-step
 */
void iter(int dsize[2], double cur[dsize[0]][dsize[1]], double next[dsize[0]][dsize[1]])
{
	const int ny = dsize[0], nx = dsize[1];

	for (int xx = 0; xx < nx; ++xx)
		next[0][xx] = cur[0][xx];

	#pragma omp parallel for
	for (int yy = 1; yy < ny - 1; ++yy) {
		next[yy][0] = cur[yy][0];
		for (int xx = 1; xx < nx - 1; ++xx) {
			/* Accumulate in the same order as the original expression so
			   floating-point results are bit-identical. */
			double acc = cur[yy][xx] * .5;
			acc += cur[yy][xx - 1] * .125;
			acc += cur[yy][xx + 1] * .125;
			acc += cur[yy - 1][xx] * .125;
			acc += cur[yy + 1][xx] * .125;
			next[yy][xx] = acc;
		}
		next[yy][nx - 1] = cur[yy][nx - 1];
	}

	for (int xx = 0; xx < nx; ++xx)
		next[ny - 1][xx] = cur[ny - 1][xx];
}
/** Exchanges ghost values with neighbours
* \param[in] cart_comm the MPI communicator with all processes organized in a 2D Cartesian grid
* \param[in] cur the local data at the current time-step whose ghosts need exchanging
*/
/** Exchanges ghost values with the four neighbours in the Cartesian grid.
 * \param[in] cart_comm the MPI communicator with all processes organized in a 2D Cartesian grid
 * \param[in,out] cur the local data at the current time-step whose ghosts need exchanging
 */
void exchange(MPI_Comm cart_comm, double cur[dsize[0]][dsize[1]])
{
	MPI_Status status;
	int rank_source, rank_dest;
	static MPI_Datatype column, row;
	static int initialized = 0;

	if ( !initialized ) {
		// The buffers hold doubles, so the derived datatypes must be built
		// from MPI_DOUBLE. They were previously built from MPI_INT, which
		// transferred the wrong number of bytes and corrupted the ghosts.
		MPI_Type_vector(dsize[0]-2, 1, dsize[1], MPI_DOUBLE, &column);
		MPI_Type_commit(&column);
		MPI_Type_contiguous(dsize[1]-2, MPI_DOUBLE, &row);
		MPI_Type_commit(&row);
		initialized = 1;
	}

	// send down
	MPI_Cart_shift(cart_comm, 0, 1, &rank_source, &rank_dest);
	MPI_Sendrecv(&cur[dsize[0]-2][1], 1, row, rank_dest,   100, // send row before ghost
	             &cur[0][1],          1, row, rank_source, 100, // receive 1st row (ghost)
	             cart_comm, &status);

	// send up
	MPI_Cart_shift(cart_comm, 0, -1, &rank_source, &rank_dest);
	MPI_Sendrecv(&cur[1][1],          1, row, rank_dest,   100, // send row after ghost
	             &cur[dsize[0]-1][1], 1, row, rank_source, 100, // receive last row (ghost)
	             cart_comm, &status);

	// send to the right
	MPI_Cart_shift(cart_comm, 1, 1, &rank_source, &rank_dest);
	MPI_Sendrecv(&cur[1][dsize[1]-2], 1, column, rank_dest,   100, // send column before ghost
	             &cur[1][0],          1, column, rank_source, 100, // receive 1st column (ghost)
	             cart_comm, &status);

	// send to the left
	MPI_Cart_shift(cart_comm, 1, -1, &rank_source, &rank_dest);
	MPI_Sendrecv(&cur[1][1],          1, column, rank_dest,   100, // send column after ghost
	             &cur[1][dsize[1]-1], 1, column, rank_source, 100, // receive last column (ghost)
	             cart_comm, &status);
}
/* Entry point: reads the configuration, sets up the Cartesian process grid,
 * runs the heat-propagation loop, and exposes each step to PDI.
 *
 * Fix: the configuration file was parsed a second time right before
 * PDI_init, overwriting `conf` and leaking the first tree; a single parse
 * of the same file serves both purposes.
 */
int main( int argc, char* argv[] )
{
	MPI_Init(&argc, &argv);

	// load the configuration tree (parsed once and reused for PDI_init)
	PC_tree_t conf = PC_parse_path("simulation.yml");

	// NEVER USE MPI_COMM_WORLD IN THE CODE, use our own communicator main_comm instead
	MPI_Comm main_comm = MPI_COMM_WORLD;

	// load the MPI rank & size
	int psize_1d;  MPI_Comm_size(main_comm, &psize_1d);
	int pcoord_1d; MPI_Comm_rank(main_comm, &pcoord_1d);

	long longval;

	// load the global data-size
	int global_size[2];
	PC_int(PC_get(conf, ".global_size.height"), &longval); global_size[0] = longval;
	PC_int(PC_get(conf, ".global_size.width"),  &longval); global_size[1] = longval;

	// load the parallelism configuration
	PC_int(PC_get(conf, ".parallelism.height"), &longval); psize[0] = longval;
	PC_int(PC_get(conf, ".parallelism.width" ), &longval); psize[1] = longval;

	// load the number of time steps to run
	long generations;
	PC_int(PC_get(conf, ".MaxtimeSteps"), &generations);

	// initialize PDI from the ".pdi" subtree of the same configuration
	PDI_init(PC_get(conf, ".pdi"));

	// check the configuration is coherent
	assert(global_size[0]%psize[0]==0);
	assert(global_size[1]%psize[1]==0);
	assert(psize[1]*psize[0] == psize_1d);

	// compute the local data-size with space for ghosts and boundary constants
	dsize[0] = global_size[0]/psize[0] + 2;
	dsize[1] = global_size[1]/psize[1] + 2;

	// create a 2D Cartesian MPI communicator & get our coordinate (rank) in it
	int cart_period[2] = { 0, 0 };
	MPI_Comm cart_comm; MPI_Cart_create(main_comm, 2, psize, cart_period, 1, &cart_comm);
	MPI_Cart_coords(cart_comm, pcoord_1d, 2, pcoord);

	// allocate memory for the double-buffered field data
	double(*cur)[dsize[1]]  = malloc(sizeof(double)*dsize[1]*dsize[0]);
	double(*next)[dsize[1]] = malloc(sizeof(double)*dsize[1]*dsize[0]);

	// initialize the data content
	init(cur);

	// our loop counter so as to be able to use it outside the loop
	int ii=0;

	// share useful configuration bits with PDI
	PDI_multi_expose("init",
	                 "pcoord",       pcoord,       PDI_OUT,
	                 "pcoord_1d",    &pcoord_1d,   PDI_OUT,
	                 "dsize",        dsize,        PDI_OUT,
	                 "psize",        psize,        PDI_OUT,
	                 "timestep",     &ii,          PDI_OUT,
	                 "MaxtimeSteps", &generations, PDI_OUT,
	                 NULL);

	// the main loop
	for (; ii<generations; ++ii) {
		// expose the current field to PDI (I/O, post-processing, ...)
		PDI_multi_expose("Available",
		                 "timestep", &ii,  PDI_OUT,
		                 "local_t",  cur,  PDI_OUT,
		                 NULL);
		// compute the values for the next iteration
		iter(dsize, cur, next);
		// exchange data with the neighbours
		exchange(cart_comm, next);
		// swap the current and next values
		double (*tmp)[dsize[1]] = cur; cur = next; next = tmp;
		// keep the processes in lock-step
		MPI_Barrier(cart_comm);
	}

	// finalize PDI
	PDI_finalize();
	// destroy the paraconf configuration tree
	PC_tree_destroy(&conf);
	// free the allocated memory
	free(cur);
	free(next);
	// finalize MPI
	MPI_Finalize();
	fprintf(stderr, "[%d] SUCCESS\n", pcoord_1d);
	return EXIT_SUCCESS;
}
|
GB_binop__times_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__times_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__times_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__times_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_uint32)
// A*D function (colscale): GB (_AxD__times_uint32)
// D*A function (rowscale): GB (_DxB__times_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__times_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__times_uint32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_uint32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_uint32)
// C=scalar+B GB (_bind1st__times_uint32)
// C=scalar+B' GB (_bind1st_tran__times_uint32)
// C=A+scalar GB (_bind2nd__times_uint32)
// C=A'+scalar GB (_bind2nd_tran__times_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x * y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_UINT32 || GxB_NO_TIMES_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A "+" B where all three matrices are dense and "+" here is the
// TIMES_UINT32 operator; the loop body lives in the included template,
// specialized through the GB_* macros defined above.
void GB (_Cdense_ewise3_accum__times_uint32)
(
GrB_Matrix C,        // input/output: accumulated in place
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads   // number of threads to use
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A "+" B where all three matrices are dense and "+" here is the
// TIMES_UINT32 operator; C is overwritten, not accumulated.
void GB (_Cdense_ewise3_noaccum__times_uint32)
(
GrB_Matrix C,        // output: overwritten with the result
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads   // number of threads to use
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C with the
// TIMES_UINT32 operator. Returns GrB_NO_VALUE when this kernel was
// compiled out via the GxB_NO_* flags (see GB_DISABLE above).
GrB_Info GB (_Cdense_accumB__times_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C with the
// TIMES_UINT32 operator.
GrB_Info GB (_Cdense_accumb__times_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,  // pointer to the scalar, as untyped bytes
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable (the block above always returns); kept as emitted by
// the code generator, which must not be edited by hand.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, using the
// TIMES_UINT32 operator to combine entries.
GrB_Info GB (_AxD__times_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,  // diagonal scaling matrix
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;  // C's value array, typed
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, using the
// TIMES_UINT32 operator to combine entries.
GrB_Info GB (_DxB__times_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,  // diagonal scaling matrix
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;  // C's value array, typed
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, where "+" is the
// TIMES_UINT32 operator on the intersection and, for eWiseUnion, the
// alpha/beta scalars stand in for missing entries of A/B.
GrB_Info GB (_AaddB__times_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
// The scalars are only read for eWiseUnion; left uninitialized otherwise.
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult, method 08: C = A.*B (optionally masked), where C is
// sparse/hypersparse, using the uint32_t TIMES operator.  The entire
// algorithm lives in the included meta file.
GrB_Info GB (_AemultB_08__times_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult, method 02: C<#> = A.*B where A is sparse/hypersparse and B is
// bitmap/full, using the uint32_t TIMES operator.  The GB_FLIPPED macro
// selects whether the template applies fmult(x,y) or fmult(y,x).
GrB_Info GB (_AemultB_02__times_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult, method 04: C<M> = A.*B where M is sparse/hypersparse and both
// A and B are bitmap/full, using the uint32_t TIMES operator.  The whole
// algorithm lives in the included template.
GrB_Info GB (_AemultB_04__times_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked by M or !M) where the output C is
// held in bitmap form, using the uint32_t TIMES operator.  The whole
// algorithm lives in the included template.
GrB_Info GB (_AemultB_bitmap__times_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x * Bx [p] for every entry present in B (bind the scalar x as
// the first operand of the TIMES operator).  Bb is B's bitmap (may be NULL
// for a full matrix, per the GBB macro); bnz is the number of slots.
GrB_Info GB (_bind1st__times_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *cx = (uint32_t *) Cx_output ;
uint32_t *bx = (uint32_t *) Bx_input ;
const uint32_t xval = (*((const uint32_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip slots not present in the bitmap
if (GBB (Bb, p))
{
const uint32_t bij = GBX (bx, p, false) ;
cx [p] = (xval * bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] * y for every entry present in A (bind the scalar y as
// the second operand of the TIMES operator).  Ab is A's bitmap (may be NULL
// for a full matrix, per the GBB macro); anz is the number of slots.
GrB_Info GB (_bind2nd__times_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *cx = (uint32_t *) Cx_output ;
uint32_t *ax = (uint32_t *) Ax_input ;
const uint32_t yval = (*((const uint32_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip slots not present in the bitmap
if (GBB (Ab, p))
{
const uint32_t aij = GBX (ax, p, false) ;
cx [p] = (aij * yval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// C = op (x, A'): transpose A and apply cij = x * aij (uint32_t TIMES) with
// the scalar bound as the first operand.  GB_CAST_OP is the per-entry action
// consumed by GB_unop_transpose.c; despite the name there is no typecast.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x * aij) ; \
}
GrB_Info GB (_bind1st_tran__times_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the bound scalar, read by GB_CAST_OP inside the template
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent generated kernels (generator boilerplate)
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// C = op (A', y): transpose A and apply cij = aij * y (uint32_t TIMES) with
// the scalar bound as the second operand.  GB_CAST_OP is the per-entry
// action consumed by GB_unop_transpose.c; despite the name, no typecast.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij * y) ; \
}
GrB_Info GB (_bind2nd_tran__times_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the bound scalar, read by GB_CAST_OP inside the template
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
read_omp.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#include <omp.h>
// module load gnu/9.3.0
// export OMP_NUM_THREADS=10
// # compiling: link header in stb folder
// gcc -fopenmp -std=gnu99 read_omp.c -o read_omp.x -Istb/ -lm
// # inside a computational node:
// /usr/bin/time ./read_omp.x photo5798471721416766496.jpg
int main(int argc, char *argv[]) {
    // Classify every pixel of the input image to the nearest of 50
    // reference colors (read from color.txt as "(r,g,b)" lines) and print
    // one CSV line: the image name followed by the fraction of pixels
    // assigned to each reference color.
    //
    // Fixes over the previous version: argv/stbi_load/fopen/fscanf are now
    // checked, the image is loaded with desired_channels=3 so the stride-3
    // indexing below is correct even for RGBA or grayscale inputs, the
    // unused f_list handle and the non-standard "#pragma OPTIMIZE" lines
    // are gone, and all resources are released before exit.
    if (argc < 2) {
        fprintf(stderr, "usage: %s image-file\n", argv[0]);
        return 1;
    }
    int width, height, channels;
    // Force 3 components per pixel; stbi reports the file's native count
    // in `channels` but always returns RGB data here.
    unsigned char *img = stbi_load(argv[1], &width, &height, &channels, 3);
    if (img == NULL) {
        fprintf(stderr, "error: cannot load image %s\n", argv[1]);
        return 1;
    }
    FILE *f = fopen("color.txt", "r");
    if (f == NULL) {
        fprintf(stderr, "error: cannot open color.txt\n");
        stbi_image_free(img);
        return 1;
    }
    enum { NCOLORS = 50 };
    int *r = malloc(NCOLORS * sizeof(int));
    int *g = malloc(NCOLORS * sizeof(int));
    int *b = malloc(NCOLORS * sizeof(int));
    int *freq = malloc(NCOLORS * sizeof(int));
    if (r == NULL || g == NULL || b == NULL || freq == NULL) {
        fprintf(stderr, "error: out of memory\n");
        return 1;
    }
    for (int i = 0; i < NCOLORS; i++) {
        int r_t, g_t, b_t;
        if (fscanf(f, "(%d,%d,%d)\n", &r_t, &g_t, &b_t) != 3) {
            fprintf(stderr, "error: malformed color.txt at entry %d\n", i);
            return 1;
        }
        r[i] = r_t;
        g[i] = g_t;
        b[i] = b_t;
        freq[i] = 0;
    }
    fclose(f);
    #pragma omp parallel shared(img, freq, r, g, b) proc_bind(close)
    {
        // Each pixel contributes one count; freq is combined with an
        // OpenMP array-section reduction (requires OpenMP >= 4.5).
        #pragma omp for reduction(+: freq[:NCOLORS])
        for (int i = 0; i < width * height * 3; i += 3) {
            double dist = sqrt(3.0 * 255 * 255);  // max possible RGB distance
            int index = 0;
            for (int j = 0; j < NCOLORS; j++) {
                int v1 = img[i] - r[j];
                int v2 = img[i + 1] - g[j];
                int v3 = img[i + 2] - b[j];
                double dist_t = sqrt((double)(v1 * v1 + v2 * v2 + v3 * v3));
                if (dist_t < dist) {
                    dist = dist_t;
                    index = j;
                }
            }
            freq[index] += 1;
        }
    } // end of parallel region
    printf("%s,", argv[1]);
    for (int i = 0; i < NCOLORS; i++) {
        printf("%9.9f", freq[i] / (double)(width * height));
        if (i < NCOLORS - 1) printf(",");
    }
    printf("\n");
    stbi_image_free(img);
    free(r);
    free(g);
    free(b);
    free(freq);
    return 0;
}
|
profile.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR OOO FFFFF IIIII L EEEEE %
% P P R R O O F I L E %
% PPPP RRRR O O FFF I L EEE %
% P R R O O F I L E %
% P R R OOO F IIIII LLLLL EEEEE %
% %
% %
% MagickCore Image Profile Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/option-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#else
#include <wchar.h>
#include "lcms2.h"
#endif
#endif
#if defined(MAGICKCORE_XML_DELEGATE)
# if defined(MAGICKCORE_WINDOWS_SUPPORT)
# if !defined(__MINGW32__)
# include <win32config.h>
# endif
# endif
# include <libxml/parser.h>
# include <libxml/tree.h>
#endif
/*
Definitions
*/
#define LCMSHDRI
#if !defined(MAGICKCORE_HDRI_SUPPORT)
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
#undef LCMSHDRI
#define LCMSScaleSource(pixel) ScaleQuantumToShort(pixel)
#define LCMSScaleTarget(pixel) ScaleShortToQuantum(pixel)
typedef unsigned short
LCMSType;
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
#undef LCMSHDRI
#define LCMSScaleSource(pixel) (pixel)
#define LCMSScaleTarget(pixel) (pixel)
typedef unsigned short
LCMSType;
#endif
#endif
#if defined(LCMSHDRI)
#define LCMSScaleSource(pixel) (source_scale*QuantumScale*(pixel))
#define LCMSScaleTarget(pixel) ClampToQuantum(target_scale*QuantumRange*(pixel))
typedef double
LCMSType;
#endif
/*
Forward declarations
*/
static MagickBooleanType
SetImageProfileInternal(Image *,const char *,const StringInfo *,
const MagickBooleanType,ExceptionInfo *);
static void
WriteTo8BimProfile(Image *,const char*,const StringInfo *);
/*
Typedef declarations
*/
/*
  One named profile attached to an image: an owned byte blob plus its
  length and a structure signature used for validity checking.
*/
struct _ProfileInfo
{
char
*name;        // profile name (e.g. "icc", "iptc")
size_t
length;       // number of bytes in info
unsigned char
*info;        // raw profile payload
size_t
signature;    // structure signature (presumably MagickCoreSignature -- verify)
};
/*
  Context passed to the LCMS error handler (CMSExceptionHandler) so it can
  report failures against the right image and exception sink.
*/
typedef struct _CMSExceptionInfo
{
Image
*image;        // image being color-transformed (may be NULL)
ExceptionInfo
*exception;    // where CMS errors are recorded
} CMSExceptionInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageProfiles() clones one or more image profiles.
%
% The format of the CloneImageProfiles method is:
%
% MagickBooleanType CloneImageProfiles(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  /*
    Replace image's profile map with a deep copy of clone_image's map.
    When clone_image carries no profiles, image is left untouched.
    Always returns MagickTrue.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickCoreSignature);
  if (clone_image->profiles == (void *) NULL)
    return(MagickTrue);
  /* drop any profiles the destination already owns before cloning */
  if (image->profiles != (void *) NULL)
    DestroyImageProfiles(image);
  image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
    (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageProfile() deletes a profile from the image by its name.
%
% The format of the DeleteImageProfile method is:
%
% MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
// Remove the named profile from the image's profile map.  Returns
// MagickFalse when the image has no profile map (or the splay-tree delete
// fails); otherwise the result of the splay-tree deletion.
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return(MagickFalse);
// mirror the removal into the 8BIM meta profile (NULL payload = delete)
WriteTo8BimProfile(image,name,(StringInfo *) NULL);
return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageProfiles() releases memory associated with an image profile map.
%
% The format of the DestroyProfiles method is:
%
% void DestroyImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  /*
    Release the image's profile map, if one exists, and clear the pointer.
  */
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageProfile() gets a profile associated with an image by name.
%
% The format of the GetImageProfile method is:
%
% const StringInfo *GetImageProfile(const Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
  const char *name)
{
  /*
    Look up the named profile in the image's profile map.  Returns NULL
    when the image has no profile map or no profile with that name.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  return((const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageProfile() gets the next profile name for an image.
%
% The format of the GetNextImageProfile method is:
%
% char *GetNextImageProfile(const Image *image)
%
% A description of each parameter follows:
%
% o hash_info: the hash info.
%
*/
// Advance the profile map's internal iterator and return the next profile
// name, or NULL when the image has no profile map (iteration position is
// kept inside the splay tree itself).
MagickExport char *GetNextImageProfile(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((char *) NULL);
return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r o f i l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
% profile with / to / from an image. If the profile is NULL, it is removed
% from the image otherwise added or applied. Use a name of '*' and a profile
% of NULL to remove all profiles from the image.
%
% ICC and ICM profiles are handled as follows: If the image does not have
% an associated color profile, the one you provide is associated with the
% image and the image pixels are not transformed. Otherwise, the colorspace
% transform defined by the existing and new profile are applied to the image
% pixels and the new profile is associated with the image.
%
% The format of the ProfileImage method is:
%
% MagickBooleanType ProfileImage(Image *image,const char *name,
% const void *datum,const size_t length,const MagickBooleanType clone)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
% o datum: the profile data.
%
% o length: the length of the profile.
%
% o clone: should be MagickFalse.
%
*/
#if defined(MAGICKCORE_LCMS_DELEGATE)
static LCMSType **DestroyPixelThreadSet(LCMSType **pixels)
{
  /*
    Release the per-thread pixel buffers allocated by
    AcquirePixelThreadSet().  Safe to call with NULL.  Always returns NULL
    so callers can write pixels=DestroyPixelThreadSet(pixels).

    Bug fix: the NULL guard was inverted (`!=` instead of `==`), which made
    the function return immediately -- leaking every buffer -- whenever it
    was handed a valid pointer, and dereference pixels[i] when handed NULL.
  */
  register ssize_t
    i;

  if (pixels == (LCMSType **) NULL)
    return((LCMSType **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (LCMSType *) NULL)
      pixels[i]=(LCMSType *) RelinquishMagickMemory(pixels[i]);
  pixels=(LCMSType **) RelinquishMagickMemory(pixels);
  return(pixels);
}
// Allocate one pixel-row buffer per worker thread (columns*channels LCMS
// samples each) for the color transform.  Returns NULL on any allocation
// failure, after releasing whatever was already acquired.
static LCMSType **AcquirePixelThreadSet(const size_t columns,
const size_t channels)
{
LCMSType
**pixels;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(LCMSType **) AcquireQuantumMemory(number_threads,sizeof(*pixels));
if (pixels == (LCMSType **) NULL)
return((LCMSType **) NULL);
// zero the pointer array first so a partial failure can be cleaned up
(void) memset(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(LCMSType *) AcquireQuantumMemory(columns,channels*
sizeof(**pixels));
if (pixels[i] == (LCMSType *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
// Release the per-thread LCMS transforms created by
// AcquireTransformThreadSet().  Requires a non-NULL array (asserted);
// always returns NULL for pointer-clearing assignment by the caller.
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
register ssize_t
i;
assert(transform != (cmsHTRANSFORM *) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (transform[i] != (cmsHTRANSFORM) NULL)
cmsDeleteTransform(transform[i]);
transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
return(transform);
}
// Create one LCMS color transform per worker thread (transforms are not
// shared across threads).  The image pointer is passed to LCMS as the
// cmsContext so errors route back through the registered handler.  Returns
// NULL on any failure, after destroying the transforms already created.
static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image,
const cmsHPROFILE source_profile,const cmsUInt32Number source_type,
const cmsHPROFILE target_profile,const cmsUInt32Number target_type,
const int intent,const cmsUInt32Number flags)
{
cmsHTRANSFORM
*transform;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
sizeof(*transform));
if (transform == (cmsHTRANSFORM *) NULL)
return((cmsHTRANSFORM *) NULL);
// zero the handle array first so a partial failure can be cleaned up
(void) memset(transform,0,number_threads*sizeof(*transform));
for (i=0; i < (ssize_t) number_threads; i++)
{
transform[i]=cmsCreateTransformTHR((cmsContext) image,source_profile,
source_type,target_profile,target_type,intent,flags);
if (transform[i] == (cmsHTRANSFORM) NULL)
return(DestroyTransformThreadSet(transform));
}
return(transform);
}
#endif
#if defined(MAGICKCORE_LCMS_DELEGATE)
// LCMS error callback: translate a CMS error into a Magick ImageWarning on
// the exception attached to the context.  NOTE(review): the cmsContext is
// cast directly to CMSExceptionInfo* -- this assumes the context was
// created with a CMSExceptionInfo as its client data; verify at the
// cmsCreateContext call site.  Degrades gracefully when context, exception,
// or image is missing.
static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
const char *message)
{
CMSExceptionInfo
*cms_exception;
ExceptionInfo
*exception;
Image
*image;
cms_exception=(CMSExceptionInfo *) context;
if (cms_exception == (CMSExceptionInfo *) NULL)
return;
exception=cms_exception->exception;
if (exception == (ExceptionInfo *) NULL)
return;
image=cms_exception->image;
if (image == (Image *) NULL)
{
// no image to attribute the failure to; report a generic warning
(void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
"UnableToTransformColorspace","`%s'","unknown context");
return;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
severity,message != (char *) NULL ? message : "no message");
(void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
"UnableToTransformColorspace","`%s'",image->filename);
}
#endif
static MagickBooleanType SetsRGBImageProfile(Image *image,
ExceptionInfo *exception)
{
static unsigned char
sRGBProfile[] =
{
0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00,
0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20,
0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a,
0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00,
0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6,
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99,
0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67,
0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70,
0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88,
0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c,
0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24,
0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14,
0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24,
0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14,
0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14,
0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14,
0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14,
0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14,
0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36,
0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76,
0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77,
0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39,
0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c,
0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31,
0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75,
0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77,
0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20,
0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66,
0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61,
0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d,
0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52,
0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f,
0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20,
0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57,
0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65,
0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e,
0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20,
0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69,
0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74,
0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e,
0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e,
0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47,
0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61,
0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43,
0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44,
0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63,
0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20,
0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00,
0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c,
0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2,
0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d,
0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00,
0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0,
0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87,
0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4,
0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19,
0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37,
0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54,
0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72,
0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90,
0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae,
0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb,
0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb,
0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d,
0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32,
0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59,
0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83,
0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1,
0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1,
0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14,
0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b,
0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84,
0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1,
0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00,
0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43,
0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a,
0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3,
0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20,
0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71,
0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4,
0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c,
0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77,
0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5,
0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37,
0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d,
0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07,
0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74,
0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5,
0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a,
0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2,
0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f,
0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf,
0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54,
0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc,
0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69,
0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9,
0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e,
0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26,
0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3,
0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64,
0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09,
0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3,
0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61,
0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13,
0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9,
0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84,
0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43,
0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06,
0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce,
0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b,
0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c,
0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41,
0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b,
0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa,
0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd,
0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5,
0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2,
0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3,
0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99,
0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94,
0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94,
0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98,
0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1,
0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf,
0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2,
0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda,
0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7,
0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18,
0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f,
0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b,
0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b,
0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1,
0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c,
0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c,
0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91,
0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb,
0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a,
0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f,
0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8,
0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37,
0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c,
0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05,
0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74,
0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8,
0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61,
0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0,
0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64,
0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee,
0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d,
0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12,
0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab,
0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b,
0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0,
0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a,
0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a,
0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00,
0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb,
0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c,
0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42,
0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f,
0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0,
0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8,
0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95,
0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78,
0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61,
0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f,
0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43,
0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d,
0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d,
0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43,
0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f,
0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60,
0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78,
0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95,
0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8,
0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1,
0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11,
0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46,
0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81,
0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2,
0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a,
0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57,
0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab,
0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04,
0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64,
0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca,
0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36,
0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8,
0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20,
0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f,
0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24,
0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf,
0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40,
0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8,
0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76,
0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a,
0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4,
0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75,
0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d,
0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea,
0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae,
0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79,
0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a,
0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21,
0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff,
0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3,
0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce,
0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf,
0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7,
0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5,
0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba,
0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6,
0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8,
0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1,
0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10,
0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36,
0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63,
0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96,
0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0,
0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11,
0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58,
0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7,
0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb,
0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57,
0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba,
0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff
};
StringInfo
*profile;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (GetImageProfile(image,"icc") != (const StringInfo *) NULL)
return(MagickFalse);
profile=AcquireStringInfo(sizeof(sRGBProfile));
SetStringInfoDatum(profile,sRGBProfile);
status=SetImageProfile(image,"icc",profile,exception);
profile=DestroyStringInfo(profile);
return(status);
}
/*
  ProfileImage() manages a named profile on an image.  With a NULL or empty
  datum it deletes the profile(s) matching `name'.  For non-ICC names it
  simply attaches the profile.  For "icc"/"icm" it attaches the profile and,
  when an ICC profile is already present, uses LittleCMS to transform the
  pixels from the existing (source) profile to the new (target) profile.
  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
  const void *datum,const size_t length,ExceptionInfo *exception)
{
#define ProfileImageTag  "Profile/Image"
  /* Close any open LCMS profile handles before raising the exception. */
#define ThrowProfileException(severity,tag,context) \
{ \
  if (source_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(source_profile); \
  if (target_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(target_profile); \
  ThrowBinaryException(severity,tag,context); \
}

  MagickBooleanType
    status;

  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(name != (const char *) NULL);
  if ((datum == (const void *) NULL) || (length == 0))
    {
      char
        *next;

      /*
        Delete image profile(s); `name' may be a glob/option list, so restart
        the iterator after each removal to keep it valid.
      */
      ResetImageProfileIterator(image);
      for (next=GetNextImageProfile(image); next != (const char *) NULL; )
      {
        if (IsOptionMember(next,name) != MagickFalse)
          {
            (void) DeleteImageProfile(image,next);
            ResetImageProfileIterator(image);
          }
        next=GetNextImageProfile(image);
      }
      return(MagickTrue);
    }
  /*
    Add a ICC, IPTC, or generic profile to the image.
  */
  status=MagickTrue;
  profile=AcquireStringInfo((size_t) length);
  SetStringInfoDatum(profile,(unsigned char *) datum);
  if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
    status=SetImageProfile(image,name,profile,exception);
  else
    {
      const StringInfo
        *icc_profile;

      icc_profile=GetImageProfile(image,"icc");
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          const char
            *value;

          /*
            Re-attaching the identical ICC profile: fall back to the sRGB
            profile when EXIF metadata indicates an sRGB color space.
            NOTE(review): `value' may be NULL when the EXIF property is
            absent — presumably LocaleCompare() tolerates NULL; confirm.
          */
          value=GetImageProperty(image,"exif:ColorSpace",exception);
          (void) value;
          if (LocaleCompare(value,"1") != 0)
            (void) SetsRGBImageProfile(image,exception);
          value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
          if (LocaleCompare(value,"R98.") != 0)
            (void) SetsRGBImageProfile(image,exception);
          /* Future.
          value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
          if (LocaleCompare(value,"R03.") != 0)
            (void) SetAdobeRGB1998ImageProfile(image,exception);
          */
          icc_profile=GetImageProfile(image,"icc");
        }
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          /* Identical profile already attached; nothing to transform. */
          profile=DestroyStringInfo(profile);
          return(MagickTrue);
        }
#if !defined(MAGICKCORE_LCMS_DELEGATE)
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (LCMS)",image->filename);
#else
      {
        cmsHPROFILE
          source_profile;

        CMSExceptionInfo
          cms_exception;

        /*
          Transform pixel colors as defined by the color profiles.
        */
        cmsSetLogErrorHandler(CMSExceptionHandler);
        cms_exception.image=image;
        cms_exception.exception=exception;
        (void) cms_exception;
        source_profile=cmsOpenProfileFromMemTHR((cmsContext) &cms_exception,
          GetStringInfoDatum(profile),(cmsUInt32Number)
          GetStringInfoLength(profile));
        if (source_profile == (cmsHPROFILE) NULL)
          ThrowBinaryException(ResourceLimitError,
            "ColorspaceColorProfileMismatch",name);
        if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) &&
            (icc_profile == (StringInfo *) NULL))
          status=SetImageProfile(image,name,profile,exception);
        else
          {
            CacheView
              *image_view;

            ColorspaceType
              source_colorspace,
              target_colorspace;

            cmsColorSpaceSignature
              signature;

            cmsHPROFILE
              target_profile;

            cmsHTRANSFORM
              *magick_restrict transform;

            cmsUInt32Number
              flags,
              source_type,
              target_type;

            int
              intent;

            LCMSType
              **magick_restrict source_pixels,
              **magick_restrict target_pixels;

#if defined(LCMSHDRI)
            LCMSType
              source_scale,
              target_scale;
#endif

            MagickOffsetType
              progress;

            size_t
              source_channels,
              target_channels;

            ssize_t
              y;

            /*
              When an ICC profile is already attached, it becomes the source
              of the transform and the new profile becomes the target.
            */
            target_profile=(cmsHPROFILE) NULL;
            if (icc_profile != (StringInfo *) NULL)
              {
                target_profile=source_profile;
                source_profile=cmsOpenProfileFromMemTHR((cmsContext)
                  &cms_exception,GetStringInfoDatum(icc_profile),
                  (cmsUInt32Number) GetStringInfoLength(icc_profile));
                if (source_profile == (cmsHPROFILE) NULL)
                  ThrowProfileException(ResourceLimitError,
                    "ColorspaceColorProfileMismatch",name);
              }
#if defined(LCMSHDRI)
            source_scale=1.0;
#endif
            /*
              Map the LCMS source color space onto an ImageMagick colorspace,
              pixel layout (source_type) and channel count.
            */
            source_colorspace=sRGBColorspace;
            source_channels=3;
            switch (cmsGetColorSpace(source_profile))
            {
              case cmsSigCmykData:
              {
                source_colorspace=CMYKColorspace;
                source_channels=4;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_CMYK_DBL;
                source_scale=100.0;
#else
                source_type=(cmsUInt32Number) TYPE_CMYK_16;
#endif
                break;
              }
              case cmsSigGrayData:
              {
                source_colorspace=GRAYColorspace;
                source_channels=1;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_GRAY_DBL;
#else
                source_type=(cmsUInt32Number) TYPE_GRAY_16;
#endif
                break;
              }
              case cmsSigLabData:
              {
                source_colorspace=LabColorspace;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_Lab_DBL;
                source_scale=100.0;
#else
                source_type=(cmsUInt32Number) TYPE_Lab_16;
#endif
                break;
              }
#if !defined(LCMSHDRI)
              case cmsSigLuvData:
              {
                source_colorspace=YUVColorspace;
                source_type=(cmsUInt32Number) TYPE_YUV_16;
                break;
              }
#endif
              case cmsSigRgbData:
              {
                source_colorspace=sRGBColorspace;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_RGB_DBL;
#else
                source_type=(cmsUInt32Number) TYPE_RGB_16;
#endif
                break;
              }
              case cmsSigXYZData:
              {
                source_colorspace=XYZColorspace;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_XYZ_DBL;
#else
                source_type=(cmsUInt32Number) TYPE_XYZ_16;
#endif
                break;
              }
#if !defined(LCMSHDRI)
              case cmsSigYCbCrData:
              {
                source_colorspace=YUVColorspace;
                source_type=(cmsUInt32Number) TYPE_YCbCr_16;
                break;
              }
#endif
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            (void) source_colorspace;
            /*
              Target color space: the PCS for a single profile, or the target
              profile's color space for a device-to-device transform.
            */
            signature=cmsGetPCS(source_profile);
            if (target_profile != (cmsHPROFILE) NULL)
              signature=cmsGetColorSpace(target_profile);
#if defined(LCMSHDRI)
            target_scale=1.0;
#endif
            target_channels=3;
            switch (signature)
            {
              case cmsSigCmykData:
              {
                target_colorspace=CMYKColorspace;
                target_channels=4;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_CMYK_DBL;
                target_scale=0.01;
#else
                target_type=(cmsUInt32Number) TYPE_CMYK_16;
#endif
                break;
              }
              case cmsSigGrayData:
              {
                target_colorspace=GRAYColorspace;
                target_channels=1;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_GRAY_DBL;
#else
                target_type=(cmsUInt32Number) TYPE_GRAY_16;
#endif
                break;
              }
              case cmsSigLabData:
              {
                target_colorspace=LabColorspace;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_Lab_DBL;
                target_scale=0.01;
#else
                target_type=(cmsUInt32Number) TYPE_Lab_16;
#endif
                break;
              }
#if !defined(LCMSHDRI)
              case cmsSigLuvData:
              {
                target_colorspace=YUVColorspace;
                target_type=(cmsUInt32Number) TYPE_YUV_16;
                break;
              }
#endif
              case cmsSigRgbData:
              {
                target_colorspace=sRGBColorspace;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_RGB_DBL;
#else
                target_type=(cmsUInt32Number) TYPE_RGB_16;
#endif
                break;
              }
              case cmsSigXYZData:
              {
                target_colorspace=XYZColorspace;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_XYZ_DBL;
#else
                target_type=(cmsUInt32Number) TYPE_XYZ_16;
#endif
                break;
              }
#if !defined(LCMSHDRI)
              case cmsSigYCbCrData:
              {
                target_colorspace=YUVColorspace;
                target_type=(cmsUInt32Number) TYPE_YCbCr_16;
                break;
              }
#endif
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            /* Translate the image rendering intent into the LCMS constant. */
            switch (image->rendering_intent)
            {
              case AbsoluteIntent: intent=INTENT_ABSOLUTE_COLORIMETRIC; break;
              case PerceptualIntent: intent=INTENT_PERCEPTUAL; break;
              case RelativeIntent: intent=INTENT_RELATIVE_COLORIMETRIC; break;
              case SaturationIntent: intent=INTENT_SATURATION; break;
              default: intent=INTENT_PERCEPTUAL; break;
            }
            flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
            if (image->black_point_compensation != MagickFalse)
              flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
            /* One transform and one scanline buffer pair per OpenMP thread. */
            transform=AcquireTransformThreadSet(image,source_profile,
              source_type,target_profile,target_type,intent,flags);
            if (transform == (cmsHTRANSFORM *) NULL)
              ThrowProfileException(ImageError,"UnableToCreateColorTransform",
                name);
            /*
              Transform image as dictated by the source & target image profiles.
            */
            source_pixels=AcquirePixelThreadSet(image->columns,source_channels);
            target_pixels=AcquirePixelThreadSet(image->columns,target_channels);
            if ((source_pixels == (LCMSType **) NULL) ||
                (target_pixels == (LCMSType **) NULL))
              {
                target_pixels=DestroyPixelThreadSet(target_pixels);
                source_pixels=DestroyPixelThreadSet(source_pixels);
                transform=DestroyTransformThreadSet(transform);
                ThrowProfileException(ResourceLimitError,
                  "MemoryAllocationFailed",image->filename);
              }
            if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
              {
                target_pixels=DestroyPixelThreadSet(target_pixels);
                source_pixels=DestroyPixelThreadSet(source_pixels);
                transform=DestroyTransformThreadSet(transform);
                if (source_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(source_profile);
                if (target_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(target_profile);
                return(MagickFalse);
              }
            if (target_colorspace == CMYKColorspace)
              (void) SetImageColorspace(image,target_colorspace,exception);
            progress=0;
            image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel for schedule(static) shared(status) \
              magick_number_threads(image,image,image->rows,1)
#endif
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              const int
                id = GetOpenMPThreadId();

              MagickBooleanType
                sync;

              register LCMSType
                *p;

              register Quantum
                *magick_restrict q;

              register ssize_t
                x;

              if (status == MagickFalse)
                continue;
              q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
                exception);
              if (q == (Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              /* Pack one scanline into the thread's LCMS source buffer. */
              p=source_pixels[id];
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                *p++=LCMSScaleSource(GetPixelRed(image,q));
                if (source_channels > 1)
                  {
                    *p++=LCMSScaleSource(GetPixelGreen(image,q));
                    *p++=LCMSScaleSource(GetPixelBlue(image,q));
                  }
                if (source_channels > 3)
                  *p++=LCMSScaleSource(GetPixelBlack(image,q));
                q+=GetPixelChannels(image);
              }
              cmsDoTransform(transform[id],source_pixels[id],target_pixels[id],
                (unsigned int) image->columns);
              /* Rewind q and unpack the transformed scanline. */
              p=target_pixels[id];
              q-=GetPixelChannels(image)*image->columns;
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (target_channels == 1)
                  SetPixelGray(image,LCMSScaleTarget(*p),q);
                else
                  SetPixelRed(image,LCMSScaleTarget(*p),q);
                p++;
                if (target_channels > 1)
                  {
                    SetPixelGreen(image,LCMSScaleTarget(*p),q);
                    p++;
                    SetPixelBlue(image,LCMSScaleTarget(*p),q);
                    p++;
                  }
                if (target_channels > 3)
                  {
                    SetPixelBlack(image,LCMSScaleTarget(*p),q);
                    p++;
                  }
                q+=GetPixelChannels(image);
              }
              sync=SyncCacheViewAuthenticPixels(image_view,exception);
              if (sync == MagickFalse)
                status=MagickFalse;
              if (image->progress_monitor != (MagickProgressMonitor) NULL)
                {
                  MagickBooleanType
                    proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
                  #pragma omp atomic
#endif
                  progress++;
                  proceed=SetImageProgress(image,ProfileImageTag,progress,
                    image->rows);
                  if (proceed == MagickFalse)
                    status=MagickFalse;
                }
            }
            image_view=DestroyCacheView(image_view);
            (void) SetImageColorspace(image,target_colorspace,exception);
            /* Update the image type to match the transformed color space. */
            switch (signature)
            {
              case cmsSigRgbData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  TrueColorType : TrueColorAlphaType;
                break;
              }
              case cmsSigCmykData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  ColorSeparationType : ColorSeparationAlphaType;
                break;
              }
              case cmsSigGrayData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  GrayscaleType : GrayscaleAlphaType;
                break;
              }
              default:
                break;
            }
            target_pixels=DestroyPixelThreadSet(target_pixels);
            source_pixels=DestroyPixelThreadSet(source_pixels);
            transform=DestroyTransformThreadSet(transform);
            if ((status != MagickFalse) &&
                (cmsGetDeviceClass(source_profile) != cmsSigLinkClass))
              status=SetImageProfile(image,name,profile,exception);
            if (target_profile != (cmsHPROFILE) NULL)
              (void) cmsCloseProfile(target_profile);
          }
        (void) cmsCloseProfile(source_profile);
      }
#endif
    }
  profile=DestroyStringInfo(profile);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m o v e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemoveImageProfile() removes a named profile from the image and returns its
% value.
%
% The format of the RemoveImageProfile method is:
%
% void *RemoveImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  /*
    Detach the named profile from the image and hand ownership of its
    StringInfo payload to the caller; NULL when no profiles exist.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /* Scrub the matching entry from any embedded 8BIM wrapper first. */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return((StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t P r o f i l e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageProfileIterator() resets the image profile iterator. Use it in
% conjunction with GetNextImageProfile() to iterate over all the profiles
% associated with an image.
%
% The format of the ResetImageProfileIterator method is:
%
% ResetImageProfileIterator(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  /*
    Rewind the profile iterator so GetNextImageProfile() starts over; a
    no-op when the image carries no profiles.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles != (SplayTreeInfo *) NULL)
    ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageProfile() adds a named profile to the image. If a profile with the
% same name already exists, it is replaced. This method differs from the
% ProfileImage() method in that it does not apply CMS color profiles.
%
% The format of the SetImageProfile method is:
%
% MagickBooleanType SetImageProfile(Image *image,const char *name,
% const StringInfo *profile)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name, for example icc, exif, and 8bim (8bim is the
% Photoshop wrapper for iptc profiles).
%
% o profile: A StringInfo structure that contains the named profile.
%
*/
static void *DestroyProfile(void *profile)
{
return((void *) DestroyStringInfo((StringInfo *) profile));
}
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  /* Copy one byte from the resource stream and advance the read cursor. */
  *quantum=p[0];
  return(p+1);
}
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  unsigned int *quantum)
{
  /* Decode a big-endian 32-bit value and advance past its four bytes. */
  unsigned int
    value;

  value=(unsigned int) p[0] << 24;
  value|=(unsigned int) p[1] << 16;
  value|=(unsigned int) p[2] << 8;
  value|=(unsigned int) p[3];
  *quantum=value;
  return(p+4);
}
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  /* Decode a big-endian 16-bit value and advance past its two bytes. */
  *quantum=(unsigned short) (((unsigned int) p[0] << 8) |
    (unsigned int) p[1]);
  return(p+2);
}
static inline void WriteResourceLong(unsigned char *p,
  const unsigned int quantum)
{
  /* Store a 32-bit value at p in big-endian byte order. */
  p[0]=(unsigned char) (quantum >> 24);
  p[1]=(unsigned char) (quantum >> 16);
  p[2]=(unsigned char) (quantum >> 8);
  p[3]=(unsigned char) quantum;
}
/*
  WriteTo8BimProfile() keeps an image's "8bim" profile in sync with a named
  sub-profile (icc/iptc/xmp): it locates the matching resource entry inside
  the 8BIM resource block and either replaces its payload with `profile' or,
  when `profile' is NULL, removes the entry entirely.  Other names are
  ignored.  The rebuilt block is spliced into a fresh StringInfo and stored
  back into the image's profile tree.
*/
static void WriteTo8BimProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  const unsigned char
    *datum,
    *q;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile_8bim;

  ssize_t
    count;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id,
    profile_id;

  /* Map the profile name to its Photoshop 8BIM resource id. */
  if (LocaleCompare(name,"icc") == 0)
    profile_id=0x040f;
  else
    if (LocaleCompare(name,"iptc") == 0)
      profile_id=0x0404;
    else
      if (LocaleCompare(name,"xmp") == 0)
        profile_id=0x0424;
      else
        return;
  /*
    NOTE(review): image->profiles is dereferenced without a NULL check here;
    presumably all callers guarantee the tree exists — confirm.
  */
  profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,"8bim");
  if (profile_8bim == (StringInfo *) NULL)
    return;
  datum=GetStringInfoDatum(profile_8bim);
  length=GetStringInfoLength(profile_8bim);
  /* Walk the resource entries: "8BIM" + id + Pascal name + long count. */
  for (p=datum; p < (datum+length-16); )
  {
    q=p;  /* start of this entry, kept for the deletion splice below */
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    /* Pascal name is padded so (1+length) is even. */
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    /* Payloads are padded to an even byte count. */
    if ((count & 0x01) != 0)
      count++;
    if ((count < 0) || (p > (datum+length-count)) || (count > (ssize_t) length))
      break;
    if (id != profile_id)
      p+=count;
    else
      {
        size_t
          extent,
          offset;

        ssize_t
          extract_extent;

        StringInfo
          *extract_profile;

        extract_extent=0;
        extent=(datum+length)-(p+count);  /* bytes after this entry */
        if (profile == (StringInfo *) NULL)
          {
            /* Delete: copy everything before the entry's "8BIM" marker. */
            offset=(q-datum);
            extract_profile=AcquireStringInfo(offset+extent);
            (void) memcpy(extract_profile->datum,datum,offset);
          }
        else
          {
            /*
              Replace: keep the header, rewrite the 4-byte count that
              immediately precedes the payload, then copy the new payload.
            */
            offset=(p-datum);
            extract_extent=profile->length;
            if ((extract_extent & 0x01) != 0)
              extract_extent++;
            extract_profile=AcquireStringInfo(offset+extract_extent+extent);
            (void) memcpy(extract_profile->datum,datum,offset-4);
            WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
              profile->length);
            (void) memcpy(extract_profile->datum+offset,
              profile->datum,profile->length);
          }
        /* Append the trailing entries unchanged and store the new block. */
        (void) memcpy(extract_profile->datum+offset+extract_extent,
          p+count,extent);
        (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
          ConstantString("8bim"),CloneStringInfo(extract_profile));
        extract_profile=DestroyStringInfo(extract_profile);
        break;
      }
  }
}
/*
  GetProfilesFromResourceBlock() walks a Photoshop 8BIM resource block and
  extracts the embedded items it understands: resolution (0x03ed) is applied
  to the image geometry, and IPTC (0x0404), ICC (0x040f), EXIF (0x0422) and
  XMP (0x0424) payloads are attached as individual named profiles.  Unknown
  resource ids are skipped.
*/
static void GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block,ExceptionInfo *exception)
{
  const unsigned char
    *datum;

  register const unsigned char
    *p;

  size_t
    length;

  ssize_t
    count;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  /* Each entry: "8BIM" + 2-byte id + padded Pascal name + 4-byte count. */
  for (p=datum; p < (datum+length-16); )
  {
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    /* Pascal name is padded so (1+length) is even. */
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    /*
      NOTE(review): `datum+length-count' is evaluated before the range
      checks on `count'; an oversized count could form an out-of-range
      pointer before the later terms reject it — worth confirming upstream.
    */
    if ((p > (datum+length-count)) || (count > (ssize_t) length) || (count < 0))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned int
          resolution;

        unsigned short
          units;

        /*
          Resolution.
        */
        if (count < 10)
          break;
        /* Densities are 16.16 fixed-point values. */
        p=ReadResourceLong(p,&resolution);
        image->resolution.x=((double) resolution)/65536.0;
        p=ReadResourceShort(p,&units)+2;
        p=ReadResourceLong(p,&resolution)+4;
        image->resolution.y=((double) resolution)/65536.0;
        /*
          Values are always stored as pixels per inch.
        */
        if ((ResolutionType) units != PixelsPerCentimeterResolution)
          image->units=PixelsPerInchResolution;
        else
          {
            image->units=PixelsPerCentimeterResolution;
            image->resolution.x/=2.54;
            image->resolution.y/=2.54;
          }
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        /* recursive=MagickTrue avoids writing back into this 8BIM block. */
        (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"icc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"exif",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    /* Entries are padded to an even length. */
    if ((count & 0x01) != 0)
      p++;
  }
}
static MagickBooleanType ValidateXMPProfile(const StringInfo *profile)
{
#if defined(MAGICKCORE_XML_DELEGATE)
  {
    /*
      Validate the candidate XMP payload by round-tripping it through the
      libxml2 parser with errors and warnings suppressed.
    */
    xmlDocPtr
      document;

    document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int)
      GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR |
      XML_PARSE_NOWARNING);
    if (document != (xmlDocPtr) NULL)
      {
        xmlFreeDoc(document);
        return(MagickTrue);
      }
    return(MagickFalse);
  }
#else
  /* Without an XML delegate the profile is accepted unvalidated. */
  return(MagickTrue);
#endif
}
/*
  SetImageProfileInternal() stores a named profile on the image.  When
  `recursive' is MagickFalse the named profile is also mirrored into the
  8BIM wrapper; 8BIM blocks themselves are unpacked into their constituent
  profiles.  The key is stored lower-cased.
*/
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
  const StringInfo *profile,const MagickBooleanType recursive,
  ExceptionInfo *exception)
{
  char
    lowercase_key[MagickPathExtent],
    property_name[MagickPathExtent];

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((LocaleCompare(name,"xmp") == 0) &&
      (ValidateXMPProfile(profile) == MagickFalse))
    {
      /*
        Malformed XMP: emit a warning and report success without storing
        the profile.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "CorruptImageProfile","`%s'",name);
      return(MagickTrue);
    }
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
      DestroyProfile);
  (void) CopyMagickString(lowercase_key,name,MagickPathExtent);
  LocaleLower(lowercase_key);
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(lowercase_key),CloneStringInfo(profile));
  if (status != MagickFalse)
    {
      if (LocaleCompare(name,"8bim") == 0)
        GetProfilesFromResourceBlock(image,profile,exception);
      else
        if (recursive == MagickFalse)
          WriteTo8BimProfile(image,name,profile);
    }
  /*
    Inject profile into image properties.
  */
  (void) FormatLocaleString(property_name,MagickPathExtent,"%s:*",name);
  (void) GetImageProperty(image,property_name,exception);
  return(status);
}
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile,ExceptionInfo *exception)
{
  /* Public entry point: non-recursive profile installation. */
  MagickBooleanType
    status;

  status=SetImageProfileInternal(image,name,profile,MagickFalse,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageProfiles() synchronizes image properties with the image profiles.
% Currently we only support updating the EXIF resolution and orientation.
%
% The format of the SyncImageProfiles method is:
%
% MagickBooleanType SyncImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  /* Consume one byte from the buffer; EOF once it is exhausted. */
  int
    c;

  if (*length == 0)
    return(EOF);
  c=(int) **p;
  (*p)++;
  (*length)--;
  return(c);
}
/*
  Read a 16-bit value from buffer honoring the requested byte order.
  The union round-trip reinterprets the assembled unsigned value as a
  signed quantity without invoking implementation-defined casts.
*/
static inline signed short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned short
    value;

  if (endian == LSBEndian)
    value=(unsigned short) (((unsigned short) buffer[1] << 8) |
      (unsigned short) buffer[0]);
  else
    value=(unsigned short) (((unsigned short) buffer[0] << 8) |
      (unsigned short) buffer[1]);
  quantum.unsigned_value=value & 0xffff;
  return(quantum.signed_value);
}
/*
  Read a 32-bit value from buffer honoring the requested byte order;
  the union reinterprets the unsigned result as signed.
*/
static inline signed int ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned int
    value;

  if (endian == LSBEndian)
    value=((unsigned int) buffer[3] << 24) | ((unsigned int) buffer[2] << 16) |
      ((unsigned int) buffer[1] << 8) | (unsigned int) buffer[0];
  else
    value=((unsigned int) buffer[0] << 24) | ((unsigned int) buffer[1] << 16) |
      ((unsigned int) buffer[2] << 8) | (unsigned int) buffer[3];
  quantum.unsigned_value=value & 0xffffffff;
  return(quantum.signed_value);
}
/*
  Read a big-endian 32-bit value at the cursor, advancing it by four
  bytes.  When fewer than four bytes remain the cursor is left untouched
  and 0 is returned (callers treat 0 as "nothing useful here").
*/
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
  signed int
    value;

  if (*length < 4)
    return(0);
  value=ReadProfileLong(MSBEndian,*p);
  *p+=4;
  (*length)-=4;
  return(value);
}
/*
  Read a big-endian 16-bit value at the cursor, advancing it by two
  bytes; returns 0 without moving the cursor when fewer than two bytes
  remain.
*/
static inline signed short ReadProfileMSBShort(unsigned char **p,
  size_t *length)
{
  signed short
    value;

  if (*length < 2)
    return(0);
  value=ReadProfileShort(MSBEndian,*p);
  *p+=2;
  (*length)-=2;
  return(value);
}
/*
  Serialize the low 32 bits of value at p in the requested byte order.
  The bytes are staged in a local buffer and copied with a single
  memcpy so p needs no particular alignment.
*/
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  unsigned char
    buffer[4];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      buffer[2]=(unsigned char) (value >> 16);
      buffer[3]=(unsigned char) (value >> 24);
    }
  else
    {
      buffer[0]=(unsigned char) (value >> 24);
      buffer[1]=(unsigned char) (value >> 16);
      buffer[2]=(unsigned char) (value >> 8);
      buffer[3]=(unsigned char) value;
    }
  (void) memcpy(p,buffer,4);
}
/*
  Serialize a 16-bit value at p in the requested byte order via an
  unaligned-safe memcpy of a staged two-byte buffer.
*/
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
    }
  else
    {
      buffer[0]=(unsigned char) (value >> 8);
      buffer[1]=(unsigned char) value;
    }
  (void) memcpy(p,buffer,2);
}
/*
  Sync8BimProfile() scans an 8BIM (Photoshop image-resource) profile for
  the resolution resource (id 0x3ED) and patches it in place so it matches
  the image's current resolution and units.  Returns MagickFalse when the
  profile is truncated or a resource length is inconsistent.
*/
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
size_t
length;
ssize_t
count;
unsigned char
*p;
unsigned short
id;
length=GetStringInfoLength(profile);
p=GetStringInfoDatum(profile);
while (length != 0)
{
/* Resync on the "8BIM" resource signature. */
if (ReadProfileByte(&p,&length) != 0x38)
continue;
if (ReadProfileByte(&p,&length) != 0x42)
continue;
if (ReadProfileByte(&p,&length) != 0x49)
continue;
if (ReadProfileByte(&p,&length) != 0x4D)
continue;
/* Need at least id (2) + name length (1) + data length (4). */
if (length < 7)
return(MagickFalse);
id=ReadProfileMSBShort(&p,&length);
/* Skip the Pascal-style resource name (count < length guarantees the
   byte at *p below is in bounds). */
count=(ssize_t) ReadProfileByte(&p,&length);
if ((count >= (ssize_t) length) || (count < 0))
return(MagickFalse);
p+=count;
length-=count;
/* Resource names are padded to an even total size. */
if ((*p & 0x01) == 0)
(void) ReadProfileByte(&p,&length);
count=(ssize_t) ReadProfileMSBLong(&p,&length);
if ((count > (ssize_t) length) || (count < 0))
return(MagickFalse);
if ((id == 0x3ED) && (count == 16))
{
/* Resolution resource: two 16.16 fixed-point densities (x then y at
   offset 8), each followed by a 16-bit units field.  Densities are
   stored per inch, hence the 2.54 conversion from cm. */
if (image->units == PixelsPerCentimeterResolution)
WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*2.54*
65536.0),p);
else
WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*
65536.0),p);
WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
if (image->units == PixelsPerCentimeterResolution)
WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*2.54*
65536.0),p+8);
else
WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*
65536.0),p+8);
WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
}
/* Advance past the resource data to the next resource. */
p+=count;
length-=count;
}
return(MagickTrue);
}
/*
  SyncExifProfile() walks the TIFF/EXIF IFD structure inside an EXIF
  profile and patches the resolution (0x011A/0x011B), orientation (0x0112)
  and resolution-unit (0x0128) entries in place so they match the image.
  Sub-IFDs (EXIF offset 0x8769, interop 0xA005) are traversed via an
  explicit directory stack; a splay tree of visited entry addresses guards
  against cyclic offsets in corrupt profiles.  Returns MagickFalse when no
  well-formed TIFF header can be located.
*/
MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_INTEROP_OFFSET 0xa005
typedef struct _DirectoryInfo
{
unsigned char
*directory;
size_t
entry;
} DirectoryInfo;
DirectoryInfo
directory_stack[MaxDirectoryStack];
EndianType
endian;
size_t
entry,
length,
number_entries;
SplayTreeInfo
*exif_resources;
ssize_t
id,
level,
offset;
/* Byte size of each EXIF data format; index 0 is unused. */
static int
format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};
unsigned char
*directory,
*exif;
/*
Set EXIF resolution tag.
*/
length=GetStringInfoLength(profile);
exif=GetStringInfoDatum(profile);
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
if ((id != 0x4949) && (id != 0x4D4D))
{
/* No TIFF header at the start: scan for an "Exif\0\0" marker. */
while (length != 0)
{
if (ReadProfileByte(&exif,&length) != 0x45)
continue;
if (ReadProfileByte(&exif,&length) != 0x78)
continue;
if (ReadProfileByte(&exif,&length) != 0x69)
continue;
if (ReadProfileByte(&exif,&length) != 0x66)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
break;
}
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
}
/* "II" = little-endian TIFF, "MM" = big-endian. */
endian=LSBEndian;
if (id == 0x4949)
endian=LSBEndian;
else
if (id == 0x4D4D)
endian=MSBEndian;
else
return(MagickFalse);
/* TIFF magic number. */
if (ReadProfileShort(endian,exif+2) != 0x002a)
return(MagickFalse);
/*
This is the offset to the first IFD.
*/
offset=(ssize_t) ReadProfileLong(endian,exif+4);
if ((offset < 0) || ((size_t) offset >= length))
return(MagickFalse);
directory=exif+offset;
level=0;
entry=0;
exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
(void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
do
{
/* Resume a pending directory (pushed when descending into a sub-IFD). */
if (level > 0)
{
level--;
directory=directory_stack[level].directory;
entry=directory_stack[level].entry;
}
if ((directory < exif) || (directory > (exif+length-2)))
break;
/*
Determine how many entries there are in the current IFD.
*/
number_entries=ReadProfileShort(endian,directory);
for ( ; entry < number_entries; entry++)
{
int
components;
register unsigned char
*p,
*q;
size_t
number_bytes;
ssize_t
format,
tag_value;
/* Each IFD entry is 12 bytes: tag(2) format(2) count(4) value/offset(4). */
q=(unsigned char *) (directory+2+(12*entry));
if (q > (exif+length-12))
break; /* corrupt EXIF */
/* Entry already visited: offset cycle in a corrupt profile. */
if (GetValueFromSplayTree(exif_resources,q) == q)
break;
(void) AddValueToSplayTree(exif_resources,q,q);
tag_value=(ssize_t) ReadProfileShort(endian,q);
format=(ssize_t) ReadProfileShort(endian,q+2);
if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
break;
components=(int) ReadProfileLong(endian,q+4);
if (components < 0)
break; /* corrupt EXIF */
number_bytes=(size_t) components*format_bytes[format];
if ((ssize_t) number_bytes < components)
break; /* prevent overflow */
/* Values up to 4 bytes are stored inline; larger ones via offset. */
if (number_bytes <= 4)
p=q+8;
else
{
/*
The directory entry contains an offset.
*/
offset=(ssize_t) ReadProfileLong(endian,q+8);
if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
continue;
if (~length < number_bytes)
continue; /* prevent overflow */
p=(unsigned char *) (exif+offset);
}
switch (tag_value)
{
case 0x011a:
{
/* XResolution: rational numerator; force denominator to 1. */
(void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p);
if (number_bytes == 8)
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x011b:
{
/* YResolution. */
(void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p);
if (number_bytes == 8)
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x0112:
{
/* Orientation. */
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) image->orientation,p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) image->orientation,
p);
break;
}
case 0x0128:
{
/* ResolutionUnit: EXIF value is ImageMagick units+1 (2=inch, 3=cm). */
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) (image->units+1),p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
break;
}
default:
break;
}
if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
{
/* Descend into the sub-IFD: push the current directory (resuming at
   the next entry), then the sub-IFD, then the chained next-IFD if any. */
offset=(ssize_t) ReadProfileLong(endian,p);
if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=directory;
entry++;
directory_stack[level].entry=entry;
level++;
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
if ((directory+2+(12*number_entries)) > (exif+length))
break;
offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
number_entries));
if ((offset != 0) && ((size_t) offset < length) &&
(level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
}
}
break;
}
}
} while (level > 0);
exif_resources=DestroySplayTree(exif_resources);
return(MagickTrue);
}
/*
  SyncImageProfiles() pushes the image's current resolution, units and
  orientation into its 8BIM and EXIF profiles (when present).  Returns
  MagickFalse if either profile could not be synchronized.
*/
MagickPrivate MagickBooleanType SyncImageProfiles(Image *image)
{
  MagickBooleanType
    status;

  StringInfo
    *profile;

  status=MagickTrue;
  profile=(StringInfo *) GetImageProfile(image,"8BIM");
  if ((profile != (StringInfo *) NULL) &&
      (Sync8BimProfile(image,profile) == MagickFalse))
    status=MagickFalse;
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if ((profile != (StringInfo *) NULL) &&
      (SyncExifProfile(image,profile) == MagickFalse))
    status=MagickFalse;
  return(status);
}
|
trmv_x_dia_n_hi.c | #include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * Triangular DIA sparse matrix-vector product: y = alpha*A*x + beta*y.
 * Only the main diagonal (distance == 0) and super-diagonals
 * (distance > 0) are processed; negative distances are skipped,
 * consistent with this file's upper-triangular ("hi") variant.
 *
 * Parallelization: diagonals are distributed across OpenMP threads and
 * each thread accumulates into a private dense vector tmp[tid]; the
 * per-thread vectors are then reduced into y, so no atomics are needed.
 *
 * NOTE(review): the malloc results are not checked for NULL — a failed
 * allocation would crash; consider returning an error status instead.
 */
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
const ALPHA_SPMAT_DIA* A,
const ALPHA_Number* x,
const ALPHA_Number beta,
ALPHA_Number* y)
{
const ALPHA_INT m = A->rows;
const ALPHA_INT n = A->cols;
/* Triangular multiply requires a square matrix. */
if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;
const ALPHA_INT thread_num = alpha_get_thread_num();
/* One zero-initialized partial-result vector per thread. */
ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
for(int i = 0; i < thread_num; ++i)
{
tmp[i] = malloc(sizeof(ALPHA_Number) * m);
memset(tmp[i], 0, sizeof(ALPHA_Number) * m);
}
const ALPHA_INT diags = A->ndiag;
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < diags; ++i)
{
const ALPHA_INT threadId = alpha_get_thread_id();
const ALPHA_INT dis = A->distance[i];
if(dis == 0)
{
/* Main diagonal: tmp[tid][j] += alpha * A(j,j) * x[j]. */
const ALPHA_INT start = i * A->lval;
for(ALPHA_INT j = 0; j < m; ++j)
{
ALPHA_Number v;
alpha_mul(v, alpha, A->values[start + j]);
alpha_madde(tmp[threadId][j], v, x[j]);
}
}
else if(dis > 0)
{
/* Super-diagonal at distance dis: rows 0..m-dis-1, cols dis..m-1. */
const ALPHA_INT row_start = 0;
const ALPHA_INT col_start = dis;
const ALPHA_INT nnz = m - dis;
const ALPHA_INT start = i * A->lval;
for(ALPHA_INT j = 0; j < nnz; ++j)
{
ALPHA_Number v;
alpha_mul(v, alpha, A->values[start + j]);
alpha_madde(tmp[threadId][row_start + j], v, x[col_start + j]);
}
}
}
/* Reduce: y[i] = beta*y[i] + sum over threads of tmp[t][i]. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
alpha_mul(y[i], beta, y[i]);
for(ALPHA_INT j = 0; j < thread_num; ++j)
{
alpha_add(y[i], y[i], tmp[j][i]);
}
}
/* Release the per-thread scratch vectors. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < thread_num; ++i)
{
alpha_free(tmp[i]);
}
alpha_free(tmp);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
/*
 * Public entry point for the upper-triangular DIA trmv kernel; currently
 * always dispatches to the OpenMP implementation.
 */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_DIA* A,
const ALPHA_Number* x,
const ALPHA_Number beta,
ALPHA_Number* y)
{
return ONAME_omp(alpha, A, x, beta, y);
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <float.h>
#include <math.h>
#ifdef _OPENMP
# include <omp.h>
#endif
#include "main.h"
/* Parenthesize both arguments and the whole expansion so expressions such
   as min(x & 3, y) or 2 * min(a, b) evaluate correctly.  NOTE: arguments
   are still evaluated twice — avoid side effects in the operands. */
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
/*
 * parse() scans the command-line arguments and fills *params.
 *
 * Always-available flags:
 *   -c            ask to check the result
 *   -i <n>        number of benchmark iterations
 *   -h, --help    print usage and exit(EXIT_SUCCESS)
 * Conditionally compiled flags (enabled by the benchmark's macros):
 *   -r <n>  (TITER)         -n <n>  (MSIZE)       -m <n>  (SMSIZE)
 *   -b <n>  (BSIZE)         -ib <n> (IBSIZE)
 *   -s <n>  (CUTOFF_SIZE)   -d <n>  (CUTOFF_DEPTH)
 *
 * A flag whose required number is missing exits with EXIT_FAILURE;
 * unknown flags are reported on stderr but parsing continues.
 */
void parse(int argc, char* argv[], struct user_parameters* params)
{
    int i;
    for(i=1; i<argc; i++) {
        if(!strcmp(argv[i], "-c"))
            params->check = 1;
        else if(!strcmp(argv[i], "--help") || !strcmp(argv[i], "-h")) {
            printf("----------------------------------------------\n");
            printf("- KaStORS -\n");
            printf("- Kaapi Starpu OpenMP Runtime task Suite -\n");
            printf("----------------------------------------------\n");
            printf("-h, --help : Show help information\n");
            printf("-c : Ask to check result\n");
            printf("-i : Number of iterations\n");
#ifdef TITER
            /* Fixed typo in help text ("Number ot timestep iteration"). */
            printf("-r : Number of timestep iterations\n");
#endif
#ifdef MSIZE
            printf("-n : Matrix size\n");
#endif
#ifdef SMSIZE
            printf("-m : SubMatrix size\n");
#endif
#ifdef BSIZE
            printf("-b : Block size\n");
#endif
#ifdef IBSIZE
            printf("-ib : Internal Block size\n");
#endif
#ifdef CUTOFF_SIZE
            printf("-s : Cutoff (Size of the matrix)\n");
#endif
#ifdef CUTOFF_DEPTH
            printf("-d : Cutoff (depth)\n");
#endif
            exit(EXIT_SUCCESS);
        } else if(!strcmp(argv[i], "-i")) {
            if (++i < argc)
                params->niter = atoi(argv[i]);
            else {
                fprintf(stderr, "-i requires a number\n");
                exit(EXIT_FAILURE);
            }
#ifdef TITER
        } else if(!strcmp(argv[i], "-r")) {
            if (++i < argc)
                params->titer = atoi(argv[i]);
            else {
                fprintf(stderr, "-r requires a number\n");
                exit(EXIT_FAILURE);
            }
#endif
#ifdef MSIZE
        } else if(!strcmp(argv[i], "-n")) {
            if (++i < argc)
                params->matrix_size = atoi(argv[i]);
            else {
                fprintf(stderr, "-n requires a number\n");
                exit(EXIT_FAILURE);
            }
#endif
#ifdef SMSIZE
        } else if(!strcmp(argv[i], "-m")) {
            if (++i < argc)
                params->submatrix_size = atoi(argv[i]);
            else {
                fprintf(stderr, "-m requires a number\n");
                exit(EXIT_FAILURE);
            }
#endif
#ifdef BSIZE
        } else if(!strcmp(argv[i], "-b")) {
            if (++i < argc)
                params->blocksize = atoi(argv[i]);
            else {
                fprintf(stderr, "-b requires a number\n");
                exit(EXIT_FAILURE);
            }
#endif
#ifdef IBSIZE
        } else if(!strcmp(argv[i], "-ib")) {
            if (++i < argc)
                params->iblocksize = atoi(argv[i]);
            else {
                fprintf(stderr, "-ib requires a number\n");
                exit(EXIT_FAILURE);
            }
#endif
#ifdef CUTOFF_SIZE
        } else if(!strcmp(argv[i], "-s")) {
            if (++i < argc)
                params->cutoff_size = atoi(argv[i]);
            else {
                fprintf(stderr, "-s requires a number\n");
                exit(EXIT_FAILURE);
            }
#endif
#ifdef CUTOFF_DEPTH
        } else if(!strcmp(argv[i], "-d")) {
            if (++i < argc)
                params->cutoff_depth = atoi(argv[i]);
            else {
                fprintf(stderr, "-d requires a number\n");
                exit(EXIT_FAILURE);
            }
#endif
        } else
            fprintf(stderr, "Unknown parameter : %s\n", argv[i]);
    }
}
/*
 * qsort comparator for doubles: negative / zero / positive when the
 * first element is smaller / equal / larger.
 */
int comp (const void * elem1, const void * elem2)
{
    const double a = *(const double *) elem1;
    const double b = *(const double *) elem2;
    return (a > b) - (a < b);
}
/*
 * Benchmark driver: parses options, runs a warmup pass, then executes the
 * kernel params.niter times and reports avg/std/min/max/median of the
 * per-run measurements (time, or Gflops when GFLOPS is defined), plus an
 * optional correctness check.
 *
 * Fix: the address-of operator had been mojibake-mangled ("&params" had
 * become a pilcrow); restored the four &params call sites.
 */
int main(int argc, char* argv[])
{
    int num_threads = 1;
    struct user_parameters params;
    memset(&params, 0, sizeof(params));
    /* default value */
    params.niter = 1;
    parse(argc, argv, &params);

    /* Number of threads when OpenMP is enabled (1 otherwise). */
#ifdef _OPENMP
#pragma omp parallel
#pragma omp master
    num_threads = omp_get_num_threads();
#endif

    /* Warmup run, not included in the statistics. */
    run(&params);

    double mean = 0.0;
    double meansqr = 0.0;
    double min_ = DBL_MAX;
    double max_ = -1;
    double* all_times = (double*)malloc(sizeof(double) * params.niter);
    if (all_times == NULL) {
        fprintf(stderr, "allocation failure\n");
        exit(EXIT_FAILURE);
    }
    for (int i=0; i<params.niter; ++i)
    {
        double cur_time = run(&params);
        all_times[i] = cur_time;
        mean += cur_time;
        min_ = min(min_, cur_time);
        max_ = max(max_, cur_time);
        meansqr += cur_time * cur_time;
    }
    mean /= params.niter;
    meansqr /= params.niter;
    double stddev = sqrt(meansqr - mean * mean);
    qsort(all_times, params.niter, sizeof(double), comp);
    /* NOTE(review): for an even niter this is the upper median. */
    double median = all_times[params.niter / 2];
    free(all_times);

    printf("Program : %s\n", argv[0]);
#ifdef MSIZE
    printf("Size : %d\n", params.matrix_size);
#endif
#ifdef SMSIZE
    printf("Submatrix size : %d\n", params.submatrix_size);
#endif
#ifdef BSIZE
    printf("Blocksize : %d\n", params.blocksize);
#endif
#ifdef IBSIZE
    printf("Internal Blocksize : %d\n", params.iblocksize);
#endif
#ifdef TITER
    printf("Iteration time : %d\n", params.titer);
#endif
    printf("Iterations : %d\n", params.niter);
#ifdef CUTOFF_SIZE
    printf("Cutoff Size : %d\n", params.cutoff_size);
#endif
#ifdef CUTOFF_DEPTH
    printf("Cutoff depth : %d\n", params.cutoff_depth);
#endif
    printf("Threads : %d\n", num_threads);
#ifdef GFLOPS
    printf("Gflops:: ");
#else
    printf("Time(sec):: ");
#endif
    printf("avg : %lf :: std : %lf :: min : %lf :: max : %lf :: median : %lf\n",
           mean, stddev, min_, max_, median);
    if(params.check)
        printf("Check : %s\n", (params.succeed)?
               ((params.succeed > 1)?"not implemented":"success")
               :"fail");
    if (params.string2display !=0)
        printf("%s", params.string2display);
    printf("\n");
    return 0;
}
|
GB_unop__identity_bool_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_bool_int64)
// op(A') function: GB (_unop_tran__identity_bool_int64)
// C type: bool
// A type: int64_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (bool) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (bool) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/*
  Cx [p] = (bool) Ax [p] for all entries p.  Auto-generated kernel for the
  IDENTITY unary op with int64 input cast to bool output.  When Ab is
  non-NULL the matrix is bitmap-form and positions with Ab [p] == 0 are
  skipped.  Returns GrB_NO_VALUE when the operator is disabled at
  compile time (GB_DISABLE).
*/
GrB_Info GB (_unop_apply__identity_bool_int64)
(
bool *Cx, // Cx and Ax may be aliased
const int64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/sparse case: every position holds an entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
bool z = (bool) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int64_t aij = Ax [p] ;
bool z = (bool) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/*
  C = (bool) A': transpose with typecast and the IDENTITY op applied.
  The entire implementation is the shared template GB_unop_transpose.c,
  specialized by the GB_* macros defined above in this file.
*/
GrB_Info GB (_unop_tran__identity_bool_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
clean.h | /****************************************************************************
* VCGLib o o *
* Visual and Computer Graphics Library o o *
* _ O _ *
* Copyright(C) 2004-2016 \/)\/ *
* Visual Computing Lab /\/| *
* ISTI - Italian National Research Council | *
* \ *
* All rights reserved. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License (http://www.gnu.org/licenses/gpl.txt) *
* for more details. *
* *
****************************************************************************/
#ifndef __VCGLIB_CLEAN
#define __VCGLIB_CLEAN
// VCG headers
#include <vcg/complex/complex.h>
#include <vcg/simplex/face/pos.h>
#include <vcg/simplex/face/topology.h>
#include <vcg/simplex/edge/topology.h>
#include <vcg/complex/algorithms/closest.h>
#include <vcg/space/index/grid_static_ptr.h>
#include <vcg/space/index/spatial_hashing.h>
#include <vcg/complex/algorithms/update/selection.h>
#include <vcg/complex/algorithms/update/flag.h>
#include <vcg/complex/algorithms/update/normal.h>
#include <vcg/complex/algorithms/update/topology.h>
#include <vcg/space/triangle3.h>
namespace vcg {
namespace tri{
template <class ConnectedMeshType>
class ConnectedComponentIterator
{
public:
typedef ConnectedMeshType MeshType;
typedef typename MeshType::VertexType VertexType;
typedef typename MeshType::VertexPointer VertexPointer;
typedef typename MeshType::VertexIterator VertexIterator;
typedef typename MeshType::ScalarType ScalarType;
typedef typename MeshType::FaceType FaceType;
typedef typename MeshType::FacePointer FacePointer;
typedef typename MeshType::FaceIterator FaceIterator;
typedef typename MeshType::ConstFaceIterator ConstFaceIterator;
typedef typename MeshType::FaceContainer FaceContainer;
public:
void operator ++()
{
FacePointer fpt=sf.top();
sf.pop();
for(int j=0;j<3;++j)
if( !face::IsBorder(*fpt,j) )
{
FacePointer l=fpt->FFp(j);
if( !tri::IsMarked(*mp,l) )
{
tri::Mark(*mp,l);
sf.push(l);
}
}
}
void start(MeshType &m, FacePointer p)
{
tri::RequirePerFaceMark(m);
mp=&m;
while(!sf.empty()) sf.pop();
UnMarkAll(m);
tri::Mark(m,p);
sf.push(p);
}
bool completed() {
return sf.empty();
}
FacePointer operator *()
{
return sf.top();
}
private:
std::stack<FacePointer> sf;
MeshType *mp;
};
///
/** \addtogroup trimesh */
/*@{*/
/// Class of static functions to clean//restore meshs.
template <class CleanMeshType>
class Clean
{
public:
typedef CleanMeshType MeshType;
typedef typename MeshType::VertexType VertexType;
typedef typename MeshType::VertexPointer VertexPointer;
typedef typename MeshType::VertexIterator VertexIterator;
typedef typename MeshType::ConstVertexIterator ConstVertexIterator;
typedef typename MeshType::EdgeIterator EdgeIterator;
typedef typename MeshType::EdgePointer EdgePointer;
typedef typename MeshType::CoordType CoordType;
typedef typename MeshType::ScalarType ScalarType;
typedef typename MeshType::FaceType FaceType;
typedef typename MeshType::FacePointer FacePointer;
typedef typename MeshType::FaceIterator FaceIterator;
typedef typename MeshType::ConstFaceIterator ConstFaceIterator;
typedef typename MeshType::FaceContainer FaceContainer;
typedef typename vcg::Box3<ScalarType> Box3Type;
typedef GridStaticPtr<FaceType, ScalarType > TriMeshGrid;
/* classe di confronto per l'algoritmo di eliminazione vertici duplicati*/
class RemoveDuplicateVert_Compare{
public:
inline bool operator()(VertexPointer const &a, VertexPointer const &b)
{
return ((*a).cP() == (*b).cP()) ? (a<b): ((*a).cP() < (*b).cP());
}
};
/** This function removes all duplicate vertices of the mesh by looking only at their spatial positions.
* Note that it does not update any topology relation that could be affected by this like the VT or TT relation.
* the reason this function is usually performed BEFORE building any topology information.
*/
/// Removes vertices that share the exact same position.  Strategy: sort
/// vertex pointers by position, map each duplicate onto the first (kept)
/// representative, delete the duplicates, then rewrite the vertex
/// references of all faces and edges through the map.  Optionally removes
/// the degenerate faces/edges this collapsing may create.
/// Returns the number of deleted vertices.
static int RemoveDuplicateVertex( MeshType & m, bool RemoveDegenerateFlag=true) // V1.0
{
if(m.vert.size()==0 || m.vn==0) return 0;
// duplicate -> representative vertex map
std::map<VertexPointer, VertexPointer> mp;
size_t i,j;
VertexIterator vi;
int deleted=0;
int k=0;
size_t num_vert = m.vert.size();
// Sort pointers by position (ties broken by address) so equal
// positions become adjacent.
std::vector<VertexPointer> perm(num_vert);
for(vi=m.vert.begin(); vi!=m.vert.end(); ++vi, ++k)
perm[k] = &(*vi);
RemoveDuplicateVert_Compare c_obj;
std::sort(perm.begin(),perm.end(),c_obj);
j = 0;
i = j;
mp[perm[i]] = perm[j];
++i;
// Sweep: perm[j] is the kept representative of the current run of
// equal positions; every perm[i] matching it is deleted and remapped.
for(;i!=num_vert;)
{
if( (! (*perm[i]).IsD()) &&
(! (*perm[j]).IsD()) &&
(*perm[i]).P() == (*perm[j]).cP() )
{
VertexPointer t = perm[i];
mp[perm[i]] = perm[j];
++i;
Allocator<MeshType>::DeleteVertex(m,*t);
deleted++;
}
else
{
j = i;
++i;
}
}
// Redirect face vertex references through the map.
for(FaceIterator fi = m.face.begin(); fi!=m.face.end(); ++fi)
if( !(*fi).IsD() )
for(k = 0; k < (*fi).VN(); ++k)
if( mp.find( (typename MeshType::VertexPointer)(*fi).V(k) ) != mp.end() )
{
(*fi).V(k) = &*mp[ (*fi).V(k) ];
}
// Redirect edge vertex references as well.
for(EdgeIterator ei = m.edge.begin(); ei!=m.edge.end(); ++ei)
if( !(*ei).IsD() )
for(k = 0; k < 2; ++k)
if( mp.find( (typename MeshType::VertexPointer)(*ei).V(k) ) != mp.end() )
{
(*ei).V(k) = &*mp[ (*ei).V(k) ];
}
// Collapsed vertices can produce degenerate/duplicate simplices.
if(RemoveDegenerateFlag) RemoveDegenerateFace(m);
if(RemoveDegenerateFlag && m.en>0) {
RemoveDegenerateEdge(m);
RemoveDuplicateEdge(m);
}
return deleted;
}
class SortedPair
{
public:
SortedPair() {}
SortedPair(unsigned int v0, unsigned int v1, EdgePointer _fp)
{
v[0]=v0;v[1]=v1;
fp=_fp;
if(v[0]>v[1]) std::swap(v[0],v[1]);
}
bool operator < (const SortedPair &p) const
{
return (v[1]!=p.v[1])?(v[1]<p.v[1]):
(v[0]<p.v[0]); }
bool operator == (const SortedPair &s) const
{
if( (v[0]==s.v[0]) && (v[1]==s.v[1]) ) return true;
return false;
}
unsigned int v[2];
EdgePointer fp;
};
/// Order-insensitive triple of vertex indices plus the face they came
/// from; comparison and equality use the sorted indices, so two faces
/// with the same vertex set compare equal.
class SortedTriple
{
public:
  SortedTriple() {}

  SortedTriple(unsigned int v0, unsigned int v1, unsigned int v2,FacePointer _fp)
  {
    v[0] = v0;
    v[1] = v1;
    v[2] = v2;
    fp = _fp;
    std::sort(v, v + 3);
  }

  bool operator < (const SortedTriple &p) const
  {
    if (v[2] != p.v[2])
      return v[2] < p.v[2];
    if (v[1] != p.v[1])
      return v[1] < p.v[1];
    return v[0] < p.v[0];
  }

  bool operator == (const SortedTriple &s) const
  {
    return (v[0] == s.v[0]) && (v[1] == s.v[1]) && (v[2] == s.v[2]);
  }

  unsigned int v[3];
  FacePointer fp;
};
/** This function removes all duplicate faces of the mesh by looking only at their vertex reference.
So it should be called after unification of vertices.
Note that it does not update any topology relation that could be affected by this like the VT or TT relation.
the reason this function is usually performed BEFORE building any topology information.
*/
/// Removes faces that reference the same set of vertices (call after
/// vertex unification).  Collects a sorted index triple per live face,
/// sorts, and deletes the first face of every adjacent equal pair —
/// i.e. all but one face per duplicate group.  Topology relations (VT,
/// TT) are NOT updated.  Returns the number of deleted faces.
static int RemoveDuplicateFace( MeshType & m) // V1.0
{
  std::vector<SortedTriple> triples;
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if ((*fi).IsD())
      continue;
    triples.push_back(SortedTriple(tri::Index(m, (*fi).V(0)),
                                   tri::Index(m, (*fi).V(1)),
                                   tri::Index(m, (*fi).V(2)),
                                   &*fi));
  }
  std::sort(triples.begin(), triples.end());
  int removed = 0;
  for (int i = 0; i + 1 < int(triples.size()); ++i)
    if (triples[i] == triples[i + 1])
    {
      ++removed;
      tri::Allocator<MeshType>::DeleteFace(m, *(triples[i].fp));
    }
  return removed;
}
/** This function removes all duplicate faces of the mesh by looking only at their vertex reference.
So it should be called after unification of vertices.
Note that it does not update any topology relation that could be affected by this like the VT or TT relation.
the reason this function is usually performed BEFORE building any topology information.
*/
/// Removes edges that reference the same (unordered) vertex pair — call
/// after vertex unification.  Same sort-and-sweep scheme as
/// RemoveDuplicateFace; all but one edge per duplicate group is deleted.
/// Returns the number of deleted edges.
static int RemoveDuplicateEdge( MeshType & m) // V1.0
{
  if (m.en == 0)
    return 0;
  std::vector<SortedPair> pairs;
  for (EdgeIterator ei = m.edge.begin(); ei != m.edge.end(); ++ei)
  {
    if ((*ei).IsD())
      continue;
    pairs.push_back(SortedPair(tri::Index(m, (*ei).V(0)),
                               tri::Index(m, (*ei).V(1)),
                               &*ei));
  }
  std::sort(pairs.begin(), pairs.end());
  int removed = 0;
  for (int i = 0; i + 1 < int(pairs.size()); ++i)
    if (pairs[i] == pairs[i + 1])
    {
      ++removed;
      tri::Allocator<MeshType>::DeleteEdge(m, *(pairs[i].fp));
    }
  return removed;
}
/// Counts the vertices referenced by no face and no edge, without
/// deleting them (dry run of RemoveUnreferencedVertex).
static int CountUnreferencedVertex( MeshType& m)
{
return RemoveUnreferencedVertex(m,false);
}
/** This function removes that are not referenced by any face. The function updates the vn counter.
@param m The mesh
@return The number of removed vertices
*/
/** This function removes vertices that are not referenced by any face or
edge.  A temporary per-vertex user bit marks referenced vertices; the bit
is released before returning.  When DeleteVertexFlag is false it only
counts (see CountUnreferencedVertex).  The function updates the vn counter.
@param m The mesh
@param DeleteVertexFlag when false, count without deleting
@return The number of removed (or merely counted) vertices
*/
static int RemoveUnreferencedVertex( MeshType& m, bool DeleteVertexFlag=true) // V1.0
{
FaceIterator fi;
EdgeIterator ei;
VertexIterator vi;
int referredBit = VertexType::NewBitFlag();
int j;
int deleted = 0;
// Clear the scratch bit on every vertex.
for(vi=m.vert.begin();vi!=m.vert.end();++vi)
(*vi).ClearUserBit(referredBit);
// Mark every vertex referenced by a live face...
for(fi=m.face.begin();fi!=m.face.end();++fi)
if( !(*fi).IsD() )
for(j=0;j<(*fi).VN();++j)
(*fi).V(j)->SetUserBit(referredBit);
// ...or by a live edge.
for(ei=m.edge.begin();ei!=m.edge.end();++ei)
if( !(*ei).IsD() ){
(*ei).V(0)->SetUserBit(referredBit);
(*ei).V(1)->SetUserBit(referredBit);
}
// Unmarked live vertices are unreferenced.
for(vi=m.vert.begin();vi!=m.vert.end();++vi)
if( (!(*vi).IsD()) && (!(*vi).IsUserBit(referredBit)))
{
if(DeleteVertexFlag) Allocator<MeshType>::DeleteVertex(m,*vi);
++deleted;
}
VertexType::DeleteBitFlag(referredBit);
return deleted;
}
/**
Degenerate vertices are vertices that have coords with invalid floating point values,
All the faces incident on deleted vertices are also deleted
*/
/**
Degenerate vertices are vertices that have coords with invalid floating
point values (NaN in any coordinate).  All the faces incident on deleted
vertices are also deleted (in a second pass, relying on the IsD() flag
set by the first pass).  Returns the number of deleted vertices only.
*/
static int RemoveDegenerateVertex(MeshType& m)
{
VertexIterator vi;
int count_vd = 0;
// Pass 1: delete vertices with a NaN coordinate.
for(vi=m.vert.begin(); vi!=m.vert.end();++vi)
if(math::IsNAN( (*vi).P()[0]) ||
math::IsNAN( (*vi).P()[1]) ||
math::IsNAN( (*vi).P()[2]) )
{
count_vd++;
Allocator<MeshType>::DeleteVertex(m,*vi);
}
FaceIterator fi;
int count_fd = 0;
// Pass 2: delete faces that reference a deleted vertex.
for(fi=m.face.begin(); fi!=m.face.end();++fi)
if(!(*fi).IsD())
if( (*fi).V(0)->IsD() ||
(*fi).V(1)->IsD() ||
(*fi).V(2)->IsD() )
{
count_fd++;
Allocator<MeshType>::DeleteFace(m,*fi);
}
return count_vd;
}
/**
Degenerate faces are faces that are Topologically degenerate,
i.e. have two or more vertex reference that link the same vertex
(and not only two vertexes with the same coordinates).
All Degenerate faces are zero area faces BUT not all zero area faces are degenerate.
We do not take care of topology because when we have degenerate faces the
topology calculation functions crash.
*/
/**
Removes topologically degenerate faces: faces where two or more vertex
references point to the same vertex (not merely coincident coordinates).
All degenerate faces are zero-area, but not vice versa.  Topology is not
touched: with degenerate faces present the topology builders would crash.
Returns the number of deleted faces.
*/
static int RemoveDegenerateFace(MeshType& m)
{
  int removed = 0;
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if ((*fi).IsD())
      continue;
    const bool degenerate =
        ((*fi).V(0) == (*fi).V(1)) ||
        ((*fi).V(0) == (*fi).V(2)) ||
        ((*fi).V(1) == (*fi).V(2));
    if (degenerate)
    {
      ++removed;
      Allocator<MeshType>::DeleteFace(m, *fi);
    }
  }
  return removed;
}
/// Removes degenerate edges, i.e. edges whose two vertex references
/// coincide.  Returns the number of deleted edges.
static int RemoveDegenerateEdge(MeshType& m)
{
  int removed = 0;
  for (EdgeIterator ei = m.edge.begin(); ei != m.edge.end(); ++ei)
  {
    if ((*ei).IsD())
      continue;
    if ((*ei).V(0) == (*ei).V(1))
    {
      ++removed;
      Allocator<MeshType>::DeleteEdge(m, *ei);
    }
  }
  return removed;
}
/// Deletes every non-manifold vertex together with all its incident
/// faces.  CountNonManifoldVertexFF(m,true) selects the offending
/// vertices; the selection is then dilated to their incident faces, and
/// both selected faces and vertices are deleted.  Requires FF adjacency
/// (via CountNonManifoldVertexFF).  Returns the number of deleted
/// vertices (faces are not counted).
static int RemoveNonManifoldVertex(MeshType& m)
{
CountNonManifoldVertexFF(m,true);
tri::UpdateSelection<MeshType>::FaceFromVertexLoose(m);
int count_removed = 0;
for(FaceIterator fi=m.face.begin(); fi!=m.face.end();++fi)
if(!(*fi).IsD() && (*fi).IsS())
Allocator<MeshType>::DeleteFace(m,*fi);
for(VertexIterator vi=m.vert.begin(); vi!=m.vert.end();++vi)
if(!(*vi).IsD() && (*vi).IsS()) {
++count_removed;
Allocator<MeshType>::DeleteVertex(m,*vi);
}
return count_removed;
}
/// Splits each selected vertex of an edge mesh so that every incident
/// edge beyond the first gets its own copy of the vertex.  The first
/// edge that reaches a selected vertex keeps the original; subsequent
/// edges receive a freshly added vertex at the same position.
/// Fix: the visited test was inverted — SetV() was unreachable (it sat
/// in a branch guarded by a flag that was never set), so the FIRST
/// incidence was split too and the original vertex was left
/// unreferenced.  Now the first incidence marks the vertex visited and
/// keeps it.  Requires a compact mesh.  Returns the number of added
/// vertices.
static int SplitSelectedVertexOnEdgeMesh(MeshType& m)
{
  tri::RequireCompactness(m);
  tri::UpdateFlags<MeshType>::VertexClearV(m);
  int count_split = 0;
  for(size_t i=0;i<m.edge.size();++i)
  {
    for(int j=0;j<2;++j)
    {
      VertexPointer vp = m.edge[i].V(j);
      if(vp->IsS())
      {
        if(!vp->IsV())
        {
          // First incidence: keep the original vertex, mark it visited.
          vp->SetV();
        }
        else
        {
          // Later incidence: give this edge endpoint its own copy.
          m.edge[i].V(j) = &*(tri::Allocator<MeshType>::AddVertex(m,vp->P()));
          ++count_split;
        }
      }
    }
  }
  return count_split;
}
/// Selects the vertices of an edge mesh having more than two incident
/// edges (non-manifold for a 1-complex).  Clears the previous vertex
/// selection first.  Requires a compact mesh.
static void SelectNonManifoldVertexOnEdgeMesh(MeshType &m)
{
  tri::RequireCompactness(m);
  tri::UpdateSelection<MeshType>::VertexClear(m);
  std::vector<int> incidence(m.vn, 0);
  for (size_t e = 0; e < m.edge.size(); ++e)
  {
    ++incidence[tri::Index(m, m.edge[e].V(0))];
    ++incidence[tri::Index(m, m.edge[e].V(1))];
  }
  for (size_t vIdx = 0; vIdx < m.vert.size(); ++vIdx)
    if (incidence[vIdx] > 2)
      m.vert[vIdx].SetS();
}
/// Selects the "crease" vertices of an edge mesh: vertices with exactly
/// two incident edges whose deviation from a straight line exceeds
/// AngleRadThr radians.  Vertices with any other valence are ignored.
/// Requires a compact mesh with VE adjacency (topology is refreshed
/// here).  Fix: the angle was computed in a float local even when
/// ScalarType is double, silently losing precision; it now uses
/// ScalarType.
static void SelectCreaseVertexOnEdgeMesh(MeshType &m, ScalarType AngleRadThr)
{
  tri::RequireCompactness(m);
  tri::RequireVEAdjacency(m);
  tri::UpdateTopology<MeshType>::VertexEdge(m);
  for(size_t i=0;i<m.vert.size();++i)
  {
    std::vector<VertexPointer> VVStarVec;
    edge::VVStarVE(&(m.vert[i]),VVStarVec);
    if(VVStarVec.size()==2)
    {
      CoordType v0 = m.vert[i].P() - VVStarVec[0]->P();
      CoordType v1 = m.vert[i].P() - VVStarVec[1]->P();
      // Deviation from a straight line; ScalarType keeps full precision
      // for double-precision meshes.
      ScalarType angle = ScalarType(M_PI) - vcg::Angle(v0,v1);
      if(angle > AngleRadThr) m.vert[i].SetS();
    }
  }
}
/// Removal of faces that were incident on a non manifold edge.
// Given a mesh with FF adjacency
// it search for non manifold vertices and duplicate them.
// Duplicated vertices are moved apart according to the move threshold param.
// that is a percentage of the average vector from the non manifold vertex to the barycenter of the incident faces.
// Given a mesh with FF adjacency, searches for non manifold vertices and
// duplicates them: each umbrella (fan of faces reachable by pivoting
// around the vertex) gets its own copy.  Copies are moved apart by
// moveThreshold times the average vector toward the barycenters of the
// incident faces.  The face selection is preserved via a SelectionStack.
// Returns the number of vertices that were split.
static int SplitNonManifoldVertex(MeshType& m, ScalarType moveThreshold)
{
RequireFFAdjacency(m);
typedef std::pair<FacePointer,int> FaceInt; // a face and the index of the vertex that we have to change
//
// For each non-manifold vertex: the vertex and the (face, vertex-index)
// pairs of one umbrella that must be re-pointed to a new copy.
std::vector<std::pair<VertexPointer, std::vector<FaceInt> > >ToSplitVec;
SelectionStack<MeshType> ss(m);
ss.push();
// Selects the non-manifold vertices.
CountNonManifoldVertexFF(m,true);
UpdateFlags<MeshType>::VertexClearV(m);
// Step 1: collect one umbrella per selected vertex by pivoting with
// face::Pos (the visited flag ensures each vertex is processed once).
for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
{
for(int i=0;i<3;i++)
if((*fi).V(i)->IsS() && !(*fi).V(i)->IsV())
{
(*fi).V(i)->SetV();
face::Pos<FaceType> startPos(&*fi,i);
face::Pos<FaceType> curPos = startPos;
std::set<FaceInt> faceSet;
do
{
faceSet.insert(std::make_pair(curPos.F(),curPos.VInd()));
curPos.NextE();
} while (curPos != startPos);
ToSplitVec.push_back(make_pair((*fi).V(i),std::vector<FaceInt>()));
typename std::set<FaceInt>::const_iterator iii;
for(iii=faceSet.begin();iii!=faceSet.end();++iii)
ToSplitVec.back().second.push_back(*iii);
}
}
ss.pop();
// Second step actually add new vertices and split them.
// The PointerUpdater fixes the collected vertex pointers if AddVertices
// reallocates the vertex container.
typename tri::Allocator<MeshType>::template PointerUpdater<VertexPointer> pu;
VertexIterator firstVp = tri::Allocator<MeshType>::AddVertices(m,ToSplitVec.size(),pu);
for(size_t i =0;i<ToSplitVec.size();++i)
{
// qDebug("Splitting Vertex %i",ToSplitVec[i].first-&*m.vert.begin());
VertexPointer np=ToSplitVec[i].first;
pu.Update(np);
firstVp->ImportData(*np);
// loop on the face to be changed, and also compute the movement vector;
CoordType delta(0,0,0);
for(size_t j=0;j<ToSplitVec[i].second.size();++j)
{
FaceInt ff=ToSplitVec[i].second[j];
ff.first->V(ff.second)=&*firstVp;
delta+=Barycenter(*(ff.first))-np->cP();
}
delta /= ToSplitVec[i].second.size();
firstVp->P() = firstVp->P() + delta * moveThreshold;
firstVp++;
}
return ToSplitVec.size();
}
// Auxiliary function for sorting the non manifold faces according to their area. Used in RemoveNonManifoldFace
struct CompareAreaFP {
// Orders face pointers by increasing (doubled) area, so that when deleting
// non manifold faces the smallest ones are removed first.
bool operator ()(FacePointer const& f1, FacePointer const& f2) const {
return DoubleArea(*f1) < DoubleArea(*f2);
}
};
/// Removal of faces that were incident on a non manifold edge.
/// Remove the faces incident on a non manifold edge, smallest-area first,
/// re-checking manifoldness after each deletion so that no more faces than
/// necessary are removed. Requires FF adjacency (used by IsManifold/FFDetach).
/// \return the number of deleted faces.
static int RemoveNonManifoldFace(MeshType& m)
{
FaceIterator fi;
int count_fd = 0;
std::vector<FacePointer> ToDelVec;
// Collect every face touching at least one non manifold edge.
for(fi=m.face.begin(); fi!=m.face.end();++fi)
if (!fi->IsD())
{
if ((!IsManifold(*fi,0))||
(!IsManifold(*fi,1))||
(!IsManifold(*fi,2)))
ToDelVec.push_back(&*fi);
}
// Smallest faces first: deleting them is more likely to restore manifoldness cheaply.
std::sort(ToDelVec.begin(),ToDelVec.end(),CompareAreaFP());
for(size_t i=0;i<ToDelVec.size();++i)
{
if(!ToDelVec[i]->IsD())
{
FaceType &ff= *ToDelVec[i];
// Re-test: earlier deletions may have already fixed this edge.
if ((!IsManifold(ff,0))||
(!IsManifold(ff,1))||
(!IsManifold(ff,2)))
{
// Detach from FF topology before deleting to keep adjacency consistent.
for(int j=0;j<3;++j)
if(!face::IsBorder<FaceType>(ff,j))
vcg::face::FFDetach<FaceType>(ff,j);
Allocator<MeshType>::DeleteFace(m,ff);
count_fd++;
}
}
}
return count_fd;
}
/* Remove the faces that are out of a given range of area */
/* Remove the faces that are out of a given range of area */
/// Deletes every (optionally only selected) face whose area is <= MinAreaThr
/// or >= MaxAreaThr. \return the number of deleted faces.
static int RemoveFaceOutOfRangeArea(MeshType& m, ScalarType MinAreaThr=0, ScalarType MaxAreaThr=(std::numeric_limits<ScalarType>::max)(), bool OnlyOnSelected=false)
{
int count_fd = 0;
// DoubleArea returns twice the area, so double the thresholds instead of
// halving the measure on every face.
MinAreaThr*=2;
MaxAreaThr*=2;
for(FaceIterator fi=m.face.begin(); fi!=m.face.end();++fi){
if(!(*fi).IsD())
if(!OnlyOnSelected || (*fi).IsS())
{
const ScalarType doubleArea=DoubleArea<FaceType>(*fi);
if((doubleArea<=MinAreaThr) || (doubleArea>=MaxAreaThr) )
{
Allocator<MeshType>::DeleteFace(m,*fi);
count_fd++;
}
}
}
return count_fd;
}
/// Remove all degenerate (zero area) faces; convenience wrapper over RemoveFaceOutOfRangeArea.
static int RemoveZeroAreaFace(MeshType& m) { return RemoveFaceOutOfRangeArea(m,0);}
/**
* Is the mesh only composed by quadrilaterals?
*/
static bool IsBitQuadOnly(const MeshType &m)
{
typedef typename MeshType::FaceType F;
tri::RequirePerFaceFlags(m);
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) {
// A quad is stored as two triangles sharing exactly one faux edge:
// each triangle must have exactly one of the three FAUX bits set.
unsigned int tmp = fi->Flags()&(F::FAUX0|F::FAUX1|F::FAUX2);
if ( tmp != F::FAUX0 && tmp != F::FAUX1 && tmp != F::FAUX2) return false;
}
return true;
}
/// Check that every faux edge is reciprocated: for each edge, the faux flag on
/// a face must equal the faux flag of the same edge seen from the adjacent face.
/// Requires per-face flags and FF adjacency.
static bool IsFaceFauxConsistent(MeshType &m)
{
RequirePerFaceFlags(m);
RequireFFAdjacency(m);
for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi) if(!(*fi).IsD())
{
for(int z=0;z<(*fi).VN();++z)
{
// fp/zp: the face and edge index on the other side of edge z.
FacePointer fp = fi->FFp(z);
int zp = fi->FFi(z);
if(fi->IsF(z) != fp->IsF(zp)) return false;
}
}
return true;
}
/**
* Is the mesh only composed by triangles? (non polygonal faces)
*/
/**
* Is the mesh composed of plain triangles only (no polygonal faces)?
* True when no live face carries any faux-edge flag.
*/
static bool IsBitTriOnly(const MeshType &m)
{
  tri::RequirePerFaceFlags(m);
  ConstFaceIterator fi = m.face.begin();
  for (; fi != m.face.end(); ++fi)
  {
    if (fi->IsD())
      continue;
    if (fi->IsAnyF())
      return false;
  }
  return true;
}
/// A mesh is polygonal when at least one face has a faux edge,
/// i.e. when it is not made of plain triangles only.
static bool IsBitPolygonal(const MeshType &m)
{
  bool triOnly = IsBitTriOnly(m);
  return !triOnly;
}
/**
* Is the mesh only composed by quadrilaterals and triangles? (no pentas, etc)
* It assumes that the bits are consistent. In that case there can be only a single faux edge.
*/
static bool IsBitTriQuadOnly(const MeshType &m)
{
tri::RequirePerFaceFlags(m);
typedef typename MeshType::FaceType F;
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) {
// Zero faux bits -> plain triangle; exactly one faux bit -> half of a quad.
// Two or more faux bits would mean a polygon with more than four sides.
unsigned int tmp = fi->cFlags()&(F::FAUX0|F::FAUX1|F::FAUX2);
if ( tmp!=F::FAUX0 && tmp!=F::FAUX1 && tmp!=F::FAUX2 && tmp!=0 ) return false;
}
return true;
}
/**
* How many quadrilaterals?
* It assumes that the bits are consistent. In that case we count the tris with a single faux edge and divide by two.
*/
static int CountBitQuads(const MeshType &m)
{
tri::RequirePerFaceFlags(m);
typedef typename MeshType::FaceType F;
int count=0;
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) {
// A triangle with exactly one faux edge is half of a quad.
unsigned int tmp = fi->cFlags()&(F::FAUX0|F::FAUX1|F::FAUX2);
if ( tmp==F::FAUX0 || tmp==F::FAUX1 || tmp==F::FAUX2) count++;
}
// Each quad was counted once per half, hence the division by two.
return count / 2;
}
/**
* How many triangles? (non polygonal faces)
*/
/**
* How many plain triangles (faces with no faux edge, i.e. non polygonal faces)?
*/
static int CountBitTris(const MeshType &m)
{
  tri::RequirePerFaceFlags(m);
  int triNum = 0;
  for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if (fi->IsD())
      continue;
    if (!fi->IsAnyF())
      ++triNum;
  }
  return triNum;
}
/**
* How many polygons of any kind? (including triangles)
* it assumes that there are no faux vertexes (e.g vertices completely surrounded by faux edges)
*/
static int CountBitPolygons(const MeshType &m)
{
tri::RequirePerFaceFlags(m);
// Count the faux edges (each one is seen from both of its two faces, so the
// sum is twice the number of faux edges). Each faux edge merges two faces
// into one polygon, so the polygon count is fn minus the faux-edge count.
int count = 0;
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) {
if (fi->IsF(0)) count++;
if (fi->IsF(1)) count++;
if (fi->IsF(2)) count++;
}
return m.fn - count/2;
}
/**
* The number of polygonal faces is
* FN - EN_f (each faux edge hides exactly one triangular face or in other words a polygon of n edges has n-3 faux edges.)
* In the general case where a The number of polygonal faces is
* FN - EN_f + VN_f
* where:
* EN_f is the number of faux edges.
* VN_f is the number of faux vertices (e.g vertices completely surrounded by faux edges)
* as a intuitive proof think to a internal vertex that is collapsed onto a border of a polygon:
* it deletes 2 faces, 1 faux edges and 1 vertex so to keep the balance you have to add back the removed vertex.
*/
static int CountBitLargePolygons(MeshType &m)
{
tri::RequirePerFaceFlags(m);
// Unreferenced vertices get V set here and are never cleared below,
// so they are excluded from the faux-vertex count of the third loop.
UpdateFlags<MeshType>::VertexSetV(m);
// First loop Clear all referenced vertices
for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
if (!fi->IsD())
for(int i=0;i<3;++i) fi->V(i)->ClearV();
// Second Loop, count (twice) faux edges and mark all vertices touched by non faux edges
// (e.g vertexes on the boundary of a polygon)
int countE = 0;
for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
if (!fi->IsD()) {
for(int i=0;i<3;++i)
{
if (fi->IsF(i))
countE++;
else
{
fi->V0(i)->SetV();
fi->V1(i)->SetV();
}
}
}
// Third Loop, count the number of referenced vertexes that are completely surrounded by faux edges.
int countV = 0;
for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi)
if (!vi->IsD() && !vi->IsV()) countV++;
// Polygon count = FN - EN_f + VN_f (see the Euler-style argument above).
return m.fn - countE/2 + countV ;
}
/**
* Checks that the mesh has consistent per-face faux edges
* (the ones that merges triangles into larger polygons).
* A border edge should never be faux, and faux edges should always be
* reciprocated by another faux edges.
* It requires FF adjacency.
*/
static bool HasConsistentPerFaceFauxFlag(const MeshType &m)
{
RequireFFAdjacency(m);
RequirePerFaceFlags(m);
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
if(!(*fi).IsD())
for (int k=0; k<3; k++)
// Fail if the faux flag is not mirrored on the adjacent face,
// or if a border edge is marked faux (a polygon cannot end on a faux edge).
if( ( fi->IsF(k) != fi->cFFp(k)->IsF(fi->cFFi(k)) ) ||
( fi->IsF(k) && face::IsBorder(*fi,k)) )
{
return false;
}
return true;
}
/**
* Count the number of non manifold junction vertices in an edge (polyline) mesh,
* i.e. the vertices where more than 2 edges are incident.
*/
/// Count (and optionally select) the non manifold junction vertices of an
/// edge-only (polyline) mesh, i.e. the vertices with more than two incident edges.
/// Requires EE adjacency (rebuilt here). \return the number of such vertices.
static int CountNonManifoldEdgeEE( MeshType & m, bool SelectFlag=false)
{
MeshAssert<MeshType>::OnlyEdgeMesh(m);
RequireEEAdjacency(m);
tri::UpdateTopology<MeshType>::EdgeEdge(m);
if(SelectFlag) UpdateSelection<MeshType>::VertexClear(m);
int nonManifoldCnt=0;
SimpleTempData<typename MeshType::VertContainer, int > TD(m.vert,0);
// First Loop, just count how many edges are incident on a vertex and store it in the TemporaryData Counter.
EdgeIterator ei;
for (ei = m.edge.begin(); ei != m.edge.end(); ++ei) if (!ei->IsD())
{
TD[(*ei).V(0)]++;
TD[(*ei).V(1)]++;
}
tri::UpdateFlags<MeshType>::VertexClearV(m);
// Second Loop, check that each vertex has been seen at most 2 times.
for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi) if (!vi->IsD())
{
if( TD[vi] >2 )
{
if(SelectFlag) (*vi).SetS();
nonManifoldCnt++;
}
}
return nonManifoldCnt;
}
/**
* Count the number of non manifold edges in a mesh, e.g. the edges where there are more than 2 incident faces.
*
* Note that this test is not enough to say that a mesh is two manifold,
* you have to count also the non manifold vertexes.
*/
/**
* Count (and optionally select) the non manifold edges of a mesh,
* i.e. the edges where more than two faces are incident.
* Each non manifold edge is counted once regardless of how many faces share it;
* with SelectFlag the endpoints of such edges and all faces of the ring are
* selected (the previous selection is cleared). Requires FF adjacency.
*
* Note that this test is not enough to say that a mesh is two manifold,
* you have to count also the non manifold vertexes.
*/
static int CountNonManifoldEdgeFF( MeshType & m, bool SelectFlag=false)
{
  RequireFFAdjacency(m);
  // Three temporary per-face user bits (one per edge index) mark edges whose
  // non manifold ring has already been counted.
  int nmfBit[3];
  nmfBit[0]= FaceType::NewBitFlag();
  nmfBit[1]= FaceType::NewBitFlag();
  nmfBit[2]= FaceType::NewBitFlag();
  UpdateFlags<MeshType>::FaceClear(m,nmfBit[0]+nmfBit[1]+nmfBit[2]);
  if(SelectFlag){
    UpdateSelection<MeshType>::VertexClear(m);
    UpdateSelection<MeshType>::FaceClear(m);
  }
  int edgeCnt = 0;
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if (!fi->IsD())
    {
      for(int i=0;i<3;++i)
        if(!IsManifold(*fi,i))
        {
          if(!(*fi).IsUserBit(nmfBit[i]))
          {
            ++edgeCnt;
            if(SelectFlag)
            {
              (*fi).V0(i)->SetS();
              (*fi).V1(i)->SetS();
            }
            // follow the ring of faces incident on edge i, marking the
            // corresponding edge of every face as already counted;
            face::Pos<FaceType> nmf(&*fi,i);
            do
            {
              if(SelectFlag) nmf.F()->SetS();
              nmf.F()->SetUserBit(nmfBit[nmf.E()]);
              nmf.NextF();
            }
            while(nmf.f != &*fi);
          }
        }
    }
  }
  // Release the temporary user bits (reverse allocation order). The original
  // code leaked them: repeated calls would exhaust the small per-face user bit pool.
  FaceType::DeleteBitFlag(nmfBit[2]);
  FaceType::DeleteBitFlag(nmfBit[1]);
  FaceType::DeleteBitFlag(nmfBit[0]);
  return edgeCnt;
}
/** Count (and eventually select) non 2-Manifold vertexes of a mesh
* e.g. the vertices with a non 2-manif. neighbourhood but that do not belong to not 2-manif edges.
* typical situation two cones connected by one vertex.
*/
static int CountNonManifoldVertexFF( MeshType & m, bool selectVert = true )
{
RequireFFAdjacency(m);
if(selectVert) UpdateSelection<MeshType>::VertexClear(m);
int nonManifoldCnt=0;
SimpleTempData<typename MeshType::VertContainer, int > TD(m.vert,0);
// First Loop, just count how many faces are incident on a vertex and store it in the TemporaryData Counter.
FaceIterator fi;
for (fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
{
TD[(*fi).V(0)]++;
TD[(*fi).V(1)]++;
TD[(*fi).V(2)]++;
}
tri::UpdateFlags<MeshType>::VertexClearV(m);
// Second Loop.
// mark out of the game the vertexes that are incident on non manifold edges.
// (their non manifoldness is due to the edge, not to the vertex itself)
for (fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
{
for(int i=0;i<3;++i)
if (!IsManifold(*fi,i)) {
(*fi).V0(i)->SetV();
(*fi).V1(i)->SetV();
}
}
// Third Loop, for safe vertexes, check that the number of faces that you can reach starting
// from it and using FF is the same of the previously counted.
// If fewer faces are reachable, the star is split in two or more fans:
// the classic "two cones joined at the apex" configuration.
for (fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
{
for(int i=0;i<3;i++) if(!(*fi).V(i)->IsV()){
(*fi).V(i)->SetV();
face::Pos<FaceType> pos(&(*fi),i);
int starSizeFF = pos.NumberOfIncidentFaces();
if (starSizeFF != TD[(*fi).V(i)])
{
if(selectVert) (*fi).V(i)->SetS();
nonManifoldCnt++;
}
}
}
return nonManifoldCnt;
}
/// Very simple test of water tightness. No boundary and no non manifold edges.
/// Assume that it is orientable.
/// It could be debated if a closed non orientable surface is watertight or not.
///
/// The rationale of not testing orientability here is that
/// it requires FFAdj while this test do not require any adjacency.
///
/// A mesh is considered watertight when it has no border edges
/// and no non manifold edges (orientability is deliberately not tested,
/// since this check needs no adjacency information).
static bool IsWaterTight(MeshType & m)
{
  int eTotal = 0;
  int eBorder = 0;
  int eNonManifold = 0;
  CountEdgeNum(m, eTotal, eBorder, eNonManifold);
  const bool noBorder = (eBorder == 0);
  const bool noNonManifold = (eNonManifold == 0);
  return noBorder && noNonManifold;
}
/// Count the distinct edges of the mesh, classifying them as border (one
/// incident face) or non manifold (more than two incident faces).
/// Works by sorting the list of all per-face edges and counting runs of
/// identical (same vertex pair) entries; needs no adjacency information.
static void CountEdgeNum( MeshType & m, int &total_e, int &boundary_e, int &non_manif_e )
{
std::vector< typename tri::UpdateTopology<MeshType>::PEdge > edgeVec;
tri::UpdateTopology<MeshType>::FillEdgeVector(m,edgeVec,true);
sort(edgeVec.begin(), edgeVec.end()); // sort by vertex pair so equal edges become adjacent
total_e=0;
boundary_e=0;
non_manif_e=0;
// Number of per-face edges in the current run of identical entries.
size_t f_on_cur_edge =1;
for(size_t i=0;i<edgeVec.size();++i)
{
// A run ends at the last element or when the next entry differs.
if(( (i+1) == edgeVec.size()) || !(edgeVec[i] == edgeVec[i+1]))
{
++total_e;
if(f_on_cur_edge==1)
++boundary_e;
if(f_on_cur_edge>2)
++non_manif_e;
f_on_cur_edge=1;
}
else
{
++f_on_cur_edge;
}
} // end for
}
/// Count the border loops ("holes") of the mesh by walking each border with
/// Pos::NextB and marking the visited faces with the V flag.
/// NOTE(review): a face lying on two distinct border loops is marked on the
/// first loop, so the second loop through it may be missed — verify on meshes
/// with thin, multiply-bordered strips.
static int CountHoles( MeshType & m)
{
UpdateFlags<MeshType>::FaceClearV(m);
int loopNum=0;
for(FaceIterator fi=m.face.begin(); fi!=m.face.end();++fi) if(!fi->IsD())
{
for(int j=0;j<3;++j)
{
if(!fi->IsV() && face::IsBorder(*fi,j))
{
// Walk the whole border loop starting from this border edge.
face::Pos<FaceType> startPos(&*fi,j);
face::Pos<FaceType> curPos=startPos;
do
{
curPos.NextB();
curPos.F()->SetV();
}
while(curPos!=startPos);
++loopNum;
}
}
}
return loopNum;
}
/*
Compute the set of connected components of a given mesh;
it fills a vector of pair < int , faceptr > with, for each connected component, its size and a representative face
*/
/// Return the number of connected components of the mesh;
/// the per-component details computed by ConnectedComponents are discarded.
static int CountConnectedComponents(MeshType &m)
{
  std::vector< std::pair<int,FacePointer> > componentVec;
  const int componentNum = ConnectedComponents(m, componentVec);
  return componentNum;
}
/// Compute the connected components of the mesh with a flood fill over FF
/// adjacency. For each component, CCV receives a pair (face count, seed face).
/// \return the number of components. Requires FF adjacency.
static int ConnectedComponents(MeshType &m, std::vector< std::pair<int,FacePointer> > &CCV)
{
tri::RequireFFAdjacency(m);
CCV.clear();
tri::UpdateFlags<MeshType>::FaceClearV(m);
// Stack for the iterative flood fill over face-face adjacency.
std::stack<FacePointer> sf;
FacePointer fpt=&*(m.face.begin());
for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi)
{
if(!((*fi).IsD()) && !(*fi).IsV())
{
// New component: seed it with this yet-unvisited face.
(*fi).SetV();
CCV.push_back(std::make_pair(0,&*fi));
sf.push(&*fi);
while (!sf.empty())
{
fpt=sf.top();
++CCV.back().first;
sf.pop();
// Visit the (up to three) neighbors across non-border edges.
for(int j=0;j<3;++j)
{
if( !face::IsBorder(*fpt,j) )
{
FacePointer l = fpt->FFp(j);
if( !(*l).IsV() )
{
(*l).SetV();
sf.push(l);
}
}
}
}
}
}
return int(CCV.size());
}
/// Fill the per-vertex integer attribute `h` with the valence of each vertex,
/// computed as the number of face wedges referencing it (deleted faces excluded).
static void ComputeValence( MeshType &m, typename MeshType::PerVertexIntHandle &h)
{
  // Reset every counter first.
  for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi)
    h[vi] = 0;
  // Every wedge of every live face contributes one unit to its vertex.
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if ((*fi).IsD())
      continue;
    for (int k = 0; k < fi->VN(); ++k)
      ++h[tri::Index(m, fi->V(k))];
  }
}
/**
GENUS.
A topologically invariant property of a surface defined as
the largest number of non-intersecting simple closed curves that can be
drawn on the surface without separating it.
Roughly speaking, it is the number of holes in a surface.
The genus g of a closed surface, also called the geometric genus, is related to the
Euler characteristic by the relation $chi$ by $chi==2-2g$.
The genus of a connected, orientable surface is an integer representing the maximum
number of cuttings along closed simple curves without rendering the resultant
manifold disconnected. It is equal to the number of handles on it.
For general polyhedra the <em>Euler Formula</em> is:
V - E + F = 2 - 2G - B
where V is the number of vertices, F is the number of faces, E is the
number of edges, G is the genus and B is the number of <em>boundary polygons</em>.
The above formula is valid for a mesh with one single connected component.
By considering multiple connected components the formula becomes:
V - E + F = 2C - 2Gs - B -> 2Gs = - ( V-E+F +B -2C)
where C is the number of connected components and Gs is the sum of
the genus of all connected components.
Note that in the case of a mesh with boundaries the intuitive meaning of Genus is less obvious than it could seem.
A closed sphere, a sphere with one hole (e.g. a disk) and a sphere with two holes (e.g. a tube) all have Genus == 0
*/
/// Genus from the generalized Euler formula:
/// V - E + F = 2C - 2Gs - B  =>  Gs = -(V - E + F + B - 2C) / 2
/// where B is the number of boundary loops and C the number of components.
static int MeshGenus(int nvert,int nedges,int nfaces, int numholes, int numcomponents)
{
  const int chiPlusHoles = nvert - nedges + nfaces + numholes;
  return -((chiPlusHoles - 2 * numcomponents) / 2);
}
/// Compute the genus of the mesh by gathering edge, hole and component counts
/// and plugging them into the integer Euler-formula overload above.
static int MeshGenus(MeshType &m)
{
  int edgeTotal = 0, edgeBorder = 0, edgeNonManif = 0;
  CountEdgeNum(m, edgeTotal, edgeBorder, edgeNonManif);
  const int holeNum = CountHoles(m);
  const int compNum = CountConnectedComponents(m);
  return MeshGenus(m.vn, edgeTotal, m.fn, holeNum, compNum);
}
/**
* Check if the given mesh is regular, semi-regular or irregular.
*
* Each vertex of a \em regular mesh has valence 6 except for border vertices
* which have valence 4.
*
* A \em semi-regular mesh is derived from an irregular one applying
* 1-to-4 subdivision recursively. (not checked for now)
*
* All other meshes are \em irregular.
*/
static void IsRegularMesh(MeshType &m, bool &Regular, bool &Semiregular)
{
RequireVFAdjacency(m);
Regular = true;
VertexIterator vi;
// for each vertex the number of edges are count
for (vi = m.vert.begin(); vi != m.vert.end(); ++vi)
{
if (!vi->IsD())
{
// Walk the edge star of the vertex with a Pos, counting edges and
// detecting whether the star touches the border.
face::Pos<FaceType> he((*vi).VFp(), &*vi);
face::Pos<FaceType> ht = he;
int n=0;
bool border=false;
do
{
++n;
ht.NextE();
if (ht.IsBorder())
border=true;
}
while (ht != he);
// On the border each edge is traversed twice by NextE, so halve the count.
if (border)
n = n/2;
// Regular: valence 6 for internal vertices, 4 for border vertices.
if ((n != 6)&&(!border && n != 4))
{
Regular = false;
break;
}
}
}
if (!Regular)
Semiregular = false;
else
{
// For now we do not account for semi-regularity
Semiregular = false;
}
}
/// Return true when every pair of adjacent faces agrees on edge orientation,
/// i.e. the mesh is coherently oriented. Requires initialized FF adjacency.
static bool IsCoherentlyOrientedMesh(MeshType &m)
{
  RequireFFAdjacency(m);
  MeshAssert<MeshType>::FFAdjacencyIsInitialized(m);
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if (fi->IsD())
      continue;
    for (int e = 0; e < 3; ++e)
      if (!face::CheckOrientation(*fi, e))
        return false;
  }
  return true;
}
/// Try to make the whole mesh coherently oriented by flipping faces while
/// flood filling over FF adjacency (one fill per connected component).
/// On return, _IsOriented tells whether the mesh was already coherent and
/// _IsOrientable whether a coherent orientation exists at all (it does not
/// for non orientable surfaces such as a Moebius strip).
static void OrientCoherentlyMesh(MeshType &m, bool &_IsOriented, bool &_IsOrientable)
{
RequireFFAdjacency(m);
MeshAssert<MeshType>::FFAdjacencyIsInitialized(m);
bool IsOrientable = true;
bool IsOriented = true;
UpdateFlags<MeshType>::FaceClearV(m);
std::stack<FacePointer> faces;
for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
{
if (!fi->IsD() && !fi->IsV())
{
// each face put in the stack is selected (and oriented)
fi->SetV();
faces.push(&(*fi));
while (!faces.empty())
{
FacePointer fp = faces.top();
faces.pop();
// make consistently oriented the adjacent faces
for (int j = 0; j < 3; j++)
{
// Only propagate across manifold, non-border edges.
if (!face::IsBorder(*fp,j) && face::IsManifold<FaceType>(*fp, j))
{
FacePointer fpaux = fp->FFp(j);
int iaux = fp->FFi(j);
if (!CheckOrientation(*fpaux, iaux))
{
IsOriented = false;
if (!fpaux->IsV())
// Not yet fixed: flip it to match the current face.
face::SwapEdge<FaceType,true>(*fpaux, iaux);
else
{
// Already oriented but still disagreeing: the surface is non orientable.
IsOrientable = false;
break;
}
}
if (!fpaux->IsV())
{
fpaux->SetV();
faces.push(fpaux);
}
}
}
}
}
if (!IsOrientable) break;
}
_IsOriented = IsOriented;
_IsOrientable = IsOrientable;
}
/// Flip the orientation of the whole mesh flipping all the faces (by swapping the first two vertices)
/// Flip the orientation of the whole mesh (or only of the selected faces)
/// by swapping the first two vertices of each face; per-wedge texture
/// coordinates, when present, are swapped accordingly.
static void FlipMesh(MeshType &m, bool selected=false)
{
  const bool hasWedgeTex = HasPerWedgeTexCoord(m);
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if ((*fi).IsD())
      continue;
    if (selected && !(*fi).IsS())
      continue;
    face::SwapEdge<FaceType,false>((*fi), 0);
    if (hasWedgeTex)
      std::swap((*fi).WT(0),(*fi).WT(1));
  }
}
/// Flip a mesh so that its normals are oriented outside.
/// Just for safety it uses a voting scheme.
/// It assumes that
/// the mesh already has coherent normals and
/// is watertight and single component.
static bool FlipNormalOutside(MeshType &m)
{
if(m.vert.empty()) return false;
tri::UpdateNormal<MeshType>::PerVertexAngleWeighted(m);
tri::UpdateNormal<MeshType>::NormalizePerVertex(m);
// For each probe direction, the extremal (min/max projection) vertices:
// on a watertight mesh their normals should point along/against the direction.
std::vector< VertexPointer > minVertVec;
std::vector< VertexPointer > maxVertVec;
// The set of directions to be choosen
std::vector< CoordType > dirVec;
dirVec.push_back(CoordType(1,0,0));
dirVec.push_back(CoordType(0,1,0));
dirVec.push_back(CoordType(0,0,1));
dirVec.push_back(CoordType( 1, 1,1));
dirVec.push_back(CoordType(-1, 1,1));
dirVec.push_back(CoordType(-1,-1,1));
dirVec.push_back(CoordType( 1,-1,1));
for(size_t i=0;i<dirVec.size();++i)
{
Normalize(dirVec[i]);
minVertVec.push_back(&*m.vert.begin());
maxVertVec.push_back(&*m.vert.begin());
}
// Find, for every direction, the vertices with minimal and maximal projection.
for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi) if(!(*vi).IsD())
{
for(size_t i=0;i<dirVec.size();++i)
{
if( (*vi).cP().dot(dirVec[i]) < minVertVec[i]->P().dot(dirVec[i])) minVertVec[i] = &*vi;
if( (*vi).cP().dot(dirVec[i]) > maxVertVec[i]->P().dot(dirVec[i])) maxVertVec[i] = &*vi;
}
}
// A vote is cast when an extremal vertex normal points INWARD
// (i.e. against what an outward-oriented surface would show there).
int voteCount=0;
ScalarType angleThreshold = cos(math::ToRad(85.0));
for(size_t i=0;i<dirVec.size();++i)
{
// qDebug("Min vert along (%f %f %f) is %f %f %f",dirVec[i][0],dirVec[i][1],dirVec[i][2],minVertVec[i]->P()[0],minVertVec[i]->P()[1],minVertVec[i]->P()[2]);
// qDebug("Max vert along (%f %f %f) is %f %f %f",dirVec[i][0],dirVec[i][1],dirVec[i][2],maxVertVec[i]->P()[0],maxVertVec[i]->P()[1],maxVertVec[i]->P()[2]);
if(minVertVec[i]->N().dot(dirVec[i]) > angleThreshold ) voteCount++;
if(maxVertVec[i]->N().dot(dirVec[i]) < -angleThreshold ) voteCount++;
}
// qDebug("votecount = %i",voteCount);
// Flip only when the majority of probes says the normals point inward.
if(voteCount < int(dirVec.size())/2) return false;
FlipMesh(m);
return true;
}
// Search and remove small single triangle folds
// - a face has normal opposite to all other faces
// - choose the edge that brings to the face f1 containing the vertex opposite to that edge.
/// Search and remove small single-triangle folds by edge flipping:
/// a folded face has a normal (nearly) opposite to all three neighbors;
/// the flip is performed on the edge whose opposite face contains, in its
/// interior, the projection of the fold's far vertex.
/// Repeats until convergence when `repeat` is true.
/// \return the total number of flips performed.
static int RemoveFaceFoldByFlip(MeshType &m, float normalThresholdDeg=175, bool repeat=true)
{
RequireFFAdjacency(m);
RequirePerVertexMark(m);
//Counters for logging and convergence
int count, total = 0;
do {
tri::UpdateTopology<MeshType>::FaceFace(m);
tri::UnMarkAll(m);
count = 0;
ScalarType NormalThrRad = math::ToRad(normalThresholdDeg);
ScalarType eps = 0.0001; // this epsilon value is in absolute value. It is a distance from edge in baricentric coords.
//detection stage
for(FaceIterator fi=m.face.begin();fi!= m.face.end();++fi ) if(!(*fi).IsV())
{ Point3<ScalarType> NN = vcg::TriangleNormal((*fi)).Normalize();
// Folded: normal almost opposite to ALL three adjacent faces.
if( vcg::AngleN(NN,TriangleNormal(*(*fi).FFp(0)).Normalize()) > NormalThrRad &&
vcg::AngleN(NN,TriangleNormal(*(*fi).FFp(1)).Normalize()) > NormalThrRad &&
vcg::AngleN(NN,TriangleNormal(*(*fi).FFp(2)).Normalize()) > NormalThrRad )
{
(*fi).SetS();
//(*fi).C()=Color4b(Color4b::Red);
// now search the best edge to flip
for(int i=0;i<3;i++)
{
// P2(i): the vertex opposite to edge i; L: its barycentric coords
// inside the face across edge i.
Point3<ScalarType> &p=(*fi).P2(i);
Point3<ScalarType> L;
bool ret = vcg::InterpolationParameters((*(*fi).FFp(i)),TriangleNormal(*(*fi).FFp(i)),p,L);
if(ret && L[0]>eps && L[1]>eps && L[2]>eps)
{
(*fi).FFp(i)->SetS();
(*fi).FFp(i)->SetV();
//(*fi).FFp(i)->C()=Color4b(Color4b::Green);
if(face::CheckFlipEdge<FaceType>( *fi, i )) {
face::FlipEdge<FaceType>( *fi, i );
++count; ++total;
}
}
}
}
}
// tri::UpdateNormal<MeshType>::PerFace(m);
}
while( repeat && count );
return total;
}
/// Remove T-vertices (a vertex lying almost exactly on the longest edge of a
/// near-degenerate triangle) by flipping the longest edge, but only when the
/// flip improves the minimum triangle quality of the edge's two faces.
/// `threshold` is the min ratio between the longest side and the distance of
/// the opposite vertex from it. \return total number of flips performed.
static int RemoveTVertexByFlip(MeshType &m, float threshold=40, bool repeat=true)
{
RequireFFAdjacency(m);
RequirePerVertexMark(m);
//Counters for logging and convergence
int count, total = 0;
do {
tri::UpdateTopology<MeshType>::FaceFace(m);
tri::UnMarkAll(m);
count = 0;
//detection stage
for(unsigned int index = 0 ; index < m.face.size(); ++index )
{
FacePointer f = &(m.face[index]); float sides[3]; CoordType dummy;
sides[0] = Distance(f->P(0), f->P(1));
sides[1] = Distance(f->P(1), f->P(2));
sides[2] = Distance(f->P(2), f->P(0));
// Find largest triangle side
int i = std::find(sides, sides+3, std::max( std::max(sides[0],sides[1]), sides[2])) - (sides);
if( tri::IsMarked(m,f->V2(i) )) continue;
// T-vertex test: the opposite vertex is very close to the longest edge.
if( PSDist(f->P2(i),f->P(i),f->P1(i),dummy)*threshold <= sides[i] )
{
tri::Mark(m,f->V2(i));
if(face::CheckFlipEdge<FaceType>( *f, i )) {
// Check if EdgeFlipping improves quality
// t1,t2: the two faces before the flip; t3,t4: the faces after it.
FacePointer g = f->FFp(i); int k = f->FFi(i);
Triangle3<ScalarType> t1(f->P(i), f->P1(i), f->P2(i)), t2(g->P(k), g->P1(k), g->P2(k)),
t3(f->P(i), g->P2(k), f->P2(i)), t4(g->P(k), f->P2(i), g->P2(k));
if ( std::min( QualityFace(t1), QualityFace(t2) ) < std::min( QualityFace(t3), QualityFace(t4) ))
{
face::FlipEdge<FaceType>( *f, i );
++count; ++total;
}
}
}
}
// tri::UpdateNormal<MeshType>::PerFace(m);
}
while( repeat && count );
return total;
}
/// Remove T-vertices by snapping the offending vertex onto the nearest
/// endpoint of the longest edge and then merging duplicates.
/// `threshold` has the same meaning as in RemoveTVertexByFlip.
/// \return total number of collapses performed.
static int RemoveTVertexByCollapse(MeshType &m, float threshold=40, bool repeat=true)
{
RequirePerVertexMark(m);
//Counters for logging and convergence
int count, total = 0;
do {
tri::UnMarkAll(m);
count = 0;
//detection stage
for(unsigned int index = 0 ; index < m.face.size(); ++index )
{
FacePointer f = &(m.face[index]);
float sides[3];
CoordType dummy;
sides[0] = Distance(f->P(0), f->P(1));
sides[1] = Distance(f->P(1), f->P(2));
sides[2] = Distance(f->P(2), f->P(0));
// Index of the longest side.
int i = std::find(sides, sides+3, std::max( std::max(sides[0],sides[1]), sides[2])) - (sides);
if( tri::IsMarked(m,f->V2(i) )) continue;
// T-vertex test: the opposite vertex is very close to the longest edge;
// `dummy` receives its projection onto the edge.
if( PSDist(f->P2(i),f->P(i),f->P1(i),dummy)*threshold <= sides[i] )
{
tri::Mark(m,f->V2(i));
// Snap the T-vertex onto the nearer endpoint of the longest edge.
int j = Distance(dummy,f->P(i))<Distance(dummy,f->P1(i))?i:(i+1)%3;
f->P2(i) = f->P(j); tri::Mark(m,f->V(j));
++count; ++total;
}
}
// Coincident vertices created by the snapping are merged and the
// containers compacted before the next iteration.
tri::Clean<MeshType>::RemoveDuplicateVertex(m);
tri::Allocator<MeshType>::CompactFaceVector(m);
tri::Allocator<MeshType>::CompactVertexVector(m);
}
while( repeat && count );
return total;
}
/// Find all the faces involved in self intersections, using a uniform grid to
/// limit the face-face tests to bounding-box neighbors. Each intersecting face
/// appears in `ret` (the probed face is added once, on its first intersection).
/// \return true when at least one self intersection was found.
static bool SelfIntersections(MeshType &m, std::vector<FaceType*> &ret)
{
RequirePerFaceMark(m);
ret.clear();
// Temporary per-face user bit marking faces already used as probe,
// so each unordered pair is tested only once.
int referredBit = FaceType::NewBitFlag();
tri::UpdateFlags<MeshType>::FaceClear(m,referredBit);
TriMeshGrid gM;
gM.Set(m.face.begin(),m.face.end());
for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi) if(!(*fi).IsD())
{
(*fi).SetUserBit(referredBit);
Box3< ScalarType> bbox;
(*fi).GetBBox(bbox);
std::vector<FaceType*> inBox;
vcg::tri::GetInBoxFace(m, gM, bbox,inBox);
bool Intersected=false;
typename std::vector<FaceType*>::iterator fib;
for(fib=inBox.begin();fib!=inBox.end();++fib)
{
if(!(*fib)->IsUserBit(referredBit) && (*fib != &*fi) )
if(Clean<MeshType>::TestFaceFaceIntersection(&*fi,*fib)){
ret.push_back(*fib);
if(!Intersected) {
ret.push_back(&*fi);
Intersected=true;
}
}
}
inBox.clear();
}
// Give the temporary user bit back to the pool.
FaceType::DeleteBitFlag(referredBit);
return (ret.size()>0);
}
/**
This function simply test that the vn and fn counters be consistent with the size of the containers and the number of deleted simplexes.
*/
/**
* Check that the vn/en/fn counters agree with the container sizes once the
* deleted elements are accounted for, i.e. live count + deleted count == size.
*/
static bool IsSizeConsistent(MeshType &m)
{
  int deadVerts = 0, deadEdges = 0, deadFaces = 0;
  for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi)
    if ((*vi).IsD()) ++deadVerts;
  for (EdgeIterator ei = m.edge.begin(); ei != m.edge.end(); ++ei)
    if ((*ei).IsD()) ++deadEdges;
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if ((*fi).IsD()) ++deadFaces;
  const bool vertOk = size_t(m.vn + deadVerts) == m.vert.size();
  const bool edgeOk = size_t(m.en + deadEdges) == m.edge.size();
  const bool faceOk = size_t(m.fn + deadFaces) == m.face.size();
  return vertOk && edgeOk && faceOk;
}
/**
This function simply test that all the faces have a consistent face-face topology relation.
useful for checking that a topology modifying algorithm does not mess something.
*/
/**
* Check that every live face has a consistent face-face topology relation
* on all three edges; useful to verify topology-modifying algorithms.
*/
static bool IsFFAdjacencyConsistent(MeshType &m)
{
  RequireFFAdjacency(m);
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if ((*fi).IsD())
      continue;
    for (int e = 0; e < 3; ++e)
      if (!FFCorrectness(*fi, e))
        return false;
  }
  return true;
}
/**
This function simply test that a mesh has some reasonable tex coord.
*/
/**
* Check that the per-wedge texture coordinates are reasonable:
* the three wedges of each face must reference the same texture index,
* and that index must not be negative (undefined texture).
*/
static bool HasConsistentPerWedgeTexCoord(MeshType &m)
{
  tri::RequirePerFaceWedgeTexCoord(m);
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if ((*fi).IsD())
      continue;
    FaceType &f = (*fi);
    const bool sameTexId = (f.WT(0).N() == f.WT(1).N()) && (f.WT(0).N() == f.WT(2).N());
    if (!sameTexId)
      return false; // all the vertices must have the same index.
    if (f.WT(0).N() < 0)
      return false; // no undefined texture should be allowed
  }
  return true;
}
/**
Simple check that there are no face with all collapsed tex coords.
*/
/// NOTE(review): despite its name, this returns FALSE as soon as a face with
/// all three wedge tex coords collapsed to the same point is found, and TRUE
/// when no such face exists — callers rely on this inverted sense, so the
/// return value is kept as is.
static bool HasZeroTexCoordFace(MeshType &m)
{
tri::RequirePerFaceWedgeTexCoord(m);
for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
if(!(*fi).IsD())
{
// Face with all three wedges mapped onto a single UV point.
if( (*fi).WT(0).P() == (*fi).WT(1).P() && (*fi).WT(0).P() == (*fi).WT(2).P() ) return false;
}
return true;
}
/**
This function test if two triangular faces of a mesh intersect.
It assumes that the faces (as storage) are different (e.g different address)
If the two faces are different but coincident (same set of vertexes) return true.
if the faces share an edge no test is done.
if the faces share only a vertex, the opposite edge is tested against the face
*/
static bool TestFaceFaceIntersection(FaceType *f0,FaceType *f1)
{
int sv = face::CountSharedVertex(f0,f1);
// Same three vertices (coincident faces): considered intersecting.
if(sv==3) return true;
// Disjoint faces: plain triangle-triangle intersection test.
if(sv==0) return (vcg::IntersectionTriangleTriangle<FaceType>((*f0),(*f1)));
// if the faces share only a vertex, the opposite edge (as a segment) is tested against the face
// to avoid degenerate cases where the two triangles have the opposite edge on a common plane
// we offset the segment to test toward the shared vertex
if(sv==1)
{
int i0,i1; ScalarType a,b;
face::FindSharedVertex(f0,f1,i0,i1);
// The opposite edge is shrunk halfway toward the shared vertex before testing.
CoordType shP = f0->V(i0)->P()*0.5;
if(vcg::IntersectionSegmentTriangle(Segment3<ScalarType>((*f0).V1(i0)->P()*0.5+shP,(*f0).V2(i0)->P()*0.5+shP), *f1, a, b) )
{
// a,b are the param coords of the intersection point of the segment.
// Intersections on (or too near) the segment endpoints are discarded.
if(a+b>=1 || a<=EPSIL || b<=EPSIL ) return false;
return true;
}
if(vcg::IntersectionSegmentTriangle(Segment3<ScalarType>((*f1).V1(i1)->P()*0.5+shP,(*f1).V2(i1)->P()*0.5+shP), *f0, a, b) )
{
// a,b are the param coords of the intersection point of the segment.
if(a+b>=1 || a<=EPSIL || b<=EPSIL ) return false;
return true;
}
}
// Faces sharing an edge (sv==2) are not tested and reported as non intersecting.
return false;
}
/**
This function merge all the vertices that are closer than the given radius
*/
/**
* Merge all the vertices that are closer than the given radius:
* first cluster them onto a common position, then unify the duplicates.
* \return the number of vertices that were clustered (and thus merged).
*/
static int MergeCloseVertex(MeshType &m, const ScalarType radius)
{
  const int clusteredNum = ClusterVertex(m, radius);
  RemoveDuplicateVertex(m, true);
  return clusteredNum;
}
/// Snap every vertex closer than `radius` to an unvisited seed vertex onto the
/// seed's position, using a spatial hash table for the neighborhood queries.
/// Positions become coincident; the actual merging is left to
/// RemoveDuplicateVertex (see MergeCloseVertex).
/// \return the number of vertices moved onto a seed.
static int ClusterVertex(MeshType &m, const ScalarType radius)
{
if(m.vn==0) return 0;
// some spatial indexing structure does not work well with deleted vertices...
tri::Allocator<MeshType>::CompactVertexVector(m);
typedef vcg::SpatialHashTable<VertexType, ScalarType> SampleSHT;
SampleSHT sht;
tri::EmptyTMark<MeshType> markerFunctor;
std::vector<VertexType*> closests;
int mergedCnt=0;
sht.Set(m.vert.begin(), m.vert.end());
UpdateFlags<MeshType>::VertexClearV(m);
for(VertexIterator viv = m.vert.begin(); viv!= m.vert.end(); ++viv)
if(!(*viv).IsD() && !(*viv).IsV())
{
// This vertex becomes the seed of a new cluster.
(*viv).SetV();
Point3<ScalarType> p = viv->cP();
Box3<ScalarType> bb(p-Point3<ScalarType>(radius,radius,radius),p+Point3<ScalarType>(radius,radius,radius));
GridGetInBox(sht, markerFunctor, bb, closests);
// qDebug("Vertex %i has %i closest", &*viv - &*m.vert.begin(),closests.size());
for(size_t i=0; i<closests.size(); ++i)
{
ScalarType dist = Distance(p,closests[i]->cP());
// The box query may return vertices farther than radius: filter by distance,
// and never steal a vertex already assigned to another cluster.
if(dist < radius && !closests[i]->IsV())
{
// printf("%f %f \n",dist,radius);
mergedCnt++;
closests[i]->SetV();
closests[i]->P()=p;
}
}
}
return mergedCnt;
}
/// Remove the connected components with fewer than maxCCSize faces.
/// \return a pair (total number of components, number of deleted components).
static std::pair<int,int> RemoveSmallConnectedComponentsSize(MeshType &m, int maxCCSize)
{
std::vector< std::pair<int, typename MeshType::FacePointer> > CCV;
int TotalCC=ConnectedComponents(m, CCV);
int DeletedCC=0;
ConnectedComponentIterator<MeshType> ci;
for(unsigned int i=0;i<CCV.size();++i)
{
std::vector<typename MeshType::FacePointer> FPV;
if(CCV[i].first<maxCCSize)
{
DeletedCC++;
// Collect the faces first, then delete: deleting while iterating the
// component would invalidate the traversal.
for(ci.start(m,CCV[i].second);!ci.completed();++ci)
FPV.push_back(*ci);
typename std::vector<typename MeshType::FacePointer>::iterator fpvi;
for(fpvi=FPV.begin(); fpvi!=FPV.end(); ++fpvi)
Allocator<MeshType>::DeleteFace(m,(**fpvi));
}
}
return std::make_pair(TotalCC,DeletedCC);
}
/// Remove the connected components smaller than a given diameter
// it returns a pair with the number of connected components and the number of deleted ones.
static std::pair<int,int> RemoveSmallConnectedComponentsDiameter(MeshType &m, ScalarType maxDiameter)
{
std::vector< std::pair<int, typename MeshType::FacePointer> > CCV;
int TotalCC=ConnectedComponents(m, CCV);
int DeletedCC=0;
tri::ConnectedComponentIterator<MeshType> ci;
for(unsigned int i=0;i<CCV.size();++i)
{
// Bounding box of the whole component, grown face by face.
Box3<ScalarType> bb;
std::vector<typename MeshType::FacePointer> FPV;
for(ci.start(m,CCV[i].second);!ci.completed();++ci)
{
FPV.push_back(*ci);
bb.Add((*ci)->P(0));
bb.Add((*ci)->P(1));
bb.Add((*ci)->P(2));
}
// The component "diameter" is the diagonal of its bounding box.
if(bb.Diag()<maxDiameter)
{
DeletedCC++;
typename std::vector<typename MeshType::FacePointer>::iterator fpvi;
for(fpvi=FPV.begin(); fpvi!=FPV.end(); ++fpvi)
tri::Allocator<MeshType>::DeleteFace(m,(**fpvi));
}
}
return std::make_pair(TotalCC,DeletedCC);
}
/// Remove the connected components whose bounding-box diagonal is greater
/// than minDiameter.
/// Returns a pair <number of connected components, number of deleted ones>.
static std::pair<int,int> RemoveHugeConnectedComponentsDiameter(MeshType &m, ScalarType minDiameter)
{
  std::vector< std::pair<int, typename MeshType::FacePointer> > CCV;
  int TotalCC=ConnectedComponents(m, CCV);
  int DeletedCC=0;
  tri::ConnectedComponentIterator<MeshType> ci;
  for(unsigned int i=0;i<CCV.size();++i)
  {
    // Use the mesh scalar type (was a hard-coded Box3f) so double-precision
    // meshes work and no accuracy is lost; this also matches the sibling
    // RemoveSmallConnectedComponentsDiameter.
    Box3<ScalarType> bb;
    std::vector<typename MeshType::FacePointer> FPV;
    for(ci.start(m,CCV[i].second);!ci.completed();++ci)
    {
      FPV.push_back(*ci);
      bb.Add((*ci)->P(0));
      bb.Add((*ci)->P(1));
      bb.Add((*ci)->P(2));
    }
    if(bb.Diag()>minDiameter)
    {
      DeletedCC++;
      // Collect first, delete afterwards: deleting while iterating the
      // component would invalidate the traversal.
      typename std::vector<typename MeshType::FacePointer>::iterator fpvi;
      for(fpvi=FPV.begin(); fpvi!=FPV.end(); ++fpvi)
        tri::Allocator<MeshType>::DeleteFace(m,(**fpvi));
    }
  }
  return std::make_pair(TotalCC,DeletedCC);
}
/**
  Select the folded faces using an angle threshold on the face normal.
  A face is selected if the dot product between the face normal and the
  normal of the plane fitted to the vertices of its one-ring faces is below
  cosThreshold.  cosThreshold must be a negative cosine value (a positive
  value is clamped to zero).
*/
static void SelectFoldedFaceFromOneRingFaces(MeshType &m, ScalarType cosThreshold)
{
  tri::RequireVFAdjacency(m);
  tri::RequirePerFaceNormal(m);
  tri::RequirePerVertexNormal(m);
  vcg::tri::UpdateSelection<MeshType>::FaceClear(m);
  vcg::tri::UpdateNormal<MeshType>::PerFaceNormalized(m);
  vcg::tri::UpdateNormal<MeshType>::PerVertexNormalized(m);
  vcg::tri::UpdateTopology<MeshType>::VertexFace(m);
  if (cosThreshold > 0)
    cosThreshold = 0;
  // int index (with explicit cast) keeps the loop in OpenMP canonical form
  // and silences the signed/unsigned comparison of the original code.
#pragma omp parallel for schedule(dynamic, 10)
  for (int i = 0; i < (int)m.face.size(); i++)
  {
    typename MeshType::FacePointer f = &m.face[i];
    if (f->IsD())
      continue;                     // skip deleted faces (were processed before)
    // Gather the one-ring vertices of f and their positions; each corner
    // vertex of f itself is appended once at the end of its star.
    std::vector<typename MeshType::VertexPointer> nearVertex;
    std::vector<typename MeshType::CoordType> point;
    for (int j = 0; j < 3; j++)
    {
      std::vector<typename MeshType::VertexPointer> temp;
      vcg::face::VVStarVF<typename MeshType::FaceType>(f->V(j), temp);
      typename std::vector<typename MeshType::VertexPointer>::iterator iter = temp.begin();
      for (; iter != temp.end(); iter++)
      {
        if ((*iter) != f->V1(j) && (*iter) != f->V2(j))
        {
          nearVertex.push_back((*iter));
          point.push_back((*iter)->P());
        }
      }
      nearVertex.push_back(f->V(j));
      point.push_back(f->P(j));
    }
    if (point.size() > 3)
    {
      // Fit a plane to the one-ring, then orient its normal with the average
      // vertex normal so the sign of the comparison below is meaningful.
      vcg::Plane3<typename MeshType::ScalarType> plane;
      vcg::FitPlaneToPointSet(point, plane);
      ScalarType avgDot = 0;        // ScalarType (was float): no precision loss on double meshes
      for (size_t j = 0; j < nearVertex.size(); j++)
        avgDot += plane.Direction().dot(nearVertex[j]->N());
      avgDot /= nearVertex.size();
      typename MeshType::VertexType::NormalType normal;
      if (avgDot < 0)
        normal = -plane.Direction();
      else
        normal = plane.Direction();
      if (normal.dot(f->N()) < cosThreshold)
        f->SetS();
    }
  }
}
}; // end class
/*@}*/
} //End Namespace Tri
} // End Namespace vcg
#endif
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y, storing the result in RESULT.
 * Returns 1 if the difference is negative (i.e. *x earlier than *y),
 * otherwise 0.
 *
 * NOTE: *y is used as scratch space and may be modified by this call.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize so that y->tv_usec <= x->tv_usec, moving whole seconds
     * between the two fields of *y. */
    if (x->tv_usec < y->tv_usec)
    {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec  += carry;
    }
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec  -= carry;
    }

    /* After normalization the microsecond difference is non-negative. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
/*
 * Usage: ./3d25pt Nx Ny Nz [Nt]
 * Runs TESTS repetitions of an order-4 (25-point) 3D wave-equation stencil
 * and reports per-run and best wall-clock times.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;

    /* Default problem size (interior + 2*4 ghost layers), overridden from the
     * command line.  These were previously read uninitialized when fewer
     * arguments were supplied -- undefined behavior. */
    int Nx = 16, Ny = 16, Nz = 16, Nt = 4;
    if (argc > 3) {
        Nx = atoi(argv[1])+8;   /* +8: four ghost layers on each side */
        Ny = atoi(argv[2])+8;
        Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* A holds the two ping-pong time levels; roc2 is the coefficient field.
     * roc2 is allocated directly at its final size: the old code malloc'ed a
     * one-element placeholder first and leaked it on reassignment. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        roc2[i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
            roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 8;
    tile_size[1] = 8;
    tile_size[2] = 32;
    tile_size[3] = 1024;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;

    const int BASE = 1024;

    /* Initialize the WHOLE grid and both time levels.  The original loops
     * started at 1 and never touched A[1], so the stencil (which reads plane
     * 0 at i=4 and reads A[(t+1)%2] on the first step) consumed
     * uninitialized memory -- undefined behavior. */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
    #pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
        #pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    (void) num_threads;   /* kept for instrumented variants of this benchmark */

    /* 25-point stencil coefficients (8th-order spatial discretization). */
    const double coef0 = -0.28472;
    const double coef1 =  0.16000;
    const double coef2 = -0.02000;
    const double coef3 =  0.00254;
    const double coef4 = -0.00018;

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                            coef0* A[t%2][i  ][j  ][k  ] +
                            coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                                   A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                                   A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                            coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                                   A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                                   A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                            coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                                   A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                                   A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                            coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                                   A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                                   A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        (void) ts_return;
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
    #pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    /* Free everything, including the tile-size list and the top-level
     * pointer array A itself (both previously leaked). */
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    free(roc2);
    free(tile_size);
    return 0;
}
|
GB_unaryop__abs_int32_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int32_uint16
// op(A') function: GB_tran__abs_int32_uint16
// C type: int32_t
// A type: uint16_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int32_t) GB_IABS (Ax [p]) for all p in [0, anz), in parallel.
// (The per-entry work is expanded from the GB_CAST_OP / GB_CASTING / GB_OP
// macros defined above.)  File is auto-generated; do not hand-edit logic.
GrB_Info GB_unop__abs_int32_uint16
(
    int32_t *restrict Cx,           // output array, anz entries, pre-allocated
    const uint16_t *restrict Ax,    // input array, anz entries
    int64_t anz,                    // number of entries to process
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator compiled out via GB_control.h (GB_DISABLE); caller falls back
    // to the generic case
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = op (cast (Ax [p]))
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint16_t -> int32_t, and apply
// ABS.  The loop body lives in the shared template GB_unaryop_transpose.c,
// driven by the GB_* macros defined above; this wrapper only binds the types.
GrB_Info GB_tran__abs_int32_uint16
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix, transposed on the fly
    int64_t *restrict *Rowcounts,       // workspace used by the template -- see GB_unaryop_transpose.c
    GBI_single_iterator Iter,           // iterator over the vectors of A
    const int64_t *restrict A_slice,    // slice boundaries of A (naslice+1 entries, presumably -- confirm in template)
    int naslice                         // number of slices
)
{
    #if GB_DISABLE
    // operator compiled out via GB_control.h (GB_DISABLE)
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
conv_im2col_sgemm_sse_pack.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
//
#include "option.h"
#include "mat.h"
namespace ncnn{
// Repack an im2col matrix into 4-wide column panels so a following SGEMM can
// stream 4 output positions with unit stride (SSE load/store per row).
// Destination channel layout of bottom_tm:
//   channels [0, out_size/4)                : full panels of 4 positions
//   channels [out_size/4, out_size/4+out_size%4) : leftover single positions
// NOTE(review): kernel_w*kernel_h is only used for row counting and
// stride_w/stride_h/outch are unused in this body; also, bottom_tm is a local
// Mat whose create() detaches it from top_blob, so the packed data is not
// visible to the caller -- presumably the matching sgemm consumes it in a
// fuller version of this routine.  Confirm against the original source.
static void conv_im2col_sgemm_sse_pack(const Mat &bottom_blob, Mat &top_blob,
    const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt,
    int inch, int outch, int outh, int outw)
{
    size_t elemsize = bottom_blob.elemsize;
    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;        // number of output spatial positions (columns)

    // bottom_im2col memory packed 4 x 4
    // assumes bottom_blob is already in im2col layout: inch*kernel_size rows
    // of out_size values each -- TODO confirm with the caller
    Mat bottom_im2col = bottom_blob;
    Mat bottom_tm = top_blob;
    bottom_tm.create(4*kernel_size, inch, out_size/4 + out_size%4, elemsize, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;           // number of full 4-column panels
        int remain_size_start = nn_size << 2;  // first position not covered by a full panel

        // Pack 4 consecutive output positions per destination channel.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 4;
            const float* img0 = bottom_im2col.channel(0);
            img0 += i;
            float* tmpptr = bottom_tm.channel(i/4);
            for (int q=0; q<inch*kernel_size; q++)
            {
#if __SSE__
                // copy 4 floats at once (unaligned load/store)
                _mm_storeu_ps(tmpptr, _mm_loadu_ps(img0));
#else
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img0[2];
                tmpptr[3] = img0[3];
#endif // __SSE__
                tmpptr += 4;
                img0 += out_size;   // advance to the same 4 positions in the next im2col row
            }
        }

        // Pack the remaining (out_size % 4) positions one at a time.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<out_size; i++)
        {
            const float* img0 = bottom_im2col.channel(0);
            img0 += i;
            float* tmpptr = bottom_tm.channel(i/4 + i%4);
            for (int q=0; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr += 1;
                img0 += out_size;   // next im2col row, same position
            }
        }
    }
}
}
|
ast-dump-openmp-parallel.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test(void) {
#pragma omp parallel
  ;
}
// Exercises the AST dump of a bare '#pragma omp parallel' with a null-stmt
// body.  Comments are appended only AFTER the function: the CHECK lines below
// match exact source locations (e.g. line:3:6, col:3, line:6:1), so nothing
// may be inserted before or inside the function body.
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-parallel.c:3:1, line:6:1> line:3:6 test 'void (void)'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:17, line:6:1>
// CHECK-NEXT: `-OMPParallelDirective {{.*}} <line:4:1, col:21>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-NullStmt {{.*}} <col:3>
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-parallel.c:4:1) *const restrict'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.