/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/annotate.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/decorate.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/fx-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/layer.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/opencl-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/resource_.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
  Define declarations.

  Compound operators are rewritten by AcquireFxInfo() into single-byte
  tokens so the expression parser only ever has to inspect one character
  at a time.  The values 0xf5-0xfd are outside the printable ASCII range
  and therefore cannot collide with legitimate expression text.
*/
#define LeftShiftOperator 0xf5U
#define RightShiftOperator 0xf6U
#define LessThanEqualOperator 0xf7U
#define GreaterThanEqualOperator 0xf8U
#define EqualOperator 0xf9U
#define NotEqualOperator 0xfaU
#define LogicalAndOperator 0xfbU
#define LogicalOrOperator 0xfcU
#define ExponentialNotation 0xfdU
/*
  Interpreter state for one -fx expression evaluation.
*/
struct _FxInfo
{
  const Image
    *images;        /* image sequence the expression is evaluated against */

  char
    *expression;    /* preprocessed expression text (owned, see DestroyFxInfo) */

  FILE
    *file;          /* destination for debug() output; defaults to stderr */

  SplayTreeInfo
    *colors,        /* cache of parsed color names */
    *symbols;       /* cache of computed symbols/statistics, keyed by string */

  CacheView
    **view;         /* one virtual cache view per image in the sequence */

  RandomInfo
    *random_info;   /* RNG state for rand()/noise functions */

  ExceptionInfo
    *exception;     /* private exception sink for internal operations */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *image,const char *expression)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: the expression.
%
*/
MagickExport FxInfo *AcquireFxInfo(const Image *image,const char *expression)
{
  char
    fx_op[2];

  const Image
    *next;

  FxInfo
    *fx_info;

  register ssize_t
    i;

  /*
    Allocate and zero the interpreter state; allocation failure is fatal.
  */
  fx_info=(FxInfo *) AcquireMagickMemory(sizeof(*fx_info));
  if (fx_info == (FxInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=image;
  /*
    NOTE(review): the colors tree relinquishes values with
    RelinquishAlignedMemory while the symbols tree uses
    RelinquishMagickMemory -- the value allocators must match accordingly.
  */
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishAlignedMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /*
    One virtual cache view per image in the sequence, indexed by list
    position (see FxGetSymbol's s/u/v image selectors).
  */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  next=GetFirstImageInList(fx_info->images);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    fx_info->view[i]=AcquireVirtualCacheView(next,fx_info->exception);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  (void) SubstituteString(&fx_info->expression," ",""); /* compact string */
  /*
    Force right-to-left associativity for unary negation: every '-' becomes
    '-1.0*', then the two follow-up substitutions undo the rewrite where it
    would corrupt exponent operands ('^-') and scientific notation ('E-'/
    'e-').  The order of these four substitutions is significant.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  /*
    Convert compound to simple (single-byte) operators; see the
    *Operator defines above.
  */
  fx_op[1]='\0';
  *fx_op=(char) LeftShiftOperator;
  (void) SubstituteString(&fx_info->expression,"<<",fx_op);
  *fx_op=(char) RightShiftOperator;
  (void) SubstituteString(&fx_info->expression,">>",fx_op);
  *fx_op=(char) LessThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,"<=",fx_op);
  *fx_op=(char) GreaterThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,">=",fx_op);
  *fx_op=(char) EqualOperator;
  (void) SubstituteString(&fx_info->expression,"==",fx_op);
  *fx_op=(char) NotEqualOperator;
  (void) SubstituteString(&fx_info->expression,"!=",fx_op);
  *fx_op=(char) LogicalAndOperator;
  (void) SubstituteString(&fx_info->expression,"&&",fx_op);
  *fx_op=(char) LogicalOrOperator;
  (void) SubstituteString(&fx_info->expression,"||",fx_op);
  *fx_op=(char) ExponentialNotation;
  (void) SubstituteString(&fx_info->expression,"**",fx_op);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% ExceptionInfo *exception)
% Image *AddNoiseImageChannel(const Image *image,const ChannelType channel,
% const NoiseType noise_type,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  ExceptionInfo *exception)
{
  /*
    Convenience wrapper: add noise to the default channels.
  */
  return(AddNoiseImageChannel(image,DefaultChannels,noise_type,exception));
}
MagickExport Image *AddNoiseImageChannel(const Image *image,
  const ChannelType channel,const NoiseType noise_type,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"

  CacheView
    *image_view,
    *noise_view;

  const char
    *option;

  double
    attenuate;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated path when available; a NULL result falls
    through to the CPU implementation below.
  */
  noise_image=AccelerateAddNoiseImage(image,channel,noise_type,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&noise_image->exception);
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.  The "attenuate" artifact scales noise intensity
    (default 1.0).
  */
  attenuate=1.0;
  option=GetImageArtifact(image,"attenuate");
  if (option != (char *) NULL)
    attenuate=StringToDouble(option,(char **) NULL);
  status=MagickTrue;
  progress=0;
  /*
    One RandomInfo per thread so rows can draw noise concurrently without
    contending on the generator state.
  */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    key == ~0UL signals a deterministic (seeded) RNG; in that case the loop
    must run single-threaded so results are reproducible.
  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,noise_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict noise_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    noise_indexes=GetCacheViewAuthenticIndexQueue(noise_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(GenerateDifferentialNoise(random_info[id],
          GetPixelRed(p),noise_type,attenuate)));
      if (IsGrayColorspace(image->colorspace) != MagickFalse)
        {
          /*
            Grayscale: replicate the noised red channel so the pixel stays
            gray rather than drawing independent noise per channel.
          */
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
        }
      else
        {
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(GenerateDifferentialNoise(
              random_info[id],GetPixelGreen(p),noise_type,attenuate)));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(GenerateDifferentialNoise(
              random_info[id],GetPixelBlue(p),noise_type,attenuate)));
        }
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(GenerateDifferentialNoise(
          random_info[id],GetPixelOpacity(p),noise_type,attenuate)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(noise_indexes+x,ClampToQuantum(
          GenerateDifferentialNoise(random_info[id],GetPixelIndex(
          indexes+x),noise_type,attenuate)));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AddNoiseImage)
#endif
        proceed=SetImageProgress(image,AddNoiseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag "BlueShift/Image"

  CacheView
    *image_view,
    *shift_view;

  Image
    *shift_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  shift_image=CloneImage(image,0,0,MagickTrue,exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&shift_image->exception);
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,shift_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    MagickPixelPacket
      pixel;

    Quantum
      quantum;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        First pass: average each channel with factor times the minimum RGB
        component (pulls the color toward its darkest component).
      */
      quantum=GetPixelRed(p);
      if (GetPixelGreen(p) < quantum)
        quantum=GetPixelGreen(p);
      if (GetPixelBlue(p) < quantum)
        quantum=GetPixelBlue(p);
      pixel.red=0.5*(GetPixelRed(p)+factor*quantum);
      pixel.green=0.5*(GetPixelGreen(p)+factor*quantum);
      pixel.blue=0.5*(GetPixelBlue(p)+factor*quantum);
      /*
        Second pass: average the result with factor times the maximum RGB
        component -- the combination mutes saturation to simulate moonlight.
      */
      quantum=GetPixelRed(p);
      if (GetPixelGreen(p) > quantum)
        quantum=GetPixelGreen(p);
      if (GetPixelBlue(p) > quantum)
        quantum=GetPixelBlue(p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetPixelRed(q,ClampToQuantum(pixel.red));
      SetPixelGreen(q,ClampToQuantum(pixel.green));
      SetPixelBlue(q,ClampToQuantum(pixel.blue));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlueShiftImage)
#endif
        proceed=SetImageProgress(image,BlueShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Simulate a charcoal drawing: edge-detect a copy of the image, blur the
    result, then normalize, negate, and convert to grayscale.  Each
    intermediate image is destroyed as soon as the next stage owns a result.
  */
  Image
    *charcoal_image,
    *outline_image,
    *working_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  working_image=CloneImage(image,0,0,MagickTrue,exception);
  if (working_image == (Image *) NULL)
    return((Image *) NULL);
  outline_image=EdgeImage(working_image,radius,exception);
  working_image=DestroyImage(working_image);
  if (outline_image == (Image *) NULL)
    return((Image *) NULL);
  charcoal_image=BlurImage(outline_image,radius,sigma,exception);
  outline_image=DestroyImage(outline_image);
  if (charcoal_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(charcoal_image);
  (void) NegateImage(charcoal_image,MagickFalse);
  (void) GrayscaleImage(charcoal_image,image->intensity);
  return(charcoal_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *opacity,
% const PixelPacket colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: A character string indicating the level of opacity as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *opacity,
  const PixelPacket colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"

  CacheView
    *colorize_view,
    *image_view;

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    pixel;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colorize_image=CloneImage(image,0,0,MagickTrue,exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&colorize_image->exception);
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) ||
      (IsPixelGray(&colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace);
  if ((colorize_image->matte == MagickFalse) &&
      (colorize.opacity != OpaqueOpacity))
    (void) SetImageAlphaChannel(colorize_image,OpaqueAlphaChannel);
  if (opacity == (const char *) NULL)
    return(colorize_image);
  /*
    Determine the per-channel blend percentages from the opacity argument
    (e.g. "90/100/10" is 90% red, 100% green, 10% blue).  Unspecified
    channels inherit rho; opacity defaults to fully opaque.
  */
  flags=ParseGeometry(opacity,&geometry_info);
  pixel.red=geometry_info.rho;
  pixel.green=geometry_info.rho;
  pixel.blue=geometry_info.rho;
  pixel.opacity=(MagickRealType) OpaqueOpacity;
  if ((flags & SigmaValue) != 0)
    pixel.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    pixel.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    pixel.opacity=geometry_info.psi;
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  colorize_view=AcquireAuthenticCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,colorize_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(colorize_view,0,y,colorize_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Blend each channel with the fill color by its percentage.  Clamp
        the double-valued blend before storing: without ClampToQuantum the
        implicit double-to-Quantum conversion can overflow or truncate on
        integer-Quantum builds.
      */
      SetPixelRed(q,ClampToQuantum((GetPixelRed(p)*(100.0-pixel.red)+
        colorize.red*pixel.red)/100.0));
      SetPixelGreen(q,ClampToQuantum((GetPixelGreen(p)*(100.0-pixel.green)+
        colorize.green*pixel.green)/100.0));
      SetPixelBlue(q,ClampToQuantum((GetPixelBlue(p)*(100.0-pixel.blue)+
        colorize.blue*pixel.blue)/100.0));
      if (colorize_image->matte == MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      else
        SetPixelOpacity(q,ClampToQuantum((GetPixelOpacity(p)*(100.0-
          pixel.opacity)+colorize.opacity*pixel.opacity)/100.0));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(colorize_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorizeImage)
#endif
        proceed=SetImageProgress(image,ColorizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  colorize_view=DestroyCacheView(colorize_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  /*
    Working copy of the transform, padded/truncated to 6x6 and initialized
    to identity so unspecified entries leave channels unchanged.  Columns:
    red, green, blue, alpha, black (CMYK index), constant offset.
  */
  double
    ColorMatrix[6][6] =
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Create color matrix.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  i=0;
  /*
    Copy the caller's (possibly non-6x6) kernel into the 6x6 working
    matrix; entries outside 6x6 are consumed but ignored.
  */
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&color_image->exception);
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        *message;

      /*
        Log the effective 6x6 matrix, one row per line.
      */
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MaxTextExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    ColorMatrix image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickRealType
      pixel;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register IndexPacket
      *magick_restrict color_indexes;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    color_indexes=GetCacheViewAuthenticIndexQueue(color_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;

      size_t
        height;

      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      /*
        Each matrix row v produces one output channel as a weighted sum
        of the input channels plus a QuantumRange-scaled offset.
      */
      for (v=0; v < (ssize_t) height; v++)
      {
        pixel=ColorMatrix[v][0]*GetPixelRed(p)+ColorMatrix[v][1]*
          GetPixelGreen(p)+ColorMatrix[v][2]*GetPixelBlue(p);
        if (image->matte != MagickFalse)
          pixel+=ColorMatrix[v][3]*(QuantumRange-GetPixelOpacity(p));
        if (image->colorspace == CMYKColorspace)
          pixel+=ColorMatrix[v][4]*GetPixelIndex(indexes+x);
        pixel+=QuantumRange*ColorMatrix[v][5];
        switch (v)
        {
          case 0: SetPixelRed(q,ClampToQuantum(pixel)); break;
          case 1: SetPixelGreen(q,ClampToQuantum(pixel)); break;
          case 2: SetPixelBlue(q,ClampToQuantum(pixel)); break;
          case 3:
          {
            if (image->matte != MagickFalse)
              SetPixelAlpha(q,ClampToQuantum(pixel));
            break;
          }
          case 4:
          {
            if (image->colorspace == CMYKColorspace)
              SetPixelIndex(color_indexes+x,ClampToQuantum(pixel));
            break;
          }
        }
      }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorMatrixImage)
#endif
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
% ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
MagickExport FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  /*
    Release every resource owned by the FxInfo structure (exception sink,
    expression text, symbol/color caches, per-image cache views, RNG state)
    and then the structure itself.  Always returns NULL so callers can
    write fx_info=DestroyFxInfo(fx_info).
  */
  ssize_t
    index;

  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  /*
    Destroy the cache views in reverse acquisition order.
  */
  index=(ssize_t) GetImageListLength(fx_info->images);
  while (--index >= 0)
    fx_info->view[index]=DestroyCacheView(fx_info->view[index]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateExpression method is:
%
% MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
% const ChannelType channel,const ssize_t x,const ssize_t y,
% double *alpha,Exceptioninfo *exception)
% MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,double *alpha,
% Exceptioninfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double FxChannelStatistics(FxInfo *fx_info,const Image *image,
  ChannelType channel,const char *symbol,ExceptionInfo *exception)
{
  /*
    Evaluate a per-channel image statistic symbol (e.g. "mean", "maxima",
    "r.standard_deviation") and return it scaled to [0,1] by QuantumScale.
    Results are memoized in fx_info->symbols keyed by image/channel/symbol.
  */
  char
    channel_symbol[MaxTextExtent],
    key[MaxTextExtent],
    statistic[MaxTextExtent];

  const char
    *value;

  register const char
    *p;

  /*
    A '.' in the symbol introduces an explicit channel override,
    e.g. "r.mean" selects the red channel.
  */
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  *channel_symbol='\0';
  if (*p == '.')
    {
      ssize_t
        option;

      (void) CopyMagickString(channel_symbol,p+1,MaxTextExtent);
      option=ParseCommandOption(MagickChannelOptions,MagickTrue,channel_symbol);
      if (option >= 0)
        channel=(ChannelType) option;
    }
  (void) FormatLocaleString(key,MaxTextExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,key);
  if (value != (const char *) NULL)
    return(QuantumScale*StringToDouble(value,(char **) NULL));
  (void) DeleteNodeFromSplayTree(fx_info->symbols,key);
  /*
    Initialize the statistic buffer: if the symbol matches none of the
    statistics below, the original code cached and parsed an uninitialized
    stack buffer (undefined behavior).  An empty string yields 0.0.
  */
  *statistic='\0';
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;

      depth=GetImageChannelDepth(image,channel,exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g",(double) depth);
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness,
        exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g",kurtosis);
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageChannelRange(image,channel,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g",maxima);
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageChannelMean(image,channel,&mean,&standard_deviation,
        exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g",mean);
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageChannelRange(image,channel,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g",minima);
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness,
        exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g",skewness);
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageChannelMean(image,channel,&mean,&standard_deviation,
        exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g",
        standard_deviation);
    }
  /*
    Cache the formatted statistic for subsequent lookups of the same key.
  */
  (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key),
    ConstantString(statistic));
  return(QuantumScale*StringToDouble(statistic,(char **) NULL));
}
/*
  Forward declaration: the expression evaluator is mutually recursive with
  the symbol/subexpression helpers defined below.
*/
static double
  FxEvaluateSubexpression(FxInfo *,const ChannelType,const ssize_t,
    const ssize_t,const char *,const size_t,double *,ExceptionInfo *);
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
  /*
    Greatest common divisor via the iterative Euclidean algorithm; performs
    the same remainder sequence as the recursive formulation.
  */
  while (beta != 0)
  {
    MagickOffsetType
      residue;

    residue=alpha % beta;
    alpha=beta;
    beta=residue;
  }
  return(alpha);
}
/*
  FxSubexpression() scans forward from 'expression' (which is expected to
  begin at or before an opening parenthesis) and returns a pointer to the
  ')' that closes the outermost '(' — i.e. the position where the
  parenthesis nesting level is 1 and a ')' is seen.  If the string ends
  before that point an UnbalancedParenthesis error is raised and the
  pointer to the terminating NUL is returned.
*/
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  register const char
    *p;

  register ssize_t
    level;

  level=0;
  for (p=expression; *p != '\0'; p++)
  {
    if ((level == 1) && (*p == ')'))
      break;  /* closing parenthesis of the outermost group */
    if (*p == '(')
      level++;
    else
      if (*p == ')')
        level--;
  }
  if (*p == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(p);
}
/*
  FxGetSymbol() resolves one symbolic term of an -fx expression to a double.
  It handles, in order:
    - image selectors 's' (current), 'u' (first), 'v' (second), optionally
      with a bracketed index subexpression, e.g. u[t-1];
    - pixel references p{x,y} (absolute) and p[dx,dy] (relative to x,y);
    - color-name lookups (cached in fx_info->colors) and user-defined
      symbols (fx_info->symbols);
    - per-pixel channel values (r, g, b, a, o, ...) and per-image
      attributes/statistics (w, h, mean, maxima, page.*, resolution.*, ...).
  Channel values are scaled by QuantumScale to the [0,1] range.  On an
  unknown symbol an UnableToParseExpression error is raised and 0.0 is
  returned.
*/
static double FxGetSymbol(FxInfo *fx_info,const ChannelType channel,
  const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
  ExceptionInfo *exception)
{
  char
    *q,
    symbol[MaxTextExtent];

  const char
    *p,
    *value;

  double
    alpha,
    beta;

  Image
    *image;

  MagickBooleanType
    status;

  MagickPixelPacket
    pixel;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    level;

  p=expression;
  i=GetImageIndexInList(fx_info->images);
  level=0;
  /* default sampling point: the pixel currently being evaluated */
  point.x=(double) x;
  point.y=(double) y;
  if (isalpha((int) ((unsigned char) *(p+1))) == 0)
    {
      char
        *subexpression;

      subexpression=AcquireString(expression);
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          /*
            Image selector: s = current image, u = image 0, v = image 1;
            an optional [expr] overrides the image index.
          */
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              /* copy the balanced [...] contents into subexpression */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              i=(ssize_t) alpha;
              if (*p != '\0')
                p++;
            }
          if (*p == '.')
            p++;
        }
      if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
        {
          /*
            Pixel reference: p{x,y} is an absolute coordinate, p[dx,dy] is
            an offset from the current (x,y).
          */
          p++;
          if (*p == '{')
            {
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              /* the comma subexpression yields x in alpha and y in beta */
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              point.x=alpha;
              point.y=beta;
              if (*p != '\0')
                p++;
            }
          else
            if (*p == '[')
              {
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  depth,&beta,exception);
                point.x+=alpha;
                point.y+=beta;
                if (*p != '\0')
                  p++;
              }
          if (*p == '.')
            p++;
        }
      subexpression=DestroyString(subexpression);
    }
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  i=GetImageIndexInList(image);
  GetMagickPixelPacket(image,&pixel);
  /* sample (possibly fractional) coordinates via the image's interpolator */
  status=InterpolateMagickPixelPacket(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  (void) status;
  if ((strlen(p) > 2) &&
      (LocaleCompare(p,"intensity") != 0) &&
      (LocaleCompare(p,"luma") != 0) &&
      (LocaleCompare(p,"luminance") != 0) &&
      (LocaleCompare(p,"hue") != 0) &&
      (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MaxTextExtent];

      /*
        Treat the symbol as a possible color name (e.g. "red.r"): strip a
        trailing ".channel" suffix, then try the color cache and the color
        database before falling back to the symbol tables below.
      */
      (void) CopyMagickString(name,p,MaxTextExtent);
      for (q=name+(strlen(name)-1); q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      if ((strlen(name) > 2) &&
          (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL))
        {
          MagickPixelPacket
            *color;

          color=(MagickPixelPacket *) GetValueFromSplayTree(fx_info->colors,
            name);
          if (color != (MagickPixelPacket *) NULL)
            {
              pixel=(*color);
              p+=strlen(name);
            }
          else
            if (QueryMagickColor(name,&pixel,fx_info->exception) != MagickFalse)
              {
                /* cache the resolved color for subsequent lookups */
                (void) AddValueToSplayTree(fx_info->colors,ConstantString(name),
                  CloneMagickPixelPacket(&pixel));
                p+=strlen(name);
              }
        }
    }
  (void) CopyMagickString(symbol,p,MaxTextExtent);
  StripString(symbol);
  if (*symbol == '\0')
    {
      /*
        Bare pixel reference (e.g. "p[1,1]" with no channel suffix): return
        the value of the channel currently being evaluated.
      */
      switch (channel)
      {
        case RedChannel: return(QuantumScale*pixel.red);
        case GreenChannel: return(QuantumScale*pixel.green);
        case BlueChannel: return(QuantumScale*pixel.blue);
        case OpacityChannel:
        {
          double
            alpha;

          if (pixel.matte == MagickFalse)
            return(1.0);
          alpha=(double) (QuantumScale*GetPixelAlpha(&pixel));
          return(alpha);
        }
        case IndexChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.index);
        }
        case DefaultChannels:
          return(QuantumScale*GetMagickPixelIntensity(image,&pixel));
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /*
    Named symbols, dispatched on the first character.  Statistics symbols
    (mean, minima, maxima, kurtosis, skewness, standard_deviation, depth)
    defer to FxChannelStatistics() which caches per-image results.
  */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((double) (QuantumScale*GetPixelAlpha(&pixel)));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (LocaleNCompare(symbol,"channel",7) == 0)
        {
          GeometryInfo
            channel_info;

          MagickStatusType
            flags;

          /*
            channel(r,g,b,...) geometry: pick the argument that matches the
            channel being evaluated (CMYK images use a different mapping).
          */
          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case OpacityChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BlueChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case OpacityChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            case IndexChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            default:
              return(0.0);
          }
        }
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'E':
    case 'e':
    {
      if (LocaleCompare(symbol,"extent") == 0)
        {
          if (image->extent != 0)
            return((double) image->extent);
          return((double) GetBlobSize(image));
        }
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.index);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((double) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
            ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->x_resolution);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->y_resolution);
      if (LocaleCompare(symbol,"intensity") == 0)
        return(QuantumScale*GetMagickPixelIntensity(image,&pixel));
      if (LocaleCompare(symbol,"i") == 0)
        return((double) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((double) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
            ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luma") == 0)
        {
          double
            luma;

          /* Rec.709 luma weights */
          luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luma);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminance;

          luminance=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luminance);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((double) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.opacity);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((double) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((double) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((double) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((double) image->page.y);
      if (LocaleCompare(symbol,"printsize.x") == 0)
        return(PerceptibleReciprocal(image->x_resolution)*image->columns);
      if (LocaleCompare(symbol,"printsize.y") == 0)
        return(PerceptibleReciprocal(image->y_resolution)*image->rows);
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(symbol,"quality") == 0)
        return((double) image->quality);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->x_resolution);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->y_resolution);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
            ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((double) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((double) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        {
          double
            depth;

          depth=(double) GetImageChannelDepth(image,channel,fx_info->exception);
          return(depth);
        }
      break;
    }
    default:
      break;
  }
  /* fall back to a user-defined symbol stored in the symbol table */
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (value != (const char *) NULL)
    return(StringToDouble(value,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UnableToParseExpression","`%s'",symbol);
  return(0.0);
}
/*
  FxOperatorPrecedence() scans 'expression' and returns a pointer to the
  operator at which the expression should be split — the operator of the
  loosest binding found at parenthesis/bracket level 0 (precedence values
  grow toward looser binding; >= keeps left-to-right associativity, while
  unary/ternary/assignment use > for right-to-left associativity).  Returns
  NULL when no operator is found.  'c' tracks the previous significant
  character so that, e.g., a '-' after '(' is treated as unary rather than
  subtraction, and digits followed by names are treated as implied
  multiplication.
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;

  register const char
    *subexpression;

  register int
    c;

  size_t
    level;

  c=(-1);
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while ((c != '\0') && (*expression != '\0'))
  {
    precedence=UndefinedPrecedence;
    if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      Skip over multi-character function names and literals whose characters
      would otherwise be mistaken for operators (e.g. the '2' in atan2, the
      '+'/'-' of scientific notation, hex digits after '#').
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (LocaleNCompare(expression,"acosh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (LocaleNCompare(expression,"asinh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (LocaleNCompare(expression,"atanh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
        if (LocaleNCompare(expression,"atan2",5) == 0)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((isdigit(c) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
      }
      /* no break above: an unmatched 'E'/'e' falls through to 'J'; the
         case-insensitive "j0"/"j1" tests cannot match it, so it just
         reaches the break below */
      case 'J':
      case 'j':
      {
        if ((LocaleNCompare(expression,"j0",2) == 0) ||
            (LocaleNCompare(expression,"j1",2) == 0))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        /* hex color literal: consume the hex digits */
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        default:
        {
          /*
            Implied multiplication: previous character ends a value (digit
            or ')') and the current one begins a name, '(' or number —
            e.g. "2u" or ")(", excluding the geometry letters 'x'/'y'.
          */
          if (((c != 0) && ((isdigit(c) != 0) ||
               (strchr(")",c) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
                (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
               ((isdigit(c) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /* binary only when not preceded by another operator (unary sign) */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha(c) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}
static double FxEvaluateSubexpression(FxInfo *fx_info,const ChannelType channel,
const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
double *beta,ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
#define FxMaxSubexpressionDepth 200
#define FxReturn(value) \
{ \
subexpression=DestroyString(subexpression); \
return(value); \
}
char
*q,
*subexpression;
double
alpha,
gamma;
register const char
*p;
*beta=0.0;
subexpression=AcquireString(expression);
*subexpression='\0';
if (depth > FxMaxSubexpressionDepth)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",expression);
FxReturn(0.0);
}
if (exception->severity >= ErrorException)
FxReturn(0.0);
while (isspace((int) ((unsigned char) *expression)) != 0)
expression++;
if (*expression == '\0')
FxReturn(0.0);
p=FxOperatorPrecedence(expression,exception);
if (p != (const char *) NULL)
{
(void) CopyMagickString(subexpression,expression,(size_t)
(p-expression+1));
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
switch ((unsigned char) *p)
{
case '~':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) (~(size_t) *beta);
FxReturn(*beta);
}
case '!':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta == 0.0 ? 1.0 : 0.0);
}
case '^':
{
*beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,
depth+1,beta,exception));
FxReturn(*beta);
}
case '*':
case ExponentialNotation:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha*(*beta));
}
case '/':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if (*beta == 0.0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"DivideByZero","`%s'",expression);
FxReturn(0.0);
}
FxReturn(alpha/(*beta));
}
case '%':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=fabs(floor((*beta)+0.5));
if (*beta == 0.0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"DivideByZero","`%s'",expression);
FxReturn(0.0);
}
FxReturn(fmod(alpha,(double) *beta));
}
case '+':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha+(*beta));
}
case '-':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha-(*beta));
}
case LeftShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) > (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
FxReturn(*beta);
}
case RightShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) > (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '<':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha < *beta ? 1.0 : 0.0);
}
case LessThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha <= *beta ? 1.0 : 0.0);
}
case '>':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha > *beta ? 1.0 : 0.0);
}
case GreaterThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha >= *beta ? 1.0 : 0.0);
}
case EqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
}
case NotEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
}
case '&':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '|':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
FxReturn(*beta);
}
case LogicalAndOperator:
{
p++;
if (alpha <= 0.0)
{
*beta=0.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case LogicalOrOperator:
{
p++;
if (alpha > 0.0)
{
*beta=1.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case '?':
{
double
gamma;
(void) CopyMagickString(subexpression,++p,MaxTextExtent);
q=subexpression;
p=StringToken(":",&q);
if (q == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
if (fabs(alpha) >= MagickEpsilon)
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,depth+1,beta,
exception);
FxReturn(gamma);
}
case '=':
{
char
numeric[MaxTextExtent];
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
(void) FormatLocaleString(numeric,MaxTextExtent,"%.20g",(double)
*beta);
(void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression);
(void) AddValueToSplayTree(fx_info->symbols,ConstantString(
subexpression),ConstantString(numeric));
FxReturn(*beta);
}
case ',':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha);
}
case ';':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta);
}
default:
{
gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,
beta,exception);
FxReturn(gamma);
}
}
}
if (strchr("(",(int) *expression) != (char *) NULL)
{
if (depth >= FxMaxParenthesisDepth)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"ParenthesisNestedTooDeeply","`%s'",expression);
(void) CopyMagickString(subexpression,expression+1,MaxTextExtent);
if (strlen(subexpression) != 0)
subexpression[strlen(subexpression)-1]='\0';
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
FxReturn(gamma);
}
switch (*expression)
{
case '+':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(1.0*gamma);
}
case '-':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(-1.0*gamma);
}
case '~':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn((double) (~(size_t) (gamma+0.5)));
}
case 'A':
case 'a':
{
if (LocaleNCompare(expression,"abs",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(fabs(alpha));
}
#if defined(MAGICKCORE_HAVE_ACOSH)
if (LocaleNCompare(expression,"acosh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(acosh(alpha));
}
#endif
if (LocaleNCompare(expression,"acos",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(acos(alpha));
}
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"airy",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha);
FxReturn(gamma*gamma);
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (LocaleNCompare(expression,"asinh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(asinh(alpha));
}
#endif
if (LocaleNCompare(expression,"asin",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(asin(alpha));
}
if (LocaleNCompare(expression,"alt",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
}
if (LocaleNCompare(expression,"atan2",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atan2(alpha,*beta));
}
#if defined(MAGICKCORE_HAVE_ATANH)
if (LocaleNCompare(expression,"atanh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atanh(alpha));
}
#endif
if (LocaleNCompare(expression,"atan",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(atan(alpha));
}
if (LocaleCompare(expression,"a") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(expression,"b") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'C':
case 'c':
{
if (LocaleNCompare(expression,"ceil",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(ceil(alpha));
}
if (LocaleNCompare(expression,"clamp",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha < 0.0)
FxReturn(0.0);
if (alpha > 1.0)
FxReturn(1.0);
FxReturn(alpha);
}
if (LocaleNCompare(expression,"cosh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(cosh(alpha));
}
if (LocaleNCompare(expression,"cos",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(cos(alpha));
}
if (LocaleCompare(expression,"c") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'D':
case 'd':
{
if (LocaleNCompare(expression,"debug",5) == 0)
{
const char
*type;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (fx_info->images->colorspace == CMYKColorspace)
switch (channel)
{
case CyanChannel: type="cyan"; break;
case MagentaChannel: type="magenta"; break;
case YellowChannel: type="yellow"; break;
case OpacityChannel: type="opacity"; break;
case BlackChannel: type="black"; break;
default: type="unknown"; break;
}
else
switch (channel)
{
case RedChannel: type="red"; break;
case GreenChannel: type="green"; break;
case BlueChannel: type="blue"; break;
case OpacityChannel: type="opacity"; break;
default: type="unknown"; break;
}
*subexpression='\0';
if (strlen(subexpression) > 1)
(void) CopyMagickString(subexpression,expression+6,MaxTextExtent);
if (strlen(subexpression) > 1)
subexpression[strlen(subexpression)-1]='\0';
if (fx_info->file != (FILE *) NULL)
(void) FormatLocaleFile(fx_info->file,
"%s[%.20g,%.20g].%s: %s=%.*g\n",fx_info->images->filename,
(double) x,(double) y,type,subexpression,GetMagickPrecision(),
(double) alpha);
FxReturn(0.0);
}
if (LocaleNCompare(expression,"drc",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((alpha/(*beta*(alpha-1.0)+1.0)));
}
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(expression,"epsilon") == 0)
FxReturn(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
if (LocaleNCompare(expression,"erp",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(erf(alpha));
}
#endif
if (LocaleNCompare(expression,"exp",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(exp(alpha));
}
if (LocaleCompare(expression,"e") == 0)
FxReturn(2.7182818284590452354);
break;
}
case 'F':
case 'f':
{
if (LocaleNCompare(expression,"floor",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
break;
}
case 'G':
case 'g':
{
if (LocaleNCompare(expression,"gauss",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
gamma=exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI);
FxReturn(gamma);
}
if (LocaleNCompare(expression,"gcd",3) == 0)
{
MagickOffsetType
gcd;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType)
(*beta+0.5));
FxReturn((double) gcd);
}
if (LocaleCompare(expression,"g") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(expression,"h") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleCompare(expression,"hue") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleNCompare(expression,"hypot",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(hypot(alpha,*beta));
}
break;
}
case 'K':
case 'k':
{
if (LocaleCompare(expression,"k") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'I':
case 'i':
{
if (LocaleCompare(expression,"intensity") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleNCompare(expression,"int",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (LocaleNCompare(expression,"isnan",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn((double) !!IsNaN(alpha));
}
if (LocaleCompare(expression,"i") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(expression,"j") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
#if defined(MAGICKCORE_HAVE_J0)
if (LocaleNCompare(expression,"j0",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j0(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"j1",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j1(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"jinc",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
gamma=(2.0*j1((MagickPI*alpha))/(MagickPI*alpha));
FxReturn(gamma);
}
#endif
break;
}
case 'L':
case 'l':
{
if (LocaleNCompare(expression,"ln",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(log(alpha));
}
if (LocaleNCompare(expression,"logtwo",6) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn(log10(alpha)/log10(2.0));
}
if (LocaleNCompare(expression,"log",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(log10(alpha));
}
if (LocaleCompare(expression,"lightness") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'M':
case 'm':
{
if (LocaleCompare(expression,"MaxRGB") == 0)
FxReturn((double) QuantumRange);
if (LocaleNCompare(expression,"maxima",6) == 0)
break;
if (LocaleNCompare(expression,"max",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha > *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"minima",6) == 0)
break;
if (LocaleNCompare(expression,"min",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha < *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"mod",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
gamma=alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta);
FxReturn(gamma);
}
if (LocaleCompare(expression,"m") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'N':
case 'n':
{
if (LocaleNCompare(expression,"not",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((double) (alpha < MagickEpsilon));
}
if (LocaleCompare(expression,"n") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(expression,"Opaque") == 0)
FxReturn(1.0);
if (LocaleCompare(expression,"o") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(expression,"phi") == 0)
FxReturn(MagickPHI);
if (LocaleCompare(expression,"pi") == 0)
FxReturn(MagickPI);
if (LocaleNCompare(expression,"pow",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(pow(alpha,*beta));
}
if (LocaleCompare(expression,"p") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(expression,"QuantumRange") == 0)
FxReturn((double) QuantumRange);
if (LocaleCompare(expression,"QuantumScale") == 0)
FxReturn(QuantumScale);
break;
}
case 'R':
case 'r':
{
if (LocaleNCompare(expression,"rand",4) == 0)
{
double
alpha;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
alpha=GetPseudoRandomValue(fx_info->random_info);
FxReturn(alpha);
}
if (LocaleNCompare(expression,"round",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(floor(alpha+0.5));
}
if (LocaleCompare(expression,"r") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'S':
case 's':
{
if (LocaleCompare(expression,"saturation") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleNCompare(expression,"sign",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(alpha < 0.0 ? -1.0 : 1.0);
}
if (LocaleNCompare(expression,"sinc",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0)
FxReturn(1.0);
gamma=(sin((MagickPI*alpha))/(MagickPI*alpha));
FxReturn(gamma);
}
if (LocaleNCompare(expression,"sinh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sinh(alpha));
}
if (LocaleNCompare(expression,"sin",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(sin(alpha));
}
if (LocaleNCompare(expression,"sqrt",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sqrt(alpha));
}
if (LocaleNCompare(expression,"squish",6) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn((1.0/(1.0+exp(-alpha))));
}
if (LocaleCompare(expression,"s") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'T':
case 't':
{
if (LocaleNCompare(expression,"tanh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(tanh(alpha));
}
if (LocaleNCompare(expression,"tan",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(tan(alpha));
}
if (LocaleCompare(expression,"Transparent") == 0)
FxReturn(0.0);
if (LocaleNCompare(expression,"trunc",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha >= 0.0)
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"t") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'U':
case 'u':
{
if (LocaleCompare(expression,"u") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'V':
case 'v':
{
if (LocaleCompare(expression,"v") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'W':
case 'w':
{
if (LocaleNCompare(expression,"while",5) == 0)
{
do
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
} while (fabs(alpha) >= MagickEpsilon);
FxReturn(*beta);
}
if (LocaleCompare(expression,"w") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(expression,"y") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(expression,"z") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
default:
break;
}
q=(char *) expression;
alpha=InterpretSiPrefixValue(expression,&q);
if (q == expression)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
FxReturn(alpha);
}
MagickExport MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  /*
    Evaluate the fx expression on the gray channel at the image origin,
    storing the result in *alpha.  Thin convenience wrapper around
    FxEvaluateChannelExpression().
  */
  return(FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception));
}
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *suppressed;

  MagickBooleanType
    status;

  /*
    Evaluate the expression once with the FxInfo debug file handle
    temporarily cleared so no debug output is written, then restore the
    handle before returning the evaluation status.
  */
  suppressed=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception);
  fx_info->file=suppressed;
  return(status);
}
MagickExport MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const ChannelType channel,const ssize_t x,const ssize_t y,double *alpha,
  ExceptionInfo *exception)
{
  double
    beta = 0.0;

  /*
    Evaluate the parsed expression for one channel at pixel (x,y); beta
    receives the secondary operand of two-argument operators.  An
    OptionError recorded in the exception signals evaluation failure.
  */
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
    &beta,exception);
  if (exception->severity == OptionError)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
% Image *FxImageChannel(const Image *image,const ChannelType channel,
% const char *expression,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register ssize_t
    i;

  size_t
    number_threads;

  /*
    Destroy each per-thread FxInfo in the set, then the set itself.
    Returns NULL so the caller can clear its pointer in one assignment.
    The thread resource limit is queried once rather than on every
    iteration of the loop (it is loop-invariant).
  */
  assert(fx_info != (FxInfo **) NULL);
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < (ssize_t) number_threads; i++)
    if (fx_info[i] != (FxInfo *) NULL)
      fx_info[i]=DestroyFxInfo(fx_info[i]);
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  double
    alpha;

  FxInfo
    **fx_info;

  register ssize_t
    i;

  size_t
    number_threads;

  /*
    Acquire one FxInfo per worker thread, each primed with the same
    expression so the OpenMP pixel loop can evaluate without locking.
    Returns NULL on failure (exception holds the reason).
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) memset(fx_info,0,number_threads*sizeof(*fx_info));
  /*
    A leading '@' means the expression is read from a file.
  */
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  if (fx_expression == (char *) NULL)
    {
      /*
        FileToString() can fail (e.g. unreadable @filename); bail out
        before passing a NULL expression to AcquireFxInfo()/DestroyString().
      */
      fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
      return((FxInfo **) NULL);
    }
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    /*
      Pre-parse once per thread so syntax errors surface before the
      parallel region starts.
    */
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);
  return(fx_info);
}
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  /*
    Apply the fx expression to all channels via the gray-channel
    convenience form; returns the transformed image or NULL on failure.
  */
  return(FxImageChannel(image,GrayChannel,expression,exception));
}
MagickExport Image *FxImageChannel(const Image *image,const ChannelType channel,
  const char *expression,ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"

  CacheView
    *fx_view;

  FxInfo
    **magick_restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Apply the mathematical expression to the selected channels of a
    clone of the image; returns the transformed image or NULL on error.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A NULL expression is a no-op: return an unmodified clone.
  */
  if (expression == (const char *) NULL)
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    One FxInfo per worker thread, each holding its own parsed expression.
  */
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(fx_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&fx_image->exception);
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image: evaluate the expression once per requested channel per
    pixel, in parallel over rows.
  */
  status=MagickTrue;
  progress=0;
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,fx_image,fx_image->rows,1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      alpha;

    register IndexPacket
      *magick_restrict fx_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    fx_indexes=GetCacheViewAuthenticIndexQueue(fx_view);
    alpha=0.0;
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      /*
        The expression result is scaled by QuantumRange and clamped, for
        each channel selected in the channel mask.
      */
      if ((channel & RedChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],RedChannel,x,y,
            &alpha,exception);
          SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*alpha));
        }
      if ((channel & GreenChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],GreenChannel,x,y,
            &alpha,exception);
          SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*alpha));
        }
      if ((channel & BlueChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],BlueChannel,x,y,
            &alpha,exception);
          SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*alpha));
        }
      if ((channel & OpacityChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],OpacityChannel,x,y,
            &alpha,exception);
          /*
            For matte images the stored opacity is inverted relative to
            the expression's alpha value.
          */
          if (image->matte == MagickFalse)
            SetPixelOpacity(q,ClampToQuantum((MagickRealType) QuantumRange*
              alpha));
          else
            SetPixelOpacity(q,ClampToQuantum((MagickRealType) (QuantumRange-
              QuantumRange*alpha)));
        }
      if (((channel & IndexChannel) != 0) &&
          (fx_image->colorspace == CMYKColorspace))
        {
          /*
            The black (K) channel of a CMYK image lives in the index queue.
          */
          (void) FxEvaluateChannelExpression(fx_info[id],IndexChannel,x,y,
            &alpha,exception);
          SetPixelIndex(fx_indexes+x,ClampToQuantum((MagickRealType)
            QuantumRange*alpha));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across OpenMP threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FxImageChannel)
#endif
        proceed=SetImageProgress(image,FxImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels "implode" by the specified percentage. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o implode_image: Method ImplodeImage returns a pointer to the image
%      after it is imploded.  A null image is returned if there is a memory
%      shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"

  CacheView
    *image_view,
    *implode_view;

  double
    radius;

  Image
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  implode_image=CloneImage(image,0,0,MagickTrue,exception);
  if (implode_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(implode_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&implode_image->exception);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /*
    A translucent background requires a matte channel on the result.
  */
  if (implode_image->background_color.opacity != OpaqueOpacity)
    implode_image->matte=MagickTrue;
  /*
    Compute scaling factor: the short axis is scaled up so the effect
    region becomes an ellipse inscribed in the image; radius is half the
    short side.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*image->columns;
  center.y=0.5*image->rows;
  radius=center.x;
  if (image->columns > image->rows)
    scale.y=(double) image->columns/(double) image->rows;
  else
    if (image->columns < image->rows)
      {
        scale.x=(double) image->rows/(double) image->columns;
        radius=center.y;
      }
  /*
    Implode image: resample each in-ellipse pixel from a point pulled
    toward the center, in parallel over rows.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(implode_image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,implode_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      distance;

    MagickPixelPacket
      pixel;

    PointInfo
      delta;

    register IndexPacket
      *magick_restrict implode_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    implode_indexes=GetCacheViewAuthenticIndexQueue(implode_view);
    delta.y=scale.y*(double) (y-center.y);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance < (radius*radius))
        {
          double
            factor;

          /*
            Implode the pixel: resample from a source point scaled by
            `factor` toward the center (distance 0 is the center itself,
            which is left at factor 1.0).
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin((double) (MagickPI*sqrt((double) distance)/
              radius/2)),-amount);
          status=InterpolateMagickPixelPacket(image,image_view,
            UndefinedInterpolatePixel,(double) (factor*delta.x/scale.x+
            center.x),(double) (factor*delta.y/scale.y+center.y),&pixel,
            exception);
          if (status == MagickFalse)
            break;
          SetPixelPacket(implode_image,&pixel,q,implode_indexes+x);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across OpenMP threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ImplodeImage)
#endif
        proceed=SetImageProgress(image,ImplodeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
%  The format of the MorphImages method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between image to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphImages(const Image *image,
  const size_t number_frames,ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"

  double
    alpha,
    beta;

  Image
    *morph_image,
    *morph_images;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  register const Image
    *next;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Clone first frame in sequence.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  morph_images=CloneImage(image,0,0,MagickTrue,exception);
  if (morph_images == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  if (GetNextImageInList(image) == (Image *) NULL)
    {
      /*
        Morph single image: there is nothing to blend toward, so just
        replicate the frame number_frames times.
      */
      for (i=1; i < (ssize_t) number_frames; i++)
      {
        morph_image=CloneImage(image,0,0,MagickTrue,exception);
        if (morph_image == (Image *) NULL)
          {
            morph_images=DestroyImageList(morph_images);
            return((Image *) NULL);
          }
        AppendImageToList(&morph_images,morph_image);
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) i,
              number_frames);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      return(GetFirstImageInList(morph_images));
    }
  /*
    Morph image sequence: for each adjacent pair of frames, generate
    number_frames in-between frames by resizing toward the next frame's
    geometry and blending pixel values.
  */
  scene=0;
  next=image;
  for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
  {
    for (i=0; i < (ssize_t) number_frames; i++)
    {
      CacheView
        *image_view,
        *morph_view;

      /*
        beta ramps from ~0 to ~1 across the in-between frames; alpha is
        the complementary weight of the current frame.
      */
      beta=(double) (i+1.0)/(double) (number_frames+1.0);
      alpha=1.0-beta;
      morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
        GetNextImageInList(next)->columns+0.5),(size_t) (alpha*
        next->rows+beta*GetNextImageInList(next)->rows+0.5),
        next->filter,next->blur,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      if (SetImageStorageClass(morph_image,DirectClass) == MagickFalse)
        {
          InheritException(exception,&morph_image->exception);
          morph_image=DestroyImage(morph_image);
          /*
            Also release the frames accumulated so far; the original
            code leaked morph_images on this path.
          */
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      AppendImageToList(&morph_images,morph_image);
      morph_images=GetLastImageInList(morph_images);
      /*
        Resize the next frame to the in-between geometry so the two can
        be blended pixel-for-pixel.
      */
      morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
        morph_images->rows,GetNextImageInList(next)->filter,
        GetNextImageInList(next)->blur,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      image_view=AcquireVirtualCacheView(morph_image,exception);
      morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(morph_image,morph_image,morph_image->rows,1)
#endif
      for (y=0; y < (ssize_t) morph_images->rows; y++)
      {
        MagickBooleanType
          sync;

        register const PixelPacket
          *magick_restrict p;

        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
          exception);
        q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
          exception);
        if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) morph_images->columns; x++)
        {
          /*
            Blend the current in-between frame (q) with the resized next
            frame (p), channel by channel.
          */
          SetPixelRed(q,ClampToQuantum(alpha*
            GetPixelRed(q)+beta*GetPixelRed(p)));
          SetPixelGreen(q,ClampToQuantum(alpha*
            GetPixelGreen(q)+beta*GetPixelGreen(p)));
          SetPixelBlue(q,ClampToQuantum(alpha*
            GetPixelBlue(q)+beta*GetPixelBlue(p)));
          SetPixelOpacity(q,ClampToQuantum(alpha*
            GetPixelOpacity(q)+beta*GetPixelOpacity(p)));
          p++;
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(morph_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      morph_view=DestroyCacheView(morph_view);
      image_view=DestroyCacheView(image_view);
      morph_image=DestroyImage(morph_image);
    }
    if (i < (ssize_t) number_frames)
      break;
    /*
      Clone last frame in sequence.
    */
    morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
    if (morph_image == (Image *) NULL)
      {
        morph_images=DestroyImageList(morph_images);
        return((Image *) NULL);
      }
    AppendImageToList(&morph_images,morph_image);
    morph_images=GetLastImageInList(morph_images);
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MorphImages)
#endif
        proceed=SetImageProgress(image,MorphImageTag,scene,
          GetImageListLength(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
    scene++;
  }
  if (GetNextImageInList(next) != (Image *) NULL)
    {
      morph_images=DestroyImageList(morph_images);
      return((Image *) NULL);
    }
  return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
*/
static inline Quantum PlasmaPixel(RandomInfo *random_info,
  const MagickRealType pixel,const double noise)
{
  Quantum
    perturbed;

  /*
    Perturb the pixel by uniform random noise centered on zero, then
    clamp the result to the valid quantum range.
  */
  perturbed=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)-
    noise/2.0);
  if (perturbed >= QuantumRange)
    return(QuantumRange);
  if (perturbed <= 0)
    return((Quantum) 0);
  return(perturbed);
}
MagickExport MagickBooleanType PlasmaImageProxy(Image *image,
  CacheView *image_view,CacheView *u_view,CacheView *v_view,
  RandomInfo *random_info,const SegmentInfo *segment,size_t attenuate,
  size_t depth)
{
  ExceptionInfo
    *exception;

  double
    plasma;

  PixelPacket
    u,
    v;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  /*
    Recursive worker for PlasmaImage(): subdivides the segment into
    quadrants while depth remains, then assigns plasma values to the
    segment's edge midpoints and center by averaging corner pixels and
    adding attenuated noise.
  */
  if ((fabs(segment->x2-segment->x1) <= MagickEpsilon) &&
      (fabs(segment->y2-segment->y1) <= MagickEpsilon))
    return(MagickTrue);
  if (depth != 0)
    {
      MagickBooleanType
        status;

      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.
      */
      depth--;
      attenuate++;
      x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
      y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth);
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth);
      return(status);
    }
  x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
  y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
  if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
      (fabs(segment->x2-x_mid) < MagickEpsilon) &&
      (fabs(segment->y1-y_mid) < MagickEpsilon) &&
      (fabs(segment->y2-y_mid) < MagickEpsilon))
    return(MagickFalse);
  /*
    Average pixels and apply plasma.  Noise amplitude shrinks as the
    attenuation level grows with recursion depth.
  */
  exception=(&image->exception);
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
      (fabs(segment->x2-x_mid) > MagickEpsilon))
    {
      register PixelPacket
        *magick_restrict q;

      /*
        Left pixel: average the segment's two left corners.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      (void) GetOneCacheViewVirtualPixel(u_view,x,(ssize_t)
        ceil(segment->y1-0.5),&u,exception);
      (void) GetOneCacheViewVirtualPixel(v_view,x,(ssize_t)
        ceil(segment->y2-0.5),&v,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickTrue);
      SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0,
        plasma));
      SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+
        v.green)/2.0,plasma));
      SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/
        2.0,plasma));
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      if (fabs(segment->x1-segment->x2) > MagickEpsilon)
        {
          /*
            Right pixel: average the segment's two right corners.
          */
          x=(ssize_t) ceil(segment->x2-0.5);
          (void) GetOneCacheViewVirtualPixel(u_view,x,(ssize_t)
            ceil(segment->y1-0.5),&u,exception);
          (void) GetOneCacheViewVirtualPixel(v_view,x,(ssize_t)
            ceil(segment->y2-0.5),&v,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if (q == (PixelPacket *) NULL)
            return(MagickTrue);
          SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/
            2.0,plasma));
          SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+
            v.green)/2.0,plasma));
          SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+
            v.blue)/2.0,plasma));
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->y1-y_mid) > MagickEpsilon) ||
      (fabs(segment->y2-y_mid) > MagickEpsilon))
    {
      /*
        NOTE(review): this guard mixes x1 with y2, unlike the symmetric
        x1/x2 test in the branch above — possibly x2 was intended.
        Confirm against upstream before changing.
      */
      if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
          (fabs(segment->y2-y_mid) > MagickEpsilon))
        {
          register PixelPacket
            *magick_restrict q;

          /*
            Bottom pixel: average the segment's two bottom corners.
          */
          y=(ssize_t) ceil(segment->y2-0.5);
          (void) GetOneCacheViewVirtualPixel(u_view,(ssize_t)
            ceil(segment->x1-0.5),y,&u,exception);
          (void) GetOneCacheViewVirtualPixel(v_view,(ssize_t)
            ceil(segment->x2-0.5),y,&v,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            return(MagickTrue);
          SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/
            2.0,plasma));
          SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+
            v.green)/2.0,plasma));
          SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+
            v.blue)/2.0,plasma));
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (fabs(segment->y1-segment->y2) > MagickEpsilon)
        {
          register PixelPacket
            *magick_restrict q;

          /*
            Top pixel: average the segment's two top corners.
          */
          y=(ssize_t) ceil(segment->y1-0.5);
          (void) GetOneCacheViewVirtualPixel(u_view,(ssize_t)
            ceil(segment->x1-0.5),y,&u,exception);
          (void) GetOneCacheViewVirtualPixel(v_view,(ssize_t)
            ceil(segment->x2-0.5),y,&v,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            return(MagickTrue);
          SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+
            v.red)/2.0,plasma));
          SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+
            v.green)/2.0,plasma));
          SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+
            v.blue)/2.0,plasma));
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) > MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) > MagickEpsilon))
    {
      register PixelPacket
        *magick_restrict q;

      /*
        Middle pixel: average the two diagonal corners.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      y=(ssize_t) ceil(segment->y1-0.5);
      (void) GetOneCacheViewVirtualPixel(u_view,x,y,&u,exception);
      x=(ssize_t) ceil(segment->x2-0.5);
      y=(ssize_t) ceil(segment->y2-0.5);
      (void) GetOneCacheViewVirtualPixel(v_view,x,y,&v,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickTrue);
      SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0,
        plasma));
      SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+
        v.green)/2.0,plasma));
      SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/
        2.0,plasma));
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
    }
  /*
    Segments smaller than three pixels on both axes are complete.
  */
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(MagickTrue);
  return(MagickFalse);
}
MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth)
{
  CacheView
    *image_view,
    *u_view,
    *v_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  /*
    Initialize the image region with plasma fractal values via the
    recursive proxy.  The original body repeated the identical
    debug-check/LogMagickEvent pair twice; log once after both asserts.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  image_view=AcquireAuthenticCacheView(image,&image->exception);
  u_view=AcquireVirtualCacheView(image,&image->exception);
  v_view=AcquireVirtualCacheView(image,&image->exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
    attenuate,depth);
  random_info=DestroyRandomInfo(random_info);
  v_view=DestroyCacheView(v_view);
  u_view=DestroyCacheView(u_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
%  The format of the PolaroidImage method is:
%
%      Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
%        const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const double angle,ExceptionInfo *exception)
{
  const char
    *value;
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;
  size_t
    height;
  ssize_t
    quantum;
  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Border width: 1/25th of the larger image dimension, at least 10 pixels.
  */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  value=GetImageProperty(image,"Caption");
  if (value != (const char *) NULL)
    {
      char
        *caption;
      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      caption=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,
        value);
      if (caption != (char *) NULL)
        {
          char
            geometry[MaxTextExtent];
          DrawInfo
            *annotate_info;
          MagickBooleanType
            status;
          ssize_t
            count;
          TypeMetric
            metrics;
          annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
          (void) CloneString(&annotate_info->text,caption);
          /* Word-wrap the caption; count is the number of wrapped lines. */
          count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,
            &metrics,&caption);
          status=SetImageExtent(caption_image,image->columns,(size_t)
            ((count+1)*(metrics.ascent-metrics.descent)+0.5));
          if (status == MagickFalse)
            caption_image=DestroyImage(caption_image);
          else
            {
              caption_image->background_color=image->border_color;
              (void) SetImageBackgroundColor(caption_image);
              (void) CloneString(&annotate_info->text,caption);
              (void) FormatLocaleString(geometry,MaxTextExtent,"+0+%.20g",
                metrics.ascent);
              if (annotate_info->gravity == UndefinedGravity)
                (void) CloneString(&annotate_info->geometry,AcquireString(
                  geometry));
              (void) AnnotateImage(caption_image,annotate_info);
              /* Grow the picture canvas to make room for the caption. */
              height+=caption_image->rows;
            }
          annotate_info=DestroyDrawInfo(annotate_info);
          caption=DestroyString(caption);
        }
    }
  /*
    Compose source image (and caption) onto a bordered canvas.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image);
  (void) CompositeImage(picture_image,OverCompositeOp,image,quantum,quantum);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,OverCompositeOp,caption_image,
        quantum,(ssize_t) (image->rows+3*quantum/2));
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorDatabase("none",&picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel);
  /*
    Bend the picture: rotate 90 degrees, wave, then rotate back -90.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  InheritException(&bend_image->exception,exception);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Cast a soft shadow behind the bent picture.
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      /* DestroyImage() returns NULL, so this returns NULL to the caller. */
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      /* DestroyImage() returns NULL, so this returns NULL to the caller. */
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,OverCompositeOp,picture_image,
    (ssize_t) (-0.01*picture_image->columns/2.0),0L);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorDatabase("none",&polaroid_image->background_color,exception);
  /*
    Rotate the composite by the requested angle and trim the excess border.
  */
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"
  CacheView
    *image_view,
    *sepia_view;
  Image
    *sepia_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sepia_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&sepia_image->exception);
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;
    register ssize_t
      x;
    register PixelPacket
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;
      intensity=GetPixelIntensity(image,p);
      /* Red: saturate to white above the threshold, else lift toward it. */
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(q,ClampToQuantum(tone));
      /* Green: same shaping with a higher (7/6) pivot. */
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(q,ClampToQuantum(tone));
      /* Blue: darken by threshold/6, clipped at black. */
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(q,ClampToQuantum(tone));
      /* Floor green and blue at threshold/7 to keep the sepia cast. */
      tone=threshold/7.0;
      if ((double) GetPixelGreen(q) < tone)
        SetPixelGreen(q,ClampToQuantum(tone));
      if ((double) GetPixelBlue(q) < tone)
        SetPixelBlue(q,ClampToQuantum(tone));
      SetPixelOpacity(q,GetPixelOpacity(p));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SepiaToneImage)
#endif
        proceed=SetImageProgress(image,SepiaToneImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  /* Stretch contrast of the toned result for a richer look. */
  (void) NormalizeImage(sepia_image);
  (void) ContrastImage(sepia_image,MagickTrue);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double opacity,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double opacity,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"
  CacheView
    *image_view;
  Image
    *border_image,
    *clone_image,
    *shadow_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  RectangleInfo
    border_info;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod);
  clone_image->compose=OverCompositeOp;
  /* Transparent border wide enough (~2*sigma) for the blur to bleed into. */
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorDatabase("none",&clone_image->border_color,exception);
  border_image=BorderImage(clone_image,&border_info,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel);
  /*
    Shadow image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(border_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(border_image,border_image,border_image->rows,1)
#endif
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      /* Flatten color to the background; keep only opacity-scaled alpha. */
      SetPixelRed(q,border_image->background_color.red);
      SetPixelGreen(q,border_image->background_color.green);
      SetPixelBlue(q,border_image->background_color.blue);
      if (border_image->matte == MagickFalse)
        SetPixelOpacity(q,border_image->background_color.opacity);
      else
        SetPixelOpacity(q,ClampToQuantum((double) (QuantumRange-
          GetPixelAlpha(q)*opacity/100.0)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ShadowImage)
#endif
        proceed=SetImageProgress(image,ShadowImageTag,progress++,
          border_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /* Soften the silhouette by blurring the alpha channel only. */
  shadow_image=BlurImageChannel(border_image,AlphaChannel,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  /* Shift page geometry so the shadow lands at the requested offset. */
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting
% the center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  CacheView
    *random_view;
  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;
  MagickBooleanType
    status;
  MagickPixelPacket
    zero;
  RandomInfo
    **magick_restrict random_info;
  ssize_t
    y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif
  /*
    Simulate a pencil sketch: fill a double-size canvas with gray noise,
    motion-blur it along the requested angle, edge-detect/normalize/negate
    it into a "dodge" layer, color-dodge composite that over a clone of the
    original, then blend the original back in (20x80).

    Fixes over the previous version: the cache view was acquired twice in a
    row, leaking the first view; the standard argument asserts used by the
    sibling effects were also missing.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  GetMagickPixelPacket(random_image,&zero);
  random_info=AcquireRandomInfoThreadSet();
  random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) random_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();
    MagickPixelPacket
      pixel;
    register IndexPacket
      *magick_restrict indexes;
    register ssize_t
      x;
    register PixelPacket
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(random_view);
    pixel=zero;
    for (x=0; x < (ssize_t) random_image->columns; x++)
    {
      /* Gray noise: one random value replicated across the channels. */
      pixel.red=(MagickRealType) (QuantumRange*
        GetPseudoRandomValue(random_info[id]));
      pixel.green=pixel.red;
      pixel.blue=pixel.red;
      if (image->colorspace == CMYKColorspace)
        pixel.index=pixel.red;
      SetPixelPacket(random_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  random_info=DestroyRandomInfoThreadSet(random_info);
  random_view=DestroyCacheView(random_view);
  if (status == MagickFalse)
    {
      /* DestroyImage() returns NULL, signalling failure to the caller. */
      random_image=DestroyImage(random_image);
      return(random_image);
    }
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(dodge_image);
  (void) NegateImage(dodge_image,MagickFalse);
  (void) TransformImage(&dodge_image,(char *) NULL,"50%");
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  (void) CompositeImage(sketch_image,ColorDodgeCompositeOp,dodge_image,0,0);
  dodge_image=DestroyImage(dodge_image);
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  /* Blend 20% sketch with 80% original. */
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,BlendCompositeOp,blend_image,0,0);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold)
% MagickBooleanType SolarizeImageChannel(Image *image,
% const ChannelType channel,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold)
{
  /*
    Convenience wrapper: solarize the default channels, reporting errors
    through the image's own exception structure.
  */
  return(SolarizeImageChannel(image,DefaultChannels,threshold,
    &image->exception));
}
MagickExport MagickBooleanType SolarizeImageChannel(Image *image,
  const ChannelType channel,const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;
      /*
        Solarize colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        /* Invert each selected channel where it exceeds the threshold. */
        if ((channel & RedChannel) != 0)
          if ((double) image->colormap[i].red > threshold)
            image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((channel & GreenChannel) != 0)
          if ((double) image->colormap[i].green > threshold)
            image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((channel & BlueChannel) != 0)
          if ((double) image->colormap[i].blue > threshold)
            image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Solarize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;
    register PixelPacket
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Same inversion rule as the colormap path, per pixel. */
      if ((channel & RedChannel) != 0)
        if ((double) GetPixelRed(q) > threshold)
          SetPixelRed(q,QuantumRange-GetPixelRed(q));
      if ((channel & GreenChannel) != 0)
        if ((double) GetPixelGreen(q) > threshold)
          SetPixelGreen(q,QuantumRange-GetPixelGreen(q));
      if ((channel & BlueChannel) != 0)
        if ((double) GetPixelBlue(q) > threshold)
          SetPixelBlue(q,QuantumRange-GetPixelBlue(q));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SolarizeImage)
#endif
        proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image. Recover
% the hidden watermark later to prove the authenticity of an image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (alpha)=(Quantum) ((set) != 0 ? (size_t) (alpha) \
  | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"
  CacheView
    *stegano_view,
    *watermark_view;
  Image
    *stegano_image;
  int
    c;
  MagickBooleanType
    status;
  PixelPacket
    pixel;
  register PixelPacket
    *q;
  register ssize_t
    x;
  size_t
    depth,
    one;
  ssize_t
    i,
    j,
    k,
    y;
  /*
    Initialize steganographic image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  one=1UL;
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stegano_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&stegano_image->exception);
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  /*
    Hide watermark in low-order bits of image.  Bit i of each watermark
    pixel's intensity is written to bit plane j of successive carrier
    pixels, cycling through the red, green and blue channels; the carrier
    index k starts at image->offset.
  */
  c=0;
  i=0;
  j=0;
  depth=stegano_image->depth;
  k=image->offset;
  status=MagickTrue;
  watermark_view=AcquireVirtualCacheView(watermark,exception);
  stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
  for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
  {
    for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
    {
      for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
      {
        (void) GetOneCacheViewVirtualPixel(watermark_view,x,y,&pixel,exception);
        if ((k/(ssize_t) stegano_image->columns) >= (ssize_t) stegano_image->rows)
          break;
        q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
          stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
          exception);
        if (q == (PixelPacket *) NULL)
          break;
        switch (c)
        {
          case 0:
          {
            SetBit(GetPixelRed(q),j,GetBit(ClampToQuantum(GetPixelIntensity(
              image,&pixel)),i));
            break;
          }
          case 1:
          {
            SetBit(GetPixelGreen(q),j,GetBit(ClampToQuantum(GetPixelIntensity(
              image,&pixel)),i));
            break;
          }
          case 2:
          {
            SetBit(GetPixelBlue(q),j,GetBit(ClampToQuantum(GetPixelIntensity(
              image,&pixel)),i));
            break;
          }
        }
        if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        /*
          Wrap at the total pixel count.  Fix: the previous code wrapped at
          columns*columns, which is the wrong bound for non-square images.
        */
        if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
          k=0;
        if (k == image->offset)
          j++;
      }
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
          (depth-i),depth);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  stegano_view=DestroyCacheView(stegano_view);
  watermark_view=DestroyCacheView(watermark_view);
  if (stegano_image->storage_class == PseudoClass)
    (void) SyncImage(stegano_image);
  if (status == MagickFalse)
    stegano_image=DestroyImage(stegano_image);
  return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: an anaglyph with no offset between the frames.
  */
  Image
    *stereo_image;

  stereo_image=StereoAnaglyphImage(left_image,right_image,0,0,exception);
  return(stereo_image);
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"
  const Image
    *image;
  Image
    *stereo_image;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickCoreSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&stereo_image->exception);
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace);
  /*
    Copy left image to red channel and right image to blue channel.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;
    register ssize_t
      x;
    register PixelPacket
      *magick_restrict r;
    /* Left image is sampled shifted by (-x_offset, -y_offset). */
    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL) ||
        (r == (PixelPacket *) NULL))
      break;
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      /* Red from the left frame; green and blue from the right frame. */
      SetPixelRed(r,GetPixelRed(p));
      SetPixelGreen(r,GetPixelGreen(q));
      SetPixelBlue(r,GetPixelBlue(q));
      /* Average the two frames' opacity. */
      SetPixelOpacity(r,(GetPixelOpacity(p)+q->opacity)/2);
      p++;
      q++;
      r++;
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    NOTE(review): a pixel-access failure breaks the loop without clearing
    status, so a partially filled image may still be returned — confirm
    this best-effort behavior is intended.
  */
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"
  CacheView
    *image_view,
    *swirl_view;
  double
    radius;
  Image
    *swirl_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MagickPixelPacket
    zero;
  PointInfo
    center,
    scale;
  ssize_t
    y;
  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  swirl_image=CloneImage(image,0,0,MagickTrue,exception);
  if (swirl_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(swirl_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&swirl_image->exception);
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  if (swirl_image->background_color.opacity != OpaqueOpacity)
    swirl_image->matte=MagickTrue;
  /*
    Compute scaling factor.
  */
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  radius=MagickMax(center.x,center.y);
  /* Scale the shorter axis so the swirl region is circular in sample space. */
  scale.x=1.0;
  scale.y=1.0;
  if (image->columns > image->rows)
    scale.y=(double) image->columns/(double) image->rows;
  else
    if (image->columns < image->rows)
      scale.x=(double) image->rows/(double) image->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(swirl_image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
  swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,swirl_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      distance;
    MagickPixelPacket
      pixel;
    PointInfo
      delta;
    register IndexPacket
      *magick_restrict swirl_indexes;
    register ssize_t
      x;
    register PixelPacket
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    swirl_indexes=GetCacheViewAuthenticIndexQueue(swirl_view);
    delta.y=scale.y*(double) (y-center.y);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance < (radius*radius))
        {
          double
            cosine,
            factor,
            sine;
          /*
            Swirl the pixel.
          */
          /* Rotation falls off quadratically toward the ellipse edge. */
          factor=1.0-sqrt(distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          status=InterpolateMagickPixelPacket(image,image_view,
            UndefinedInterpolatePixel,(double) ((cosine*delta.x-sine*delta.y)/
            scale.x+center.x),(double) ((sine*delta.x+cosine*delta.y)/scale.y+
            center.y),&pixel,exception);
          if (status == MagickFalse)
            break;
          SetPixelPacket(swirl_image,&pixel,q,swirl_indexes+x);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SwirlImage)
#endif
        proceed=SetImageProgress(image,SwirlImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *opacity,
% const PixelPacket tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: a geometry string giving the tint strength as a percentage
%     per channel ("rho[,sigma,xi[,psi]]" = red[,green,blue[,opacity]]);
%     a single value applies to red, green, and blue alike.
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *opacity,
  const PixelPacket tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"

  CacheView
    *image_view,
    *tint_view;

  GeometryInfo
    geometry_info;

  Image
    *tint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    color_vector,   /* per-channel tint strength, weighted toward midtones */
    pixel;          /* scratch: parsed opacity percentages */

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate tint image: a direct-class clone of the input that receives the
    tinted pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  tint_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&tint_image->exception);
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  /*
    A non-gray tint on a grayscale image forces the result into sRGB so the
    color cast can actually be represented.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelGray(&tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace);
  if (opacity == (const char *) NULL)
    return(tint_image);  /* no opacity string: return the plain clone */
  /*
    Determine RGB values of the tint color.  The opacity geometry supplies
    per-channel percentages: rho=red (and default for all), sigma=green,
    xi=blue, psi=opacity.
  */
  flags=ParseGeometry(opacity,&geometry_info);
  pixel.red=geometry_info.rho;
  pixel.green=geometry_info.rho;
  pixel.blue=geometry_info.rho;
  pixel.opacity=(MagickRealType) OpaqueOpacity;
  if ((flags & SigmaValue) != 0)
    pixel.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    pixel.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    pixel.opacity=geometry_info.psi;
  /*
    The color vector is the tint scaled by the requested percentage, recentered
    by subtracting the tint's overall intensity.
  */
  color_vector.red=(MagickRealType) (pixel.red*tint.red/100.0-
    PixelPacketIntensity(&tint));
  color_vector.green=(MagickRealType) (pixel.green*tint.green/100.0-
    PixelPacketIntensity(&tint));
  color_vector.blue=(MagickRealType) (pixel.blue*tint.blue/100.0-
    PixelPacketIntensity(&tint));
  /*
    Tint image.  Each channel is offset by the color vector scaled by the
    midtone weighting f(x)=1-4*(x-0.5)^2, which is 0 at black/white and 1 at
    mid-gray.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        weight;  /* channel value recentered to [-0.5,0.5] */

      MagickPixelPacket
        pixel;  /* intentionally shadows the outer parse scratch variable */

      weight=QuantumScale*GetPixelRed(p)-0.5;
      pixel.red=(MagickRealType) GetPixelRed(p)+color_vector.red*(1.0-(4.0*
        (weight*weight)));
      SetPixelRed(q,ClampToQuantum(pixel.red));
      weight=QuantumScale*GetPixelGreen(p)-0.5;
      pixel.green=(MagickRealType) GetPixelGreen(p)+color_vector.green*(1.0-
        (4.0*(weight*weight)));
      SetPixelGreen(q,ClampToQuantum(pixel.green));
      weight=QuantumScale*GetPixelBlue(p)-0.5;
      pixel.blue=(MagickRealType) GetPixelBlue(p)+color_vector.blue*(1.0-(4.0*
        (weight*weight)));
      SetPixelBlue(q,ClampToQuantum(pixel.blue));
      SetPixelOpacity(q,GetPixelOpacity(p));  /* opacity passes through */
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TintImage)
#endif
        proceed=SetImageProgress(image,TintImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse[MaxTextExtent];

  DrawInfo
    *draw_info;

  Image
    *blur_image,      /* blurred oval mask used as the alpha channel */
    *canvas_image,    /* clone of the input receiving the mask */
    *oval_image,      /* black canvas with a white filled ellipse */
    *vignette_image;  /* final flattened result */

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&canvas_image->exception);
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  canvas_image->matte=MagickTrue;  /* vignette needs an alpha channel */
  oval_image=CloneImage(canvas_image,canvas_image->columns,canvas_image->rows,
    MagickTrue,exception);
  if (oval_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  /*
    Draw a white ellipse on a black background; the ellipse is centered on the
    image and shrunk by the (x,y) offsets.
  */
  (void) QueryColorDatabase("#000000",&oval_image->background_color,exception);
  (void) SetImageBackgroundColor(oval_image);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorDatabase("#ffffff",&draw_info->fill,exception);
  (void) QueryColorDatabase("#ffffff",&draw_info->stroke,exception);
  (void) FormatLocaleString(ellipse,MaxTextExtent,
    "ellipse %g,%g,%g,%g,0.0,360.0",image->columns/2.0,
    image->rows/2.0,image->columns/2.0-x,image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(oval_image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  /*
    Blur the oval so the mask edge — and hence the vignette — is soft.
  */
  blur_image=BlurImage(oval_image,radius,sigma,exception);
  oval_image=DestroyImage(oval_image);
  if (blur_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  blur_image->matte=MagickFalse;
  /*
    Copy the blurred mask intensity into the canvas' opacity, then flatten
    against the background to produce the softened edges.
  */
  (void) CompositeImage(canvas_image,CopyOpacityCompositeOp,blur_image,0,0);
  blur_image=DestroyImage(blur_image);
  vignette_image=MergeImageLayers(canvas_image,FlattenLayer,exception);
  canvas_image=DestroyImage(canvas_image);
  if (vignette_image != (Image *) NULL)
    (void) TransformImageColorspace(vignette_image,image->colorspace);
  return(vignette_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength is specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"

  CacheView
    *image_view,
    *wave_view;

  Image
    *wave_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    *sine_map;  /* per-column vertical displacement, precomputed */

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize wave image attributes.  The output is taller than the input by
    2*|amplitude| so the displaced pixels fit.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  wave_image=CloneImage(image,image->columns,(size_t) (image->rows+2.0*
    fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(wave_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&wave_image->exception);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  /*
    A translucent background color must show through the exposed regions.
  */
  if (wave_image->background_color.opacity != OpaqueOpacity)
    wave_image->matte=MagickTrue;
  /*
    Allocate sine map: sine_map[x] in [0,2*|amplitude|] shifts column x.
  */
  sine_map=(MagickRealType *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (MagickRealType *) NULL)
    {
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/
      wave_length));
  /*
    Wave image: every output pixel is interpolated from the source at
    (x, y-sine_map[x]); out-of-range rows resolve to the background color.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(wave_image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  (void) SetCacheViewVirtualPixelMethod(image_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(wave_view);
    pixel=zero;
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      status=InterpolateMagickPixelPacket(image,image_view,
        UndefinedInterpolatePixel,(double) x,(double) (y-sine_map[x]),&pixel,
        exception);
      if (status == MagickFalse)
        break;
      SetPixelPacket(wave_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WaveImage)
#endif
        proceed=SetImageProgress(image,WaveImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  image_view=DestroyCacheView(image_view);
  sine_map=(MagickRealType *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e l e t D e n o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveletDenoiseImage() removes noise from the image using a wavelet
% transform. The wavelet transform is a fast hierarchical scheme for
% processing an image using a set of consecutive lowpass and high_pass filters,
% followed by a decimation. This results in a decomposition into different
% scales which can be regarded as different “frequency bands”, determined by
% the mother wavelet. Adapted from dcraw.c by David Coffin.
%
% The format of the WaveletDenoiseImage method is:
%
% Image *WaveletDenoiseImage(const Image *image,const double threshold,
% const double softness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: set the threshold for smoothing.
%
% o softness: attenuate the smoothing threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  One level of the à-trous "hat" smoothing filter: kernel[i] receives the
  1-2-1 weighted average of samples spaced `scale' elements apart, walking
  the pixel buffer with step `stride' (1 for rows, image width for columns).
  The first and last `scale' outputs mirror the signal about its endpoints.
  Floating-point evaluation order matches the original pointer-walking
  implementation exactly.
*/
static inline void HatTransform(const float *magick_restrict pixels,
  const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
  register ssize_t
    i;

  /* Left boundary: mirror index scale-i stands in for the missing i-scale. */
  for (i=0; i < (ssize_t) scale; i++)
    kernel[i]=0.25f*(pixels[i*stride]+pixels[i*stride]+
      pixels[(scale-i)*stride]+pixels[(scale+i)*stride]);
  /* Interior: plain 1-2-1 average of i-scale, i, i+scale. */
  for ( ; i < (ssize_t) (extent-scale); i++)
    kernel[i]=0.25f*(2.0f*pixels[i*stride]+pixels[(i-scale)*stride]+
      pixels[(i+scale)*stride]);
  /* Right boundary: mirror about the last element for i+scale overruns. */
  for ( ; i < (ssize_t) extent; i++)
    kernel[i]=0.25f*(pixels[i*stride]+pixels[i*stride]+
      pixels[(i-scale)*stride]+pixels[(2*(extent-1)-(i+scale))*stride]);
}
MagickExport Image *WaveletDenoiseImage(const Image *image,
  const double threshold,const double softness,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *noise_view;

  float
    *kernel,   /* per-thread scratch row/column for HatTransform */
    *pixels;   /* 3 planes of image-size floats: band 0 + 2 ping-pong planes */

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickSizeType
    number_pixels;

  MemoryInfo
    *pixels_info;

  size_t
    max_channels;

  ssize_t
    channel;

  /*
    Expected noise standard deviation per wavelet level (from dcraw).
  */
  static const double
    noise_levels[]= {
      0.8002, 0.2735, 0.1202, 0.0585, 0.0291, 0.0152, 0.0080, 0.0044 };

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  noise_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  if (AcquireMagickResource(WidthResource,3*image->columns) == MagickFalse)
    {
      /* Fix: release the clone before throwing; it was previously leaked. */
      noise_image=DestroyImage(noise_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
    sizeof(*pixels));
  kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns)+1,
    GetOpenMPMaximumThreads()*sizeof(*kernel));
  if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
    {
      if (kernel != (float *) NULL)
        kernel=(float *) RelinquishMagickMemory(kernel);
      if (pixels_info != (MemoryInfo *) NULL)
        pixels_info=RelinquishVirtualMemory(pixels_info);
      /* Fix: release the clone before throwing; it was previously leaked. */
      noise_image=DestroyImage(noise_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(float *) GetVirtualMemoryBlob(pixels_info);
  status=MagickTrue;
  number_pixels=image->columns*image->rows;
  max_channels=(size_t) (image->colorspace == CMYKColorspace ? 4 : 3);
  image_view=AcquireAuthenticCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
  for (channel=0; channel < (ssize_t) max_channels; channel++)
  {
    register ssize_t
      i;

    size_t
      high_pass,  /* offset of the plane holding the current detail band */
      low_pass;   /* offset of the plane receiving the smoothed signal */

    ssize_t
      level,
      y;

    if (status == MagickFalse)
      continue;
    /*
      Copy channel from image to wavelet pixel array (plane 0).
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const IndexPacket
        *magick_restrict indexes;

      register const PixelPacket
        *magick_restrict p;

      ssize_t
        x;

      p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          status=MagickFalse;
          break;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (channel)
        {
          case 0: pixels[i]=(float) GetPixelRed(p); break;
          case 1: pixels[i]=(float) GetPixelGreen(p); break;
          case 2: pixels[i]=(float) GetPixelBlue(p); break;
          case 3: pixels[i]=(float) indexes[x]; break;  /* CMYK black */
          default: break;
        }
        i++;
        p++;
      }
    }
    /*
      Low pass filter outputs are called approximation kernel & high pass
      filters are referred to as detail kernel.  The detail kernel
      have high values in the noisy parts of the signal.
    */
    high_pass=0;
    for (level=0; level < 5; level++)
    {
      double
        magnitude;

      ssize_t
        x,
        y;

      /* Ping-pong the smoothed plane between planes 1 and 2. */
      low_pass=(size_t) (number_pixels*((level & 0x01)+1));
      /*
        Separable smoothing: hat transform along rows, then columns, with the
        sample spacing doubling at each level (à-trous scheme).
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        register float
          *magick_restrict p,
          *magick_restrict q;

        register ssize_t
          x;

        p=kernel+id*image->columns;
        q=pixels+y*image->columns;
        HatTransform(q+high_pass,1,image->columns,(size_t) (1UL << level),p);
        q+=low_pass;
        for (x=0; x < (ssize_t) image->columns; x++)
          *q++=(*p++);
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        register float
          *magick_restrict p,
          *magick_restrict q;

        register ssize_t
          y;

        p=kernel+id*image->rows;
        q=pixels+x+low_pass;
        HatTransform(q,image->columns,image->rows,(size_t) (1UL << level),p);
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          *q=(*p++);
          q+=image->columns;
        }
      }
      /*
        To threshold, each coefficient is compared to a threshold value and
        attenuated / shrunk by some factor.  The detail band becomes
        high_pass-low_pass; coefficients above the magnitude are soft-shrunk,
        the rest are scaled by `softness'.
      */
      magnitude=threshold*noise_levels[level];
      for (i=0; i < (ssize_t) number_pixels; ++i)
      {
        pixels[high_pass+i]-=pixels[low_pass+i];
        if (pixels[high_pass+i] < -magnitude)
          pixels[high_pass+i]+=magnitude-softness*magnitude;
        else
          if (pixels[high_pass+i] > magnitude)
            pixels[high_pass+i]-=magnitude-softness*magnitude;
          else
            pixels[high_pass+i]*=softness;
        /* Accumulate the thresholded detail back into plane 0. */
        if (high_pass != 0)
          pixels[i]+=pixels[high_pass+i];
      }
      high_pass=low_pass;
    }
    /*
      Reconstruct image from the thresholded wavelet kernel: accumulated
      detail (plane 0) plus the final low-pass approximation.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register IndexPacket
        *magick_restrict noise_indexes;

      register PixelPacket
        *magick_restrict q;

      register ssize_t
        x;

      q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
        exception);
      if (q == (PixelPacket *) NULL)
        {
          status=MagickFalse;
          break;
        }
      noise_indexes=GetCacheViewAuthenticIndexQueue(noise_view);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          pixel;

        pixel=pixels[i]+pixels[low_pass+i];
        switch (channel)
        {
          case 0: SetPixelRed(q,ClampToQuantum(pixel)); break;
          case 1: SetPixelGreen(q,ClampToQuantum(pixel)); break;
          case 2: SetPixelBlue(q,ClampToQuantum(pixel)); break;
          case 3: SetPixelIndex(noise_indexes+x,ClampToQuantum(pixel)); break;
          default: break;
        }
        i++;
        q++;
      }
      sync=SyncCacheViewAuthenticPixels(noise_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): progress is reported under AddNoiseImageTag —
           presumably inherited from AddNoiseImage; confirm intended. */
        proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
          channel,max_channels);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  kernel=(float *) RelinquishMagickMemory(kernel);
  pixels_info=RelinquishVirtualMemory(pixels_info);
  return(noise_image);
}
|
local_temperature_average_response_function.h | // KRATOS ___ ___ _ ___ __ ___ ___ ___ ___
// / __/ _ \| \| \ \ / /__| \_ _| __| __|
// | (_| (_) | .` |\ V /___| |) | || _|| _|
// \___\___/|_|\_| \_/ |___/___|_| |_| APPLICATION
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Jordi Cotela
//
#ifndef KRATOS_LOCAL_TEMPERATURE_AVERAGE_RESPONSE_FUNCTION_H_INCLUDED
#define KRATOS_LOCAL_TEMPERATURE_AVERAGE_RESPONSE_FUNCTION_H_INCLUDED
#include "includes/kratos_flags.h"
#include "includes/model_part.h"
#include "utilities/variable_utils.h"
#include "response_functions/adjoint_response_function.h"
namespace Kratos {
///@addtogroup ConvectionDiffusionApplication
///@{
///@name Kratos Classes
///@{
/// Adjoint response function measuring the average temperature over the nodes
/// of a target (sub-)model part.  Target nodes are flagged STRUCTURE during
/// construction and each carries its neighbour-element count so element-local
/// gradient contributions can be averaged consistently.
class LocalTemperatureAverageResponseFunction: public AdjointResponseFunction
{
public:
    ///@name Type Definitions
    ///@{

    KRATOS_CLASS_POINTER_DEFINITION(LocalTemperatureAverageResponseFunction);

    ///@}
    ///@name Life Cycle
    ///@{

    /// Constructor.
    /// @param Settings must contain "model_part_name": the target model part
    ///        (either rModelPart itself or one of its sub model parts).
    /// @param rModelPart the complete model part the adjoint problem runs on.
    LocalTemperatureAverageResponseFunction(Parameters Settings, ModelPart& rModelPart)
        : mrModelPart(rModelPart)
    {
        KRATOS_TRY;

        std::string target_model_part = Settings["model_part_name"].GetString();
        auto& r_target_model_part = GetTargetModelPart(rModelPart, target_model_part);
        auto& r_nodes = r_target_model_part.Nodes();
        mNumNodes = r_nodes.size();

        // Mark the target nodes so element loops can recognize them.
        VariableUtils variable_utils;
        variable_utils.SetFlag(STRUCTURE,true,r_nodes);

        // Note: this should not be parallel, the operation is not threadsafe if the variable is uninitialized
        for (auto& r_node : r_nodes)
        {
            r_node.SetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS,0);
        }

        // Global node count across MPI ranks.
        // NOTE(review): nodes shared between ranks may be counted once per
        // owning rank's local container — confirm partition-interface handling.
        mNumNodes = rModelPart.GetCommunicator().GetDataCommunicator().SumAll(mNumNodes);

        // Count, per flagged node, how many elements touch it.
        auto& r_elements = rModelPart.Elements();
        const int num_elements = r_elements.size();
        #pragma omp parallel for
        for (int i = 0; i < num_elements; i++)
        {
            auto i_elem = r_elements.begin() + i;
            auto& r_geom = i_elem->GetGeometry();
            // NOTE(review): this inner `i` shadows the outer element index.
            for (unsigned int i = 0; i < r_geom.PointsNumber(); i++)
            {
                auto& r_node = r_geom[i];
                if (r_node.Is(STRUCTURE))
                {
                    // Per-node lock: several threads may touch the same node.
                    r_node.SetLock();
                    r_node.GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS) += 1;
                    r_node.UnSetLock();
                }
            }
        }

        // Accumulate neighbour counts across MPI partitions.
        rModelPart.GetCommunicator().AssembleNonHistoricalData(NUMBER_OF_NEIGHBOUR_ELEMENTS);

        KRATOS_CATCH("");
    }

    /// Destructor.
    ~LocalTemperatureAverageResponseFunction() override
    {
    }

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /// No per-step initialization is required for this response.
    void Initialize() override
    {
        KRATOS_TRY;
        KRATOS_CATCH("");
    }

    /// d(response)/d(primal dof) contribution of an element.
    void CalculateGradient(const Element& rAdjointElement,
                           const Matrix& rResidualGradient,
                           Vector& rResponseGradient,
                           const ProcessInfo& rProcessInfo) override
    {
        ComputePointTemperatureSensitivityContribution(rResidualGradient, rAdjointElement.GetGeometry().Points(),rResponseGradient);
    }

    /// Conditions do not contribute to this response: zero gradient.
    void CalculateGradient(const Condition& rAdjointCondition,
                           const Matrix& rResidualGradient,
                           Vector& rResponseGradient,
                           const ProcessInfo& rProcessInfo) override
    {
        noalias(rResponseGradient) = ZeroVector(rResidualGradient.size1());
    }

    /// Gradient w.r.t. first time derivatives (same nodal contribution).
    void CalculateFirstDerivativesGradient(const Element& rAdjointElement,
                                           const Matrix& rResidualGradient,
                                           Vector& rResponseGradient,
                                           const ProcessInfo& rProcessInfo) override
    {
        ComputePointTemperatureSensitivityContribution(rResidualGradient, rAdjointElement.GetGeometry().Points(),rResponseGradient);
    }

    /// Gradient w.r.t. second time derivatives (same nodal contribution).
    void CalculateSecondDerivativesGradient(const Element& rAdjointElement,
                                            const Matrix& rResidualGradient,
                                            Vector& rResponseGradient,
                                            const ProcessInfo& rProcessInfo) override
    {
        ComputePointTemperatureSensitivityContribution(rResidualGradient, rAdjointElement.GetGeometry().Points(),rResponseGradient);
    }

    /// The response has no explicit (partial) dependence on rVariable: zero.
    void CalculatePartialSensitivity(Element& rAdjointElement,
                                     const Variable<array_1d<double, 3>>& rVariable,
                                     const Matrix& rSensitivityMatrix,
                                     Vector& rSensitivityGradient,
                                     const ProcessInfo& rProcessInfo) override
    {
        if (rSensitivityGradient.size() != rSensitivityMatrix.size1())
            rSensitivityGradient.resize(rSensitivityMatrix.size1(), false);

        noalias(rSensitivityGradient) = ZeroVector(rSensitivityMatrix.size1());
    }

    /// Condition counterpart of the partial sensitivity: zero.
    void CalculatePartialSensitivity(Condition& rAdjointElement,
                                     const Variable<array_1d<double, 3>>& rVariable,
                                     const Matrix& rSensitivityMatrix,
                                     Vector& rSensitivityGradient,
                                     const ProcessInfo& rProcessInfo) override
    {
        if (rSensitivityGradient.size() != rSensitivityMatrix.size1())
            rSensitivityGradient.resize(rSensitivityMatrix.size1(), false);

        noalias(rSensitivityGradient) = ZeroVector(rSensitivityMatrix.size1());
    }

    /// Direct evaluation of the response value is not supported.
    double CalculateValue(ModelPart& rModelPart) override
    {
        KRATOS_TRY;
        KRATOS_ERROR
            << "PointTemperature::CalculateValue(ModelPart& rModelPart) is not implemented!!!\n";
        KRATOS_CATCH("");
    }

    ///@}

protected:
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}

private:
    ///@name Member Variables
    ///@{

    ModelPart& mrModelPart;

    // Global (MPI-summed) number of target nodes.
    int mNumNodes = 0;

    ///@}
    ///@name Private Operators
    ///@{

    /// Fill rLocalSensitivityContribution with 1/(neighbour_elements*mNumNodes)
    /// for each STRUCTURE node of the element, so that summing over all
    /// elements yields the average-temperature derivative.  The residual
    /// gradient is used only for sizing the output.
    void ComputePointTemperatureSensitivityContribution(
        const Matrix& rDerivativesOfResidual,
        const Element::NodesArrayType& rNodes,
        Vector& rLocalSensitivityContribution) const
    {
        if (rLocalSensitivityContribution.size() != rDerivativesOfResidual.size1())
            rLocalSensitivityContribution.resize(rDerivativesOfResidual.size1(), false);

        noalias(rLocalSensitivityContribution) = ZeroVector(rLocalSensitivityContribution.size());

        const unsigned int num_nodes = rNodes.size();
        for (unsigned int i = 0; i < num_nodes; i++)
        {
            if (rNodes[i].Is(STRUCTURE))
            {
                double factor = 1.0 / (rNodes[i].GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS)*mNumNodes);
                rLocalSensitivityContribution[i] = factor;
            }
        }
    }

    /// Resolve the target model part by name: the root itself or one of its
    /// sub model parts; errors out otherwise.
    ModelPart& GetTargetModelPart(ModelPart& rModelPart, const std::string& rTargetModelPartName)
    {
        KRATOS_TRY;
        if (rModelPart.Name() == rTargetModelPartName)
        {
            return rModelPart;
        }
        else if (rModelPart.HasSubModelPart(rTargetModelPartName))
        {
            return rModelPart.GetSubModelPart(rTargetModelPartName);
        }
        else
        {
            KRATOS_ERROR << "Unknown ModelPart " << rTargetModelPartName << "." << std::endl;
        }
        KRATOS_CATCH("")
        // Unreachable: KRATOS_ERROR throws; kept to satisfy the compiler.
        return rModelPart;
    }

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
};
///@} // Kratos Classes
///@} // ConvectionDiffusionApplication group
}
#endif // KRATOS_LOCAL_TEMPERATURE_AVERAGE_RESPONSE_FUNCTION_H_INCLUDED
|
collision.c | #include <stdio.h>
#include <omp.h>
/*
 * OpenMP demo: every thread prints a line before and after a critical
 * section; only the "is working" message is serialized by the critical.
 */
int main()
{
  int tid;

#pragma omp parallel private(tid)
  {
    tid = omp_get_thread_num();
    printf("Before critical %i \n", tid);
#pragma omp critical
    {
      printf("Thread %i is working \n", tid);
    }
    printf("After critical %i \n", tid);
  }
  return 0;
} |
wtsne_inl.h | /*
*
* Copyright (c) 2014, Nicola Pezzotti (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY NICOLA PEZZOTTI ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
#ifndef WSNE_INL
#define WSNE_INL
#include "hdi/dimensionality_reduction/wtsne.h"
#include "hdi/utils/math_utils.h"
#include "hdi/utils/log_helper_functions.h"
#include "hdi/utils/scoped_timers.h"
#include "weighted_sptree.h"
#include <random>
#ifdef __USE_GCD__
#include <dispatch/dispatch.h>
#endif
#pragma warning( push )
#pragma warning( disable : 4267)
#pragma warning( push )
#pragma warning( disable : 4291)
#pragma warning( push )
#pragma warning( disable : 4996)
#pragma warning( push )
#pragma warning( disable : 4018)
#pragma warning( push )
#pragma warning( disable : 4244)
//#define FLANN_USE_CUDA
#include "flann/flann.h"
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
namespace hdi{
namespace dr{
/////////////////////////////////////////////////////////////////////////
  // Default optimization parameters for the weighted tSNE gradient descent.
  template <typename scalar, typename sparse_scalar_matrix>
  WeightedTSNE<scalar, sparse_scalar_matrix>::Parameters::Parameters():
    _seed(-1),                      // negative: non-deterministic seeding
    _embedding_dimensionality(2),
    _minimum_gain(0.1),
    _eta(200),                      // gradient step size
    _momentum(0.2),                 // momentum before the switching iteration
    _final_momentum(0.5),           // momentum after _mom_switching_iter
    _mom_switching_iter(250),
    _exaggeration_factor(4),        // early-exaggeration multiplier on P
    _remove_exaggeration_iter(250), // iteration at which exaggeration starts decaying
    _exponential_decay_iter(150)    // length of the exaggeration decay
  {}
/////////////////////////////////////////////////////////////////////////
  // Construct an uninitialized solver; initialize() must be called before use.
  template <typename scalar, typename sparse_scalar_matrix>
  WeightedTSNE<scalar, sparse_scalar_matrix>::WeightedTSNE():
    _initialized(false),
    _logger(nullptr),
    _theta(0)
  {
  }
  // Mark the solver uninitialized so it can be initialized again;
  // the embedding itself is left untouched.
  template <typename scalar, typename sparse_scalar_matrix>
  void WeightedTSNE<scalar, sparse_scalar_matrix>::reset(){
    _initialized = false;
  }
  // Clear the embedding data and mark the solver uninitialized.
  template <typename scalar, typename sparse_scalar_matrix>
  void WeightedTSNE<scalar, sparse_scalar_matrix>::clear(){
    _embedding->clear();
    _initialized = false;
  }
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::getEmbeddingPosition(scalar_vector_type& embedding_position, data_handle_type handle)const{
if(!_initialized){
throw std::logic_error("Algorithm must be initialized before ");
}
embedding_position.resize(_params._embedding_dimensionality);
for(int i = 0; i < _params._embedding_dimensionality; ++i){
(*_embedding_container)[i] = (*_embedding_container)[handle*_params._embedding_dimensionality + i];
}
}
/////////////////////////////////////////////////////////////////////////
  // Initialize the solver from conditional probabilities: allocates the
  // embedding and the optimizer buffers, symmetrizes the input into the joint
  // distribution _P, seeds the embedding positions and computes the weights.
  template <typename scalar, typename sparse_scalar_matrix>
  void WeightedTSNE<scalar, sparse_scalar_matrix>::initialize(const sparse_scalar_matrix& probabilities, data::Embedding<scalar_type>* embedding, Parameters params){
    utils::secureLog(_logger,"Initializing W-tSNE...");
    {//Aux data
      _params = params;
      unsigned int size = probabilities.size();
      // NOTE(review): size*size may overflow unsigned int for large inputs.
      unsigned int size_sq = probabilities.size()*probabilities.size();
      _embedding = embedding;
      _embedding_container = &(embedding->getContainer());
      _embedding->resize(_params._embedding_dimensionality,size);
      _P.resize(size);
      _Q.resize(size_sq);   // dense pairwise Q: quadratic memory
      _gradient.resize(size*params._embedding_dimensionality,0);
      _previous_gradient.resize(size*params._embedding_dimensionality,0);
      _gain.resize(size*params._embedding_dimensionality,1);
    }
    utils::secureLogValue(_logger,"Number of data points",_P.size());
    computeHighDimensionalDistribution(probabilities);
    initializeEmbeddingPosition(params._seed);
    computeWeights();
    _iteration = 0;
    _initialized = true;
    utils::secureLog(_logger,"Initialization complete!");
  }
  // Same as initialize(), but the caller supplies an already-symmetrized
  // joint probability distribution which is copied into _P directly.
  template <typename scalar, typename sparse_scalar_matrix>
  void WeightedTSNE<scalar, sparse_scalar_matrix>::initializeWithJointProbabilityDistribution(const sparse_scalar_matrix& distribution, data::Embedding<scalar_type>* embedding, Parameters params){
    utils::secureLog(_logger,"Initializing W-tSNE with a user-defined joint-probability distribution...");
    {//Aux data
      _params = params;
      unsigned int size = distribution.size();
      // NOTE(review): size*size may overflow unsigned int for large inputs.
      unsigned int size_sq = distribution.size()*distribution.size();
      _embedding = embedding;
      _embedding_container = &(embedding->getContainer());
      _embedding->resize(_params._embedding_dimensionality,size);
      _P.resize(size);
      _Q.resize(size_sq);   // dense pairwise Q: quadratic memory
      _gradient.resize(size*params._embedding_dimensionality,0);
      _previous_gradient.resize(size*params._embedding_dimensionality,0);
      _gain.resize(size*params._embedding_dimensionality,1);
    }
    utils::secureLogValue(_logger,"Number of data points",_P.size());
    _P = distribution;  // trusted to be joint (symmetric) already
    initializeEmbeddingPosition(params._seed);
    computeWeights();
    _iteration = 0;
    _initialized = true;
    utils::secureLog(_logger,"Initialization complete!");
  }
//! Symmetrize the input conditional probabilities into the joint distribution:
//! _P[i][j] = _P[j][i] = (p(j|i) + p(i|j)) / 2.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::computeHighDimensionalDistribution(const sparse_scalar_matrix& probabilities){
  utils::secureLog(_logger,"Computing high-dimensional joint probability distribution...");
  const int n = getNumberOfDataPoints();
  //Can be improved by using the symmetry of the matrix (half the memory) //TODO
  for(int j = 0; j < n; ++j){
    for(auto& elem: probabilities[j]){
      scalar_type v0 = elem.second;                 // p(elem.first | j)
      auto iter = probabilities[elem.first].find(j);
      scalar_type v1 = 0.;                          // p(j | elem.first), 0 when absent
      if(iter != probabilities[elem.first].end())
        v1 = iter->second;
      // Write both halves so _P is symmetric regardless of the input's sparsity pattern.
      _P[j][elem.first] = static_cast<scalar_type>((v0+v1)*0.5);
      _P[elem.first][j] = static_cast<scalar_type>((v0+v1)*0.5);
    }
  }
}
//! Derive the default per-point weights: each point's weight is the total
//! probability mass of its row in _P.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::computeWeights(){
  _weights.clear();
  _weights.resize(_P.size(),0);
  for(int idx = 0; idx < _weights.size(); ++idx){
    scalar_type row_sum = 0;
    for(const auto& entry : _P[idx]){
      row_sum += entry.second;
    }
    _weights[idx] = row_sum;
  }
  utils::secureLogVectorStats(_logger,"Weights",_weights);
}
//! Set weights (overwrites the default weights computed by computeWeights()).
//! \param weights one weight per data point; must match _P.size().
//! \throws a logic error (via checkAndThrowLogic) on a size mismatch.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::setWeights(const scalar_vector_type& weights){
  checkAndThrowLogic(weights.size() == _P.size(), "setWeights: wrong size");
  _weights = weights;
  utils::secureLogVectorStats(_logger,"Weights",_weights);
}
//! Draw each embedding coordinate from a Gaussian using the Marsaglia polar method.
//! \param seed       RNG seed; negative values seed from the current time.
//! \param multiplier scale applied to each sampled coordinate.
//! NOTE(review): each polar-method draw yields a pair (x, y) of independent
//! Gaussians but only x is used; y is discarded — confirm this matches the
//! reference implementation.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::initializeEmbeddingPosition(int seed, double multiplier){
  utils::secureLog(_logger,"Initializing the embedding...");
  if(seed < 0){
    std::srand(static_cast<unsigned int>(time(NULL)));
  }
  else{
    std::srand(seed);
  }
  for(auto& v : (*_embedding_container)){
    double x(0.);
    double y(0.);
    double radius(0.);
    // Rejection-sample a point strictly inside the unit disc, excluding the origin.
    do {
      x = 2 * (rand() / ((double)RAND_MAX + 1)) - 1;
      y = 2 * (rand() / ((double)RAND_MAX + 1)) - 1;
      radius = (x * x) + (y * y);
    } while((radius >= 1.0) || (radius == 0.0));
    // Polar (Box-Muller variant) transform to a standard normal.
    radius = sqrt(-2 * log(radius) / radius);
    x *= radius;
    y *= radius;
    v = static_cast<scalar_type>(x * multiplier);
  }
}
//! Perform a single gradient-descent iteration.
//! \param mult extra multiplier applied to the position update.
//! \throws std::logic_error when initialize() has not been called.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::doAnIteration(double mult){
  if(!_initialized){
    throw std::logic_error("Cannot compute a gradient descent iteration on unitialized data");
  }
  // Progress notifications at the two scheduled switch points.
  if(_iteration == _params._mom_switching_iter){
    utils::secureLog(_logger,"Switch to final momentum...");
  }
  if(_iteration == _params._remove_exaggeration_iter){
    utils::secureLog(_logger,"Remove exaggeration...");
  }
  // theta == 0 selects the exact O(n^2) gradient; otherwise Barnes-Hut.
  if(_theta == 0){
    doAnIterationExact(mult);
    return;
  }
  doAnIterationBarnesHut(mult);
}
//! Current exaggeration multiplier for the attractive forces: full exaggeration
//! up to _remove_exaggeration_iter, then an exponential fade back to 1 over the
//! following _exponential_decay_iter iterations.
template <typename scalar, typename sparse_scalar_matrix>
scalar WeightedTSNE<scalar, sparse_scalar_matrix>::exaggerationFactor(){
  if(_iteration <= _params._remove_exaggeration_iter){
    return _params._exaggeration_factor;
  }
  if(_iteration <= (_params._remove_exaggeration_iter + _params._exponential_decay_iter)){
    const double decay = std::exp(-scalar_type(_iteration-_params._remove_exaggeration_iter)/30.);
    return static_cast<scalar_type>(1 + (_params._exaggeration_factor-1)*decay);
  }
  return 1;
}
//! One exact-gradient iteration: refresh Q, compute the KL gradient, step.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::doAnIterationExact(double mult){
  computeLowDimensionalDistribution();        // refresh Q and its normalization term
  computeExactGradient(exaggerationFactor()); // KL gradient with the current exaggeration
  updateTheEmbedding(mult);                   // adaptive-gain momentum step
}
//! One Barnes-Hut iteration: approximate KL gradient, then step.
//! \param mult extra multiplier applied to the position update.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::doAnIterationBarnesHut(double mult){
  //Compute gradient of the KL function using the Barnes Hut approximation
  computeBarnesHutGradient(exaggerationFactor());
  //Update the embedding based on the gradient.
  //BUG FIX: forward the caller's step multiplier; the original invoked
  //updateTheEmbedding() with no argument and silently ignored 'mult',
  //inconsistently with doAnIterationExact().
  updateTheEmbedding(mult);
}
//! Recompute the (unnormalized) Student-t similarities _Q for every pair of
//! points and the weighted normalization term _normalization_Q.
//! NOTE(review): the diagonal _Q[j*n+j] is never written here (the line that
//! would zero it is commented out), so it keeps its previous value — confirm
//! the buffer is zeroed elsewhere.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::computeLowDimensionalDistribution(){
  const int n = getNumberOfDataPoints();
  double sum_Q = 0;
  // Loop over all edges in the graph (GCD on Apple, OpenMP elsewhere).
#ifdef __USE_GCD__
  std::cout << "GCD dispatch, wtsne_inl 303.\n";
  dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
  #pragma omp parallel for
  for(int j = 0; j < n; ++j){
#endif //__USE_GCD__
    //_Q[j*n + j] = 0;
    for(int i = j+1; i < n; ++i){
      // Squared Euclidean distance between embedded points j and i.
      const double euclidean_dist_sq(
        utils::euclideanDistanceSquared<scalar_type>(
          (*_embedding_container).begin()+j*_params._embedding_dimensionality,
          (*_embedding_container).begin()+(j+1)*_params._embedding_dimensionality,
          (*_embedding_container).begin()+i*_params._embedding_dimensionality,
          (*_embedding_container).begin()+(i+1)*_params._embedding_dimensionality
        )
      );
      // Student-t kernel (one degree of freedom); symmetric, fill both halves.
      const double v = 1./(1.+euclidean_dist_sq);
      _Q[j*n + i] = static_cast<scalar_type>(v);
      _Q[i*n + j] = static_cast<scalar_type>(v);
    }
  }
#ifdef __USE_GCD__
  );
#endif
  // Weighted normalization: Z = sum over all (j,i) of w_j * w_i * q_ji (serial).
  for(int j = 0; j < n; ++j){
    for(int i = 0; i < n; ++i){
      sum_Q += _Q[j*n + i]*_weights[j]*_weights[i];
    }
  }
  _normalization_Q = static_cast<scalar_type>(sum_Q);
}
//! Exact O(n^2) gradient of the weighted KL divergence into _gradient.
//! \param exaggeration multiplier applied to the attractive term only.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::computeExactGradient(double exaggeration){
  const int n = getNumberOfDataPoints();
  const int dim = _params._embedding_dimensionality;
  // Reset the gradient accumulator.
  for(int i = 0; i < n; ++i){
    for(int d = 0; d < dim; ++d){
      _gradient[i * dim + d] = 0;
    }
  }
  for(int i = 0; i < n; ++i){
    // Repulsive term: dense over all pairs, scaled by w_i * w_j and
    // normalized by _normalization_Q (must be up to date).
    for(int j = 0; j < n; ++j){
      for(int d = 0; d < dim; ++d){
        const int idx = i*n + j;
        const double distance((*_embedding_container)[i * dim + d] - (*_embedding_container)[j * dim + d]);
        const double negative(_weights[i] * _weights[j] * _Q[idx] * _Q[idx] * distance / _normalization_Q);
        _gradient[i * dim + d] += static_cast<scalar_type>(-4*negative);
      }
    }
    // Attractive term: sparse over the non-zero entries of row i of _P,
    // with the exaggeration multiplier applied.
    for(auto& elem: _P[i]){
      for(int d = 0; d < dim; ++d){
        const int j = elem.first;
        const int idx = i*n + j;
        const double distance((*_embedding_container)[i * dim + d] - (*_embedding_container)[j * dim + d]);
        double p_ij = elem.second/n;  // joint probability normalized by the point count
        const double positive(p_ij * _Q[idx] * distance);
        _gradient[i * dim + d] += static_cast<scalar_type>(4*exaggeration*positive);
      }
    }
  }
}
//! Approximate the KL gradient with a weighted Barnes-Hut space-partitioning
//! tree (accuracy controlled by _theta).
//! \param exaggeration multiplier applied to the attractive (edge) forces.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::computeBarnesHutGradient(double exaggeration){
  typedef double hp_scalar_type;  // accumulate forces in double precision
  WeightedSPTree<scalar_type> sptree(_params._embedding_dimensionality,_embedding->getContainer().data(),_weights.data(),getNumberOfDataPoints());
  scalar_type sum_Q = .0;
  std::vector<hp_scalar_type> positive_forces(getNumberOfDataPoints()*_params._embedding_dimensionality);
  // __block makes the buffer writable from inside the GCD block below.
#ifdef __USE_GCD__
  __block std::vector<hp_scalar_type> negative_forces(getNumberOfDataPoints()*_params._embedding_dimensionality);
#else
  std::vector<hp_scalar_type> negative_forces(getNumberOfDataPoints()*_params._embedding_dimensionality);
#endif //__USE_GCD__
  // Attractive forces over the sparse _P; exaggeration is folded in here.
  sptree.computeEdgeForces(_P, exaggeration, positive_forces.data());
#ifdef __USE_GCD__
  __block std::vector<hp_scalar_type> sum_Q_subvalues(getNumberOfDataPoints(),0);
#else
  std::vector<hp_scalar_type> sum_Q_subvalues(getNumberOfDataPoints(),0);
#endif //__USE_GCD__
#ifdef __USE_GCD__
  std::cout << "GCD dispatch, wtsne_inl 303.\n";
  dispatch_apply(getNumberOfDataPoints(), dispatch_get_global_queue(0, 0), ^(size_t n) {
#else
  #pragma omp parallel for
  for(int n = 0; n < getNumberOfDataPoints(); n++){
#endif //__USE_GCD__
    // Repulsive forces: each worker writes its own output slice and partial sum,
    // so the parallel loop needs no synchronization.
    sptree.computeNonEdgeForces(n, _theta, negative_forces.data() + n * _params._embedding_dimensionality, sum_Q_subvalues[n]);
  }
#ifdef __USE_GCD__
  );
#endif
  // Serial reduction of the per-point normalization contributions.
  sum_Q = 0;
  for(int n = 0; n < getNumberOfDataPoints(); n++){
    sum_Q += sum_Q_subvalues[n];
  }
  // Gradient = attraction - repulsion / Z.
  for(int i = 0; i < _gradient.size(); i++){
    _gradient[i] = positive_forces[i] - (negative_forces[i] / sum_Q);
  }
}
//temp
// Three-way sign function: returns -1, 0 or +1 (converted to T).
template <typename T>
T sign(T x) {
  if (x == .0) {
    return .0;
  }
  return (x < .0) ? T(-1.0) : T(1.0);
}
//! One gradient-descent step with per-component adaptive gains and momentum,
//! then advance the iteration counter.
//! \param mult extra multiplier applied to the position update.
template <typename scalar, typename sparse_scalar_matrix>
void WeightedTSNE<scalar, sparse_scalar_matrix>::updateTheEmbedding(double mult){
  for(int i = 0; i < _gradient.size(); ++i){
    // Jacobs-style adaptive gain: grow additively when the gradient sign flips,
    // shrink multiplicatively otherwise; clamp at the configured minimum.
    _gain[i] = static_cast<scalar_type>((sign(_gradient[i]) != sign(_previous_gradient[i])) ? (_gain[i] + .2) : (_gain[i] * .8));
    if(_gain[i] < _params._minimum_gain){
      _gain[i] = static_cast<scalar_type>(_params._minimum_gain);
    }
    // NOTE(review): for positive _eta * _gain this expression reduces to
    // sign(g) * |g| == g, i.e. it appears to be a no-op — confirm intent.
    _gradient[i] = static_cast<scalar_type>((_gradient[i]>0?1:-1)*std::abs(_gradient[i]*_params._eta* _gain[i])/(_params._eta*_gain[i]));
    // Momentum accumulator: initial momentum before the switching iteration,
    // final momentum afterwards.
    _previous_gradient[i] = static_cast<scalar_type>(((_iteration<_params._mom_switching_iter)?_params._momentum:_params._final_momentum) * _previous_gradient[i] - _params._eta * _gain[i] * _gradient[i]);
    (*_embedding_container)[i] += static_cast<scalar_type>(_previous_gradient[i] * mult);
  }
  ++_iteration;
}
//! Not implemented for the weighted variant: asserts in debug builds and
//! returns 0 when assertions are disabled.
template <typename scalar, typename sparse_scalar_matrix>
double WeightedTSNE<scalar, sparse_scalar_matrix>::computeKullbackLeiblerDivergence(){
  assert(false);
  return 0;
}
}
}
#endif
|
memory.h | /**
* Copyright (C) 2007-2011 YU Zhi. All rights reserved.
* $Id$
* @file memory_pool.h
*
* created on: 2011-3-2
* Author: salmon
*/
#ifndef CORE_UTILITIES_MEMORY_POOL_H_
#define CORE_UTILITIES_MEMORY_POOL_H_
#include <simpla/SIMPLA_config.h>
#include <cstddef>
#include <cstring>
#include <memory>
#include "device_common.h"
namespace simpla {
/** @ingroup toolbox
* @addtogroup memory_pool Memory Pool
* @{
* @brief design to speed up frequently and repeatedly
* allocate operation of moderate size array or memory block.
*
*/
enum { MANAGED_MEMORY, HOST_MEMORY, DEVICE_MEMORY };
/**
 * Allocate n elements of T.
 * @param addr     out: receives the allocation; must be non-null.
 * @param n        number of elements (not bytes).
 * @param location requested memory space; NOTE(review): currently ignored —
 *                 the host path always uses malloc and the CUDA path always
 *                 uses cudaMallocManaged. TODO honor DEVICE/HOST locations.
 * @return SP_SUCCESS on success, SP_FAILED on a null out-pointer or failed
 *         host allocation.
 */
template <typename T>
int spMemoryAlloc(T **addr, size_t n, int location = MANAGED_MEMORY) {
#ifndef __CUDA__
    if (addr == nullptr) { return SP_FAILED; }
    *addr = reinterpret_cast<T *>(malloc(n * sizeof(T)));
    // BUG FIX: report failure instead of returning SP_SUCCESS with *addr == nullptr.
    if (*addr == nullptr) { return SP_FAILED; }
#else
    if (addr == nullptr) { return SP_FAILED; }
    SP_DEVICE_CALL(cudaMallocManaged(addr, n * sizeof(T)));
    SP_DEVICE_CALL(cudaDeviceSynchronize());
#endif
    return SP_SUCCESS;
}
/**
 * Release memory obtained from spMemoryAlloc and null the caller's pointer.
 * @param dest address of the pointer to free; must be non-null.
 * @param n    size in bytes; unused, kept for interface symmetry.
 * @return SP_SUCCESS, or SP_FAILED when dest is null.
 */
inline int spMemoryFree(void **dest, size_t n) {
    if (dest == nullptr) { return SP_FAILED; }
#ifndef __CUDA__
    free(*dest);
#else
    SP_DEVICE_CALL(cudaFree(*dest));
    SP_DEVICE_CALL(cudaDeviceSynchronize());
#endif
    // Single post-condition; the original nulled *dest twice on the host path.
    *dest = nullptr;
    return SP_SUCCESS;
}
/**
 * Typed convenience overload: forwards to the untyped free with the byte count.
 * BUG FIX: propagate the inner call's status instead of unconditionally
 * returning SP_SUCCESS (the original swallowed SP_FAILED from a null addr).
 */
template <typename T>
int spMemoryFree(T **addr, size_t n) {
    return spMemoryFree((void **)addr, n * sizeof(T));
}
namespace detail {
/**
 * Custom deleter stored in shared_ptrs created by spMakeShared. Captures the
 * allocation's byte size and location so the matching free can be issued.
 * NOTE(review): addr_ is stored but never read by operator(); the pointer
 * handed in by shared_ptr is the one freed.
 */
struct deleter_device_ptr_s {
    void *addr_;     // allocation address recorded at construction (currently unused)
    size_t m_size_;  // allocation size in bytes
    int m_loc_;      // memory location tag (MANAGED/HOST/DEVICE)
    deleter_device_ptr_s(void *p, size_t s, int loc) : addr_(p), m_size_(s), m_loc_(loc) {}
    ~deleter_device_ptr_s() = default;
    deleter_device_ptr_s(const deleter_device_ptr_s &) = default;
    deleter_device_ptr_s(deleter_device_ptr_s &&) = default;
    deleter_device_ptr_s &operator=(const deleter_device_ptr_s &) = default;
    deleter_device_ptr_s &operator=(deleter_device_ptr_s &&) = default;
    // Frees through a local copy of ptr; spMemoryFree nulls only that copy.
    inline void operator()(void *ptr) { spMemoryFree(&ptr, m_size_); }
};
}
/**
 * Allocate n elements of T and wrap them in a shared_ptr whose deleter frees
 * them through spMemoryFree.
 * @param d unused — presumably an initial-data pointer that was never wired
 *          up; TODO confirm and either use or drop it.
 * NOTE(review): the spMemoryAlloc result is not checked, so the returned
 * shared_ptr may hold nullptr on allocation failure.
 */
template <typename T>
std::shared_ptr<T> spMakeShared(T *d, size_t n, int location = MANAGED_MEMORY) {
    T *addr = nullptr;
    spMemoryAlloc(&addr, n, location);
    return std::shared_ptr<T>(addr, simpla::detail::deleter_device_ptr_s(addr, n * sizeof(T), location));
}
#ifdef __CUDA__
namespace detail {
/**
 * Fill kernel: dest[s] = src for every s < n.
 * BUG FIX: the original wrote `src * threadIdx.x`, storing a thread-dependent
 * value instead of the requested constant.
 */
template <typename T>
__global__ void spCUDA_Assign(T *dest, T src, size_t n) {
    size_t s = blockIdx.x * blockDim.x + threadIdx.x;
    if (s < n) { dest[s] = src; }
}
/** Converting copy kernel: dest[s] = src[s] for every s < n. */
template <typename T, typename U>
__global__ void spCUDA_Copy(T *dest, U const *src, size_t n) {
    size_t s = blockIdx.x * blockDim.x + threadIdx.x;
    if (s < n) { dest[s] = src[s]; }
}
/**
 * Clear kernel: dest[s] = src for every s < n (callers pass src == 0).
 * BUG FIX: same thread-index multiplication defect as spCUDA_Assign.
 */
template <typename T>
__global__ void spCUDA_Clear(T *dest, T src, size_t n) {
    size_t s = blockIdx.x * blockDim.x + threadIdx.x;
    if (s < n) { dest[s] = src; }
}
}
#endif
#define NUM_OF_THREAD 32
/**
 * Zero n elements of dest: memset on the host, cudaMemset on devices.
 * NOTE(review): the CUDA path does not check the cudaMemset return value.
 */
template <typename T>
int spMemoryClear(T *dest, size_t n) {
#ifndef __CUDA__
    memset(reinterpret_cast<void *>(dest), 0, n * sizeof(T));
#else
    cudaMemset(dest, 0, n * sizeof(T));
// SP_CALL_DEVICE_KERNEL(simpla::detail::spCUDA_Assign, (n + NUM_OF_THREAD) / NUM_OF_THREAD, NUM_OF_THREAD, dest, 0,
// n);
#endif
    return SP_SUCCESS;
}
/**
 * Fill n elements of dest with src.
 * Host: OpenMP-parallel loop.  Device: spCUDA_Assign kernel launch.
 * BUG FIX: the host loop indexed with `int`, a signed/unsigned comparison that
 * overflows for n > INT_MAX; use a signed 64-bit-capable index instead
 * (ptrdiff_t keeps the loop variable signed for OpenMP compatibility).
 */
template <typename T>
int spMemoryFill(T *dest, T const &src, size_t n) {
#ifndef __CUDA__
#pragma omp parallel for
    for (ptrdiff_t i = 0; i < (ptrdiff_t)n; ++i) { dest[i] = src; }
#else
    SP_CALL_DEVICE_KERNEL(simpla::detail::spCUDA_Assign, (n + NUM_OF_THREAD) / NUM_OF_THREAD, NUM_OF_THREAD, dest, src,
                          n);
#endif
    return SP_SUCCESS;
}
/**
 * Element-wise converting copy from src (type V) into dest (type U).
 * BUG FIX: the host (#ifndef __CUDA__) branch was empty, so without CUDA the
 * call silently copied nothing; perform the element-wise copy there as well.
 */
template <typename U, typename V>
int spMemoryCopy(U *dest, V const *src, size_t n) {
#ifndef __CUDA__
    for (size_t i = 0; i < n; ++i) { dest[i] = src[i]; }
#else
    SP_CALL_DEVICE_KERNEL(simpla::detail::spCUDA_Copy, (n + NUM_OF_THREAD) / NUM_OF_THREAD, NUM_OF_THREAD, dest, src,
                          n);
#endif
    return SP_SUCCESS;
}
/**
 * Same-type copy of n elements: memcpy on the host, cudaMemcpy with
 * cudaMemcpyDefault on devices (direction inferred from the pointers).
 */
template <typename T>
int spMemoryCopy(T *dest, T const *src, size_t n) {
#ifndef __CUDA__
    memcpy(dest, src, n * sizeof(T));
#else
    SP_DEVICE_CALL(cudaMemcpy((void *)dest, (void const *)src, n * sizeof(T), cudaMemcpyDefault));
#endif
    return SP_SUCCESS;
}
} // namespace simpla
#endif // CORE_UTILITIES_MEMORY_POOL_H_
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% John Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/annotate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
/*
Typedef declarations.
*/
/*
  One monotonic polygon edge: a y-sorted run of points plus cached state used
  during scanline rendering.
*/
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;           /* bounding box of the edge's points */

  MagickRealType
    scanline;         /* last scanline processed; initialized to -1.0 */

  PointInfo
    *points;          /* vertices, stored in ascending y order */

  size_t
    number_points;

  ssize_t
    direction;        /* 1 when the original edge ran downward, 0 when upward */

  MagickBooleanType
    ghostline;        /* MagickTrue for invisible connector segments */

  size_t
    highwater;        /* scan resume hint; initialized to 0 */
} EdgeInfo;

/* Parameters of an ellipse-like element: center, axes, and rotation. */
typedef struct _ElementInfo
{
  MagickRealType
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;

/* A path converted to its sorted-edge rendering form. */
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;

/* Vertex opcodes produced by ConvertPrimitiveToPath. */
typedef enum
{
  MoveToCode,       /* start a new subpath */
  OpenCode,         /* moveto that leaves the previous subpath open */
  GhostlineCode,    /* moveto beginning an invisible connector */
  LineToCode,       /* straight segment to this point */
  EndCode           /* terminator */
} PathInfoCode;

/* One vertex of a vector path: a point plus its opcode. */
typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;
/*
Forward declarations.
*/
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *);
static PrimitiveInfo
*TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(PrimitiveInfo *,const char *);
static void
TraceArc(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo,
const MagickRealType,const MagickBooleanType,const MagickBooleanType),
TraceBezier(PrimitiveInfo *,const size_t),
TraceCircle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceEllipse(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(PrimitiveInfo *,const PointInfo,const PointInfo,
PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const MagickRealType);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *info;

  /*
    Allocate a DrawInfo structure and fill it with default values.
  */
  info=(DrawInfo *) AcquireMagickMemory(sizeof(*info));
  if (info == (DrawInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetDrawInfo((ImageInfo *) NULL,info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new draw_info structure is created initialized to
% default values, according to the given image_info.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  /*
    Allocate and default-initialize the clone, then copy every field of the
    source (deep-copying all heap-allocated members).
  */
  clone_info=(DrawInfo *) AcquireMagickMemory(sizeof(*clone_info));
  if (clone_info == (DrawInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  /*
    BUG FIX: test the *source* for a primitive string.  The original tested
    clone_info->primitive, which GetDrawInfo() leaves NULL, so the primitive
    was never cloned.
  */
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      &draw_info->fill_pattern->exception);
  else
    if (draw_info->tile != (Image *) NULL)
      clone_info->fill_pattern=CloneImage(draw_info->tile,0,0,MagickTrue,
        &draw_info->tile->exception);
  clone_info->tile=NewImageList();  /* tile is deprecated */
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,&draw_info->stroke_pattern->exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /*
        The dash pattern is a 0.0-terminated array; copy including the
        terminator.
      */
      for (x=0; draw_info->dash_pattern[x] != 0.0; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) x+1UL,
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) CopyMagickMemory(clone_info->dash_pattern,draw_info->dash_pattern,
        (size_t) (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) CopyMagickMemory(clone_info->gradient.stops,
        draw_info->gradient.stops,(size_t) number_stops*
        sizeof(*clone_info->gradient.stops));
    }
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  clone_info->bounds=draw_info->bounds;
  clone_info->clip_units=draw_info->clip_units;
  clone_info->render=draw_info->render;
  clone_info->opacity=draw_info->opacity;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->debug=IsEventLogging();
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const DrawInfo *draw_info,
% const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int CompareEdges(const void *x,const void *y)
{
  register const EdgeInfo
    *a,
    *b;

  /*
    qsort comparator: order edges by the y of their first point, then by x,
    then by the orientation (cross product) of their initial segments.
  */
  a=(const EdgeInfo *) x;
  b=(const EdgeInfo *) y;
  if ((a->points[0].y-MagickEpsilon) > b->points[0].y)
    return(1);
  if ((a->points[0].y+MagickEpsilon) < b->points[0].y)
    return(-1);
  if ((a->points[0].x-MagickEpsilon) > b->points[0].x)
    return(1);
  if ((a->points[0].x+MagickEpsilon) < b->points[0].x)
    return(-1);
  if (((a->points[1].x-a->points[0].x)*(b->points[1].y-b->points[0].y)-
       (a->points[1].y-a->points[0].y)*(b->points[1].x-b->points[0].x)) > 0.0)
    return(1);
  return(-1);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Dump every edge of a polygon (direction, ghostline flag, bounds, and points)
  to the debug log.  Diagnostic only; no side effects on the polygon.
*/
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
  register EdgeInfo
    *p;

  register ssize_t
    i,
    j;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
  p=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
      (double) i);
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
      p->direction != MagickFalse ? "down" : "up");
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
      p->ghostline != MagickFalse ? "transparent" : "opaque");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " bounds: %g %g - %g %g",p->bounds.x1,p->bounds.y1,
      p->bounds.x2,p->bounds.y2);
    for (j=0; j < (ssize_t) p->number_points; j++)
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g %g",
        p->points[j].x,p->points[j].y);
    p++;
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  PointInfo
    swap;

  register ssize_t
    head,
    tail;

  /*
    Reverse the point list in place with a converging two-index sweep.
  */
  head=0;
  tail=(ssize_t) number_points-1;
  while (head < tail)
  {
    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
    head++;
    tail--;
  }
}
/*
  Convert a vector path into the sorted-edge PolygonInfo form used by the
  scanline renderer.  The path is split into monotonic edges: a new edge is
  started at every moveto and whenever the y-direction of consecutive points
  flips.  Edges with fewer than two points are discarded.
  NOTE(review): on allocation failure this returns NULL without releasing
  polygon_info or any edges/points already built — confirm callers treat a
  NULL return as fatal.
*/
static PolygonInfo *ConvertPathToPolygon(
  const DrawInfo *magick_unused(draw_info),const PathInfo *path_info)
{
  long
    direction,
    next_direction;

  PointInfo
    point,
    *points;

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;

  register ssize_t
    i,
    n;

  MagickBooleanType
    ghostline;

  size_t
    edge,
    number_edges,
    number_points;

  /*
    Convert a path to the more efficient sorted rendering form.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    return((PolygonInfo *) NULL);
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory((size_t) number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    return((PolygonInfo *) NULL);
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) ResetMagickMemory(&point,0,sizeof(point));
  (void) ResetMagickMemory(&bounds,0,sizeof(bounds));
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to: flush any pending edge, then start a fresh point buffer.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                /* Grow the edge array geometrically. */
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  return((PolygonInfo *) NULL);
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            if (direction < 0)
              ReversePoints(points,(size_t) n);  /* store y-ascending */
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((path_info[i].point.y == point.y) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((direction != 0) && (direction != next_direction))
      {
        /*
          New edge: the y-direction flipped, so close the current edge and
          begin a new one seeded with the last point.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        /* Grow the point buffer geometrically. */
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /* Flush the final pending edge (or discard it if degenerate). */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                return((PolygonInfo *) NULL);
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  polygon_info->number_edges=edge;
  /* Sort edges for the scanline sweep (by y, then x, then orientation). */
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),CompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
  /*
    Dump a PathInfo vector (as built by ConvertPrimitiveToPath) to the debug
    log, one "x y op" line per point, terminated by the EndCode sentinel.
  */
  register const PathInfo
    *p;
  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    /* Map the path code to a human-readable operation name. */
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "      %g %g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
      "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(
  const DrawInfo *magick_unused(draw_info),const PrimitiveInfo *primitive_info)
{
  PathInfo
    *path_info;
  PathInfoCode
    code;
  PointInfo
    p,  /* first point of the current subpath */
    q;  /* most recently emitted point (for duplicate elimination) */
  register ssize_t
    i,
    n;  /* number of path entries written so far */
  ssize_t
    coordinates,  /* points remaining in the current subpath */
    start;        /* index in path_info where the current subpath begins */
  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    /* These primitives have no stroke outline, hence no vector path. */
    case PointPrimitive:
    case ColorPrimitive:
    case MattePrimitive:
    case TextPrimitive:
    case ImagePrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /*
    Count coordinates; worst case each open subpath adds a ghostline moveto
    and a closing lineto, hence 2*i+3 entries (incl. the EndCode sentinel).
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (2UL*i+3UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /* Beginning of a new subpath: remember its first point and index. */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
      }
    coordinates--;
    /*
      Eliminate duplicate points.
    */
    if ((i == 0) || (fabs(q.x-primitive_info[i].point.x) > MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) > MagickEpsilon))
      {
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;
    /* Subpath complete; if it ended on its starting point it is closed. */
    if ((fabs(p.x-primitive_info[i].point.x) <= MagickEpsilon) &&
        (fabs(p.y-primitive_info[i].point.y) <= MagickEpsilon))
      continue;
    /*
      Mark the p point as open if it does not match the q.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  /* Terminate the path with the EndCode sentinel. */
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyDrawInfo() deallocates memory associated with a DrawInfo
% structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /*
    Deallocate all memory associated with a DrawInfo structure; returns NULL
    so callers can reset their pointer in one statement.

    Fix: the original read draw_info->debug BEFORE asserting draw_info is
    non-NULL, so the NULL check came after a potential NULL dereference.
    The asserts now run first.
  */
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickSignature);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->tile != (Image *) NULL)
    draw_info->tile=DestroyImage(draw_info->tile);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  /* Invalidate the signature to help catch use-after-free. */
  draw_info->signature=(~MagickSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
%      size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
%
% A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  /*
    Release the point list of the given edge and close the gap in the edge
    array; returns the new number of edges.
  */
  assert(edge < polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  if (edge < polygon_info->number_edges)
    /*
      NOTE(review): source and destination regions overlap here (shift-left
      by one element); confirm CopyMagickMemory has memmove semantics for
      overlapping regions — plain memcpy would be undefined behavior.
    */
    (void) CopyMagickMemory(polygon_info->edges+edge,polygon_info->edges+edge+1,
      (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
  return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  /*
    Free every edge's point list, then the edge array, then the structure
    itself.  Always returns NULL so the caller can reset its pointer.
  */
  register ssize_t
    edge;
  for (edge=0; edge < (ssize_t) polygon_info->number_edges; edge++)
    polygon_info->edges[edge].points=(PointInfo *)
      RelinquishMagickMemory(polygon_info->edges[edge].points);
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  /*
    Clip the horizontal span [edge->x1,edge->x2] on destination scanline y so
    that every x in the returned span inverse-maps (through *affine, which the
    caller passes as the INVERSE transform) to a source coordinate inside the
    image.  An empty span is signaled by x2 < x1.
  */
  double
    intercept,
    z;
  register double
    x;
  SegmentInfo
    inverse_edge;
  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  /* Source column at destination x==0 on this scanline; the column at
     destination x is affine->sx*x+z.  Clip so it stays in [0,columns). */
  z=affine->ry*y+affine->tx;
  if (affine->sx > MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept+MagickEpsilon;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept-MagickEpsilon;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* Negative scale: the two intercepts swap roles. */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept+MagickEpsilon;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept-MagickEpsilon;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* sx ~ 0: the source column is the constant z for the whole span. */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /* Out of bounds: force an empty span (x2 set below x1). */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx > MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept+MagickEpsilon;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept-MagickEpsilon;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept+MagickEpsilon;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept-MagickEpsilon;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /*
            NOTE(review): the columns branch above marks the empty span with
            x2=edge->x1, but this branch uses x2=edge->x2, which is not
            necessarily < x1 — verify this asymmetry is intentional.
          */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  /*
    Invert a 2x3 affine transform by Cramer's rule: the linear 2x2 part is
    the adjugate scaled by 1/det, and the translation is the negated original
    translation pushed through that inverse.
    NOTE(review): a singular matrix (det == 0) divides by zero here, exactly
    as in the original — callers are expected to pass invertible transforms.
  */
  AffineMatrix
    inverse;
  double
    reciprocal_determinant;
  reciprocal_determinant=1.0/(affine->sx*affine->sy-affine->rx*affine->ry);
  inverse.sx=reciprocal_determinant*affine->sy;
  inverse.rx=reciprocal_determinant*(-affine->rx);
  inverse.ry=reciprocal_determinant*(-affine->ry);
  inverse.sy=reciprocal_determinant*affine->sx;
  inverse.tx=(-affine->tx)*inverse.sx-affine->ty*inverse.ry;
  inverse.ty=(-affine->tx)*inverse.rx-affine->ty*inverse.sy;
  return(inverse);
}
static inline ssize_t MagickAbsoluteValue(const ssize_t x)
{
  /* Return |x| (no special-casing of the most negative value, matching the
     original behavior). */
  return(x < 0 ? -x : x);
}
static inline double MagickMax(const double x,const double y)
{
  /* Return the larger of x and y (y wins ties, as in the original). */
  return(x > y ? x : y);
}
static inline double MagickMin(const double x,const double y)
{
  /* Return the smaller of x and y (y wins ties, as in the original). */
  return(x < y ? x : y);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine)
{
  /*
    Composite `source` over `image` under the given affine transform.
    Forward-maps the source corners to bound the affected destination region,
    then, per destination scanline, inverse-maps pixels back into the source
    and composites interpolated samples.  Returns MagickFalse if any scanline
    sync fails or the image cannot be made DirectClass.
  */
  AffineMatrix
    inverse_affine;
  CacheView
    *image_view,
    *source_view;
  ExceptionInfo
    *exception;
  MagickBooleanType
    status;
  MagickPixelPacket
    zero;
  PointInfo
    extent[4],
    min,
    max,
    point;
  register ssize_t
    i;
  SegmentInfo
    edge;
  ssize_t
    y;
  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickSignature);
  assert(affine != (AffineMatrix *) NULL);
  /* Forward-map the four source corners into destination space. */
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  /* Axis-aligned bounding box of the transformed corners. */
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  /* Clamp the bounding box to the destination image. */
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetMagickPixelPacket(image,&zero);
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  source_view=AcquireCacheView(source);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=(ssize_t) ceil(edge.y1-0.5); y <= (ssize_t) floor(edge.y2+0.5); y++)
  {
    MagickPixelPacket
      composite,
      pixel;
    PointInfo
      point;
    register IndexPacket
      *restrict indexes;
    register ssize_t
      x;
    register PixelPacket
      *restrict q;
    SegmentInfo
      inverse_edge;
    ssize_t
      x_offset;
    /* Clip this scanline's span so inverse-mapped samples stay in-source. */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    /*
      NOTE(review): the row start uses ceil(x1-0.5) while the width uses
      floor(x1+0.5); these agree except exactly at half-integers — confirm.
    */
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) ((ssize_t) floor(inverse_edge.x2+0.5)-(ssize_t) floor(
      inverse_edge.x1+0.5)+1),1,exception);
    if (q == (PixelPacket *) NULL)
      continue;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    composite=zero;
    x_offset=0;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      /* Inverse-map the destination pixel into source coordinates. */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      /* Sample the source (interpolated) and composite over destination. */
      (void) InterpolateMagickPixelPacket(source,source_view,
        UndefinedInterpolatePixel,point.x,point.y,&pixel,exception);
      SetMagickPixelPacket(image,q,indexes+x_offset,&composite);
      MagickPixelCompositeOver(&pixel,pixel.opacity,&composite,
        composite.opacity,&composite);
      SetPixelPacket(image,&composite,q,indexes+x_offset);
      x_offset++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
% PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
  const PolygonInfo *polygon_info)
{
  /*
    Debug aid: stroke the bounding rectangle of each polygon edge (red for
    directed edges, green otherwise) and the overall bounds in blue.  Only
    useful to developers inspecting the rendering algorithm.
  */
  DrawInfo
    *clone_info;
  MagickRealType
    mid;  /* half the rendered stroke width, in pixels */
  PointInfo
    end,
    resolution,
    start;
  PrimitiveInfo
    primitive_info[6];
  register ssize_t
    i;
  SegmentInfo
    bounds;
  ssize_t
    coordinates;
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /* Transparent fill so only the rectangle outlines are visible. */
  (void) QueryColorDatabase("#0000",&clone_info->fill,&image->exception);
  resolution.x=DefaultResolution;
  resolution.y=DefaultResolution;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;
      MagickStatusType
        flags;
      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      /* Density "NxM": if M (sigma) was omitted, use N for both axes. */
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  mid=(resolution.x/72.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* Union of all edge bounds. */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      /* Expand by the stroke half-width and clamp to the image frame. */
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        /* Red marks edges with a non-zero direction, green the rest. */
        if (polygon_info->edges[i].direction != 0)
          (void) QueryColorDatabase("red",&clone_info->stroke,
            &image->exception);
        else
          (void) QueryColorDatabase("green",&clone_info->stroke,
            &image->exception);
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        (void) DrawPrimitive(image,clone_info,primitive_info);
      }
    }
  /* Overall bounding rectangle in blue. */
  (void) QueryColorDatabase("blue",&clone_info->stroke,&image->exception);
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  (void) DrawPrimitive(image,clone_info,primitive_info);
  clone_info=DestroyDrawInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the name of the clip path.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *name)
{
  /*
    Render the clip path registered under `name` (as an image artifact) into
    the image's clip mask.  Returns MagickTrue on success, MagickFalse when
    the artifact is missing or rendering fails.

    Fix: the lazily-created mask was held in a local `Image *clip_mask`
    shadowing the `char clip_mask[MaxTextExtent]` buffer above; renamed to
    mask_image to remove the shadowing (no behavior change).
  */
  char
    clip_mask[MaxTextExtent];
  const char
    *value;
  DrawInfo
    *clone_info;
  MagickStatusType
    status;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  /* Look up the clip-path definition stored as an image artifact. */
  (void) FormatLocaleString(clip_mask,MaxTextExtent,"%s",name);
  value=GetImageArtifact(image,clip_mask);
  if (value == (const char *) NULL)
    return(MagickFalse);
  if (image->clip_mask == (Image *) NULL)
    {
      Image
        *mask_image;
      /* Lazily create the clip-mask canvas. */
      mask_image=CloneImage(image,image->columns,image->rows,MagickTrue,
        &image->exception);
      if (mask_image == (Image *) NULL)
        return(MagickFalse);
      (void) SetImageClipMask(image,mask_image);
      mask_image=DestroyImage(mask_image);
    }
  /* Start from a fully transparent mask. */
  (void) QueryColorDatabase("#00000000",&image->clip_mask->background_color,
    &image->exception);
  image->clip_mask->background_color.opacity=(Quantum) TransparentOpacity;
  (void) SetImageBackgroundColor(image->clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      draw_info->clip_mask);
  /* Draw the path in opaque white, then negate to form the mask. */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,value);
  (void) QueryColorDatabase("#ffffff",&clone_info->fill,&image->exception);
  clone_info->clip_mask=(char *) NULL;
  status=DrawImage(image->clip_mask,clone_info);
  status|=NegateImage(image->clip_mask,MagickFalse);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image)
{
  /*
    Stroke the polyline in primitive_info as a dashed line, honoring
    draw_info->dash_pattern (alternating on/off lengths, zero-terminated)
    and draw_info->dash_offset.  Each "on" run is rendered as a small
    sub-polygon via DrawStrokePolygon().
  */
  DrawInfo
    *clone_info;
  MagickRealType
    length,          /* remaining length of the current dash-pattern entry */
    maximum_length,  /* length of the current polyline segment */
    offset,          /* dash offset still to be consumed */
    scale,
    total_length;    /* distance covered along the current segment */
  MagickStatusType
    status;
  PrimitiveInfo
    *dash_polygon;
  register ssize_t
    i;
  register MagickRealType
    dx,
    dy;
  size_t
    number_vertices;
  ssize_t
    j,  /* write index into dash_polygon */
    n;  /* index into dash_pattern; parity selects dash vs gap */
  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin draw-dash");
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  /* Worst case: two emitted points per input vertex plus the terminator. */
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+1UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*(draw_info->dash_pattern[0]-0.5);
  offset=draw_info->dash_offset != 0.0 ? scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset by walking pattern entries until it is spent.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
        continue;
      }
    if (offset < length)
      {
        /* Offset ends inside this entry; keep its remainder. */
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    /* Walk each polyline segment, emitting dash sub-polygons as we go. */
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot((double) dx,dy);
    if (length == 0.0)
      {
        /* Current pattern entry exhausted; wrap at the zero terminator. */
        n++;
        if (draw_info->dash_pattern[n] == 0.0)
          n=0;
        length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
      }
    for (total_length=0.0; (total_length+length) < maximum_length; )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* End of a gap: start a new dash at the interpolated point. */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length/maximum_length);
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length/maximum_length);
          j=1;
        }
      else
        {
          /* End of a dash: close the sub-polygon and stroke it. */
          if ((j+1) > (ssize_t) (2*number_vertices))
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length/maximum_length);
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length/maximum_length);
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status|=DrawStrokePolygon(image,clone_info,dash_polygon);
        }
      n++;
      if (draw_info->dash_pattern[n] == 0.0)
        n=0;
      length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    }
    /* Carry the unused remainder of the pattern entry into the next segment. */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    /*
      NOTE(review): j is appended here without the 2*number_vertices bound
      check used above — verify it cannot exceed the allocation.
    */
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  if ((total_length < maximum_length) && ((n & 0x01) == 0) && (j > 1))
    {
      /* Flush a trailing, still-open dash. */
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status|=DrawStrokePolygon(image,clone_info,dash_polygon);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  /*
    A string qualifies as a point when locale-aware numeric parsing either
    consumes at least one character or yields a non-zero value.
  */
  char
    *end;
  double
    value;
  value=InterpretLocaleValue(point,&end);
  if ((value == 0.0) && (end == point))
    return(MagickFalse);
  return(MagickTrue);
}
static inline void TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  /* Record a single-coordinate point primitive at the given location. */
  primitive_info->point=point;
  primitive_info->coordinates=1;
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
key[2*MaxTextExtent],
keyword[MaxTextExtent],
geometry[MaxTextExtent],
name[MaxTextExtent],
pattern[MaxTextExtent],
*primitive,
*token;
const char
*q;
DrawInfo
**graphic_context;
MagickBooleanType
proceed,
status;
MagickRealType
angle,
factor,
primitive_extent;
PointInfo
point;
PixelPacket
start_color;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
length,
number_points;
ssize_t
j,
k,
n;
/*
Ensure the annotation info is valid.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (*draw_info->primitive != '@')
primitive=AcquireString(draw_info->primitive);
else
primitive=FileToString(draw_info->primitive+1,~0,&image->exception);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(MagickRealType) strlen(primitive);
(void) SetImageArtifact(image,"MVG",primitive);
n=0;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(
sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=2047;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
(void) QueryColorDatabase("#000000",&start_color,&image->exception);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
GetMagickToken(q,&q,keyword);
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
GetMagickToken(q,&q,token);
affine.sx=InterpretLocaleValue(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.rx=InterpretLocaleValue(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.ry=InterpretLocaleValue(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.sy=InterpretLocaleValue(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.tx=InterpretLocaleValue(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.ty=InterpretLocaleValue(token,(char **) NULL);
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) QueryColorDatabase(token,&graphic_context[n]->border_color,
&image->exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("clip-path",keyword) == 0)
{
/*
Create clip mask.
*/
GetMagickToken(q,&q,token);
(void) CloneString(&graphic_context[n]->clip_mask,token);
(void) DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask);
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetMagickToken(q,&q,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
GetMagickToken(q,&q,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(¤t);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
GetMagickToken(q,&q,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) FormatLocaleString(pattern,MaxTextExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern);
else
{
status=QueryColorDatabase(token,&graphic_context[n]->fill,
&image->exception);
if (status == MagickFalse)
{
ImageInfo
*pattern_info;
pattern_info=AcquireImageInfo();
(void) CopyMagickString(pattern_info->filename,token,
MaxTextExtent);
graphic_context[n]->fill_pattern=
ReadImage(pattern_info,&image->exception);
CatchException(&image->exception);
pattern_info=DestroyImageInfo(pattern_info);
}
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
GetMagickToken(q,&q,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
graphic_context[n]->fill.opacity=ClampToQuantum((MagickRealType)
QuantumRange*(1.0-factor*InterpretLocaleValue(token,
(char **) NULL)));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetMagickToken(q,&q,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *)
RelinquishMagickMemory(graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->pointsize=InterpretLocaleValue(token,
(char **) NULL);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
GetMagickToken(q,&q,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
GetMagickToken(q,&q,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->weight=StringToUnsignedLong(token);
if (LocaleCompare(token,"all") == 0)
graphic_context[n]->weight=0;
if (LocaleCompare(token,"bold") == 0)
graphic_context[n]->weight=700;
if (LocaleCompare(token,"bolder") == 0)
if (graphic_context[n]->weight <= 800)
graphic_context[n]->weight+=100;
if (LocaleCompare(token,"lighter") == 0)
if (graphic_context[n]->weight >= 100)
graphic_context[n]->weight-=100;
if (LocaleCompare(token,"normal") == 0)
graphic_context[n]->weight=400;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
GetMagickToken(q,&q,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
GetMagickToken(q,&q,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->interline_spacing=InterpretLocaleValue(token,
(char **) NULL);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->interword_spacing=InterpretLocaleValue(token,
(char **) NULL);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->kerning=InterpretLocaleValue(token,
(char **) NULL);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("matte",keyword) == 0)
{
primitive_type=MattePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
GetMagickToken(q,&q,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
graphic_context[n]->opacity=ClampToQuantum((MagickRealType)
QuantumRange*(1.0-((1.0-QuantumScale*graphic_context[n]->opacity)*
factor*InterpretLocaleValue(token,(char **) NULL))));
graphic_context[n]->fill.opacity=graphic_context[n]->opacity;
graphic_context[n]->stroke.opacity=graphic_context[n]->opacity;
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
GetMagickToken(q,&q,token);
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
break;
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),DrawError,
"UnbalancedGraphicContextPushPop","`%s'",token);
n=0;
break;
}
if (graphic_context[n]->clip_mask != (char *) NULL)
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
(void) SetImageClipMask(image,(Image *) NULL);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("pattern",token) == 0)
break;
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
GetMagickToken(q,&q,token);
if (LocaleCompare("clip-path",token) == 0)
{
char
name[MaxTextExtent];
GetMagickToken(q,&q,token);
(void) FormatLocaleString(name,MaxTextExtent,"%s",token);
for (p=q; *q != '\0'; )
{
GetMagickToken(q,&q,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetMagickToken(q,(const char **) NULL,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) SetImageArtifact(image,name,token);
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MaxTextExtent],
name[MaxTextExtent],
type[MaxTextExtent];
SegmentInfo
segment;
GetMagickToken(q,&q,token);
(void) CopyMagickString(name,token,MaxTextExtent);
GetMagickToken(q,&q,token);
(void) CopyMagickString(type,token,MaxTextExtent);
GetMagickToken(q,&q,token);
segment.x1=InterpretLocaleValue(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
segment.y1=InterpretLocaleValue(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
segment.x2=InterpretLocaleValue(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
segment.y2=InterpretLocaleValue(token,(char **) NULL);
if (LocaleCompare(type,"radial") == 0)
{
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
}
for (p=q; *q != '\0'; )
{
GetMagickToken(q,&q,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetMagickToken(q,(const char **) NULL,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MaxTextExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
(void) FormatLocaleString(geometry,MaxTextExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
RectangleInfo
bounds;
GetMagickToken(q,&q,token);
(void) CopyMagickString(name,token,MaxTextExtent);
GetMagickToken(q,&q,token);
bounds.x=(ssize_t) ceil(InterpretLocaleValue(token,
(char **) NULL)-0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
bounds.y=(ssize_t) ceil(InterpretLocaleValue(token,
(char **) NULL)-0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
bounds.width=(size_t) floor(InterpretLocaleValue(token,
(char **) NULL)+0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
bounds.height=(size_t) floor(InterpretLocaleValue(token,
(char **) NULL)+0.5);
for (p=q; *q != '\0'; )
{
GetMagickToken(q,&q,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetMagickToken(q,(const char **) NULL,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MaxTextExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
(void) FormatLocaleString(geometry,MaxTextExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),ResourceLimitError,
"MemoryAllocationFailed","`%s'",image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
break;
}
if (LocaleCompare("defs",token) == 0)
break;
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
GetMagickToken(q,&q,token);
angle=InterpretLocaleValue(token,(char **) NULL);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
GetMagickToken(q,&q,token);
affine.sx=InterpretLocaleValue(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.sy=InterpretLocaleValue(token,(char **) NULL);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
GetMagickToken(q,&q,token);
angle=InterpretLocaleValue(token,(char **) NULL);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
GetMagickToken(q,&q,token);
angle=InterpretLocaleValue(token,(char **) NULL);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelPacket
stop_color;
GetMagickToken(q,&q,token);
(void) QueryColorDatabase(token,&stop_color,&image->exception);
(void) GradientImage(image,LinearGradient,ReflectSpread,
&start_color,&stop_color);
start_color=stop_color;
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) FormatLocaleString(pattern,MaxTextExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern);
else
{
status=QueryColorDatabase(token,&graphic_context[n]->stroke,
&image->exception);
if (status == MagickFalse)
{
ImageInfo
*pattern_info;
pattern_info=AcquireImageInfo();
(void) CopyMagickString(pattern_info->filename,token,
MaxTextExtent);
graphic_context[n]->stroke_pattern=
ReadImage(pattern_info,&image->exception);
CatchException(&image->exception);
pattern_info=DestroyImageInfo(pattern_info);
}
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->stroke_antialias=
StringToLong(token) != 0 ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*p;
p=q;
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2UL*x+1UL),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),ResourceLimitError,
"MemoryAllocationFailed","`%s'",image->filename);
break;
}
for (j=0; j < x; j++)
{
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
graphic_context[n]->dash_pattern[j]=InterpretLocaleValue(
token,(char **) NULL);
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
GetMagickToken(q,&q,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->dash_offset=InterpretLocaleValue(token,
(char **) NULL);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
GetMagickToken(q,&q,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
GetMagickToken(q,&q,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
GetMagickToken(q,&q,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
graphic_context[n]->stroke.opacity=ClampToQuantum((MagickRealType)
QuantumRange*(1.0-factor*InterpretLocaleValue(token,
(char **) NULL)));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->stroke_width=InterpretLocaleValue(token,
(char **) NULL);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
GetMagickToken(q,&q,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
GetMagickToken(q,&q,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->text_antialias=
StringToLong(token) != 0 ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
GetMagickToken(q,&q,token);
(void) QueryColorDatabase(token,&graphic_context[n]->undercolor,
&image->exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
GetMagickToken(q,&q,token);
affine.tx=InterpretLocaleValue(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
affine.ty=InterpretLocaleValue(token,(char **) NULL);
break;
}
status=MagickFalse;
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
GetMagickToken(q,&q,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(InterpretLocaleValue(
token,(char **) NULL)-0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(InterpretLocaleValue(
token,(char **) NULL)-0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
graphic_context[n]->viewbox.width=(size_t) floor(
InterpretLocaleValue(token,(char **) NULL)+0.5);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
graphic_context[n]->viewbox.height=(size_t) floor(
InterpretLocaleValue(token,(char **) NULL)+0.5);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((affine.sx != 1.0) || (affine.rx != 0.0) || (affine.ry != 0.0) ||
(affine.sy != 1.0) || (affine.tx != 0.0) || (affine.ty != 0.0))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",
(int) (q-p),p);
continue;
}
/*
Parse the primitive attributes.
*/
i=0;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
GetMagickToken(q,&q,token);
point.x=InterpretLocaleValue(token,(char **) NULL);
GetMagickToken(q,&q,token);
if (*token == ',')
GetMagickToken(q,&q,token);
point.y=InterpretLocaleValue(token,(char **) NULL);
GetMagickToken(q,(const char **) NULL,token);
if (*token == ',')
GetMagickToken(q,&q,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
i++;
if (i < (ssize_t) number_points)
continue;
number_points<<=1;
primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info,
(size_t) number_points,sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
break;
}
}
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].text=(char *) NULL;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
length=primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
length*=5;
break;
}
case RoundRectanglePrimitive:
{
length*=5+8*BezierQuantum;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates > 107)
(void) ThrowMagickException(&image->exception,GetMagickModule(),
DrawError,"TooManyBezierCoordinates","`%s'",token);
length=BezierQuantum*primitive_info[j].coordinates;
break;
}
case PathPrimitive:
{
char
*s,
*t;
GetMagickToken(q,&q,token);
length=1;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=InterpretLocaleValue(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
length++;
}
length=length*BezierQuantum/2;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
MagickRealType
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
length=2*((size_t) ceil((double) MagickPI*radius))+6*BezierQuantum+360;
break;
}
default:
break;
}
if ((size_t) (i+length) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=length+1;
primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info,
(size_t) number_points,sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
}
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
TraceRoundRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
TraceArc(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
TraceEllipse(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceCircle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
break;
case PolygonPrimitive:
{
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
TraceBezier(primitive_info+j,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
i=(ssize_t) (j+TracePath(primitive_info+j,token));
break;
}
case ColorPrimitive:
case MattePrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
GetMagickToken(q,&q,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
GetMagickToken(q,&q,token);
primitive_info[j].text=AcquireString(token);
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
GetMagickToken(q,&q,token);
primitive_info[j].text=AcquireString(token);
break;
}
}
if (primitive_info == (PrimitiveInfo *) NULL)
break;
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
(void) DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask);
(void) DrawPrimitive(image,graphic_context[n],primitive_info);
}
if (primitive_info->text != (char *) NULL)
primitive_info->text=(char *) RelinquishMagickMemory(
primitive_info->text);
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear or radial gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
*/
static inline MagickRealType GetStopColorOffset(const GradientInfo *gradient,
const ssize_t x,const ssize_t y)
{
switch (gradient->type)
{
case UndefinedGradient:
case LinearGradient:
{
MagickRealType
gamma,
length,
offset,
scale;
PointInfo
p,
q;
const SegmentInfo
*gradient_vector;
gradient_vector=(&gradient->gradient_vector);
p.x=gradient_vector->x2-gradient_vector->x1;
p.y=gradient_vector->y2-gradient_vector->y1;
q.x=(double) x-gradient_vector->x1;
q.y=(double) y-gradient_vector->y1;
length=sqrt(q.x*q.x+q.y*q.y);
gamma=sqrt(p.x*p.x+p.y*p.y)*length;
gamma=1.0/(gamma <= MagickEpsilon ? 1.0 : gamma);
scale=p.x*q.x+p.y*q.y;
offset=gamma*scale*length;
return(offset);
}
case RadialGradient:
{
MagickRealType
length,
offset;
PointInfo
v;
v.x=(double) x-gradient->center.x;
v.y=(double) y-gradient->center.y;
length=sqrt(v.x*v.x+v.y*v.y);
if (gradient->spread == RepeatSpread)
return(length);
offset=length/gradient->radius;
return(offset);
}
}
return(0.0);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  MagickRealType
    length;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  gradient_vector=(&gradient->gradient_vector);
  /*
    Length of the gradient vector; used below to normalize the offsets
    returned by GetStopColorOffset() for non-radial gradients.
  */
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireCacheView(image);
  /*
    Rows are processed independently; `status` is the only shared output
    and is only ever lowered to MagickFalse, so unsynchronized writes are
    benign.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  /*
    NOTE(review): the loop bound compares y against bounding_box.height
    (not bounding_box.y+height), i.e. height/width act as absolute end
    coordinates here -- confirm intended when the box origin is non-zero.
  */
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    MagickPixelPacket
      composite,
      pixel;

    MagickRealType
      alpha,
      offset;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      i,
      x;

    register PixelPacket
      *restrict q;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    composite=zero;
    /*
      Seed the offset for column 0; inside the x loop it is recomputed for
      every pixel except the exact gradient origin.
    */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset/=length;
    /*
      NOTE(review): q starts at column 0 while x starts at bounding_box.x,
      yet indexes are addressed as indexes+x -- q and indexes+x diverge
      when bounding_box.x != 0; confirm the box always starts at x == 0.
    */
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            Pad spread: offsets outside [0,1] clamp to the first/last stop.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset/=length;
            }
          /*
            Find the first stop past this offset, then blend between the
            surrounding pair.
          */
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            Reflect spread: mirror the offset into [0,1] so successive
            periods alternate direction.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset/=length;
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          MagickBooleanType
            antialias;

          MagickRealType
            repeat;

          /*
            Repeat spread: wrap the raw offset modulo one period (vector
            length for linear, radius for radial); `antialias` marks pixels
            within one unit of the wrap seam, which are blended across the
            full stop range to soften the discontinuity.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=repeat/length;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ?
                    MagickTrue : MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /*
                      At the seam, blend between the outermost stops
                      weighted by the distance to the period boundary.
                    */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      /*
        Composite the gradient color over the existing pixel.
      */
      MagickPixelCompositeOver(&composite,composite.opacity,&pixel,
        pixel.opacity,&pixel);
      SetPixelPacket(image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: the address of the rendered pattern image, updated by this
% method.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern)
{
  char
    key[MaxTextExtent];

  const char
    *extent,
    *primitive;

  DrawInfo
    *pattern_draw_info;

  ImageInfo
    *pattern_image_info;

  MagickBooleanType
    status;

  /*
    Render the pattern registered under `name`: its drawing primitive is
    stored as image artifact "<name>" and its canvas geometry as
    "<name>-geometry".  The result replaces any image already in *pattern.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  (void) FormatLocaleString(key,MaxTextExtent,"%s",name);
  primitive=GetImageArtifact(image,key);
  if (primitive == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
  extent=GetImageArtifact(image,key);
  if (extent == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  /*
    Allocate a fully transparent canvas of the recorded geometry.
  */
  pattern_image_info=AcquireImageInfo();
  pattern_image_info->size=AcquireString(extent);
  *pattern=AcquireImage(pattern_image_info);
  pattern_image_info=DestroyImageInfo(pattern_image_info);
  (void) QueryColorDatabase("#00000000",&(*pattern)->background_color,
    &image->exception);
  (void) SetImageBackgroundColor(*pattern);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,extent);
  /*
    Draw with a clone of draw_info whose fill/stroke patterns are cleared,
    so the pattern cannot recursively reference itself.
  */
  pattern_draw_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  pattern_draw_info->fill_pattern=NewImageList();
  pattern_draw_info->stroke_pattern=NewImageList();
  (void) CloneString(&pattern_draw_info->primitive,primitive);
  status=DrawImage(*pattern,pattern_draw_info);
  pattern_draw_info=DestroyDrawInfo(pattern_draw_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  register ssize_t
    n;

  /*
    Release each thread's polygon rasterization state, then the set itself.
    Always returns NULL so callers can reassign in one step.
  */
  assert(polygon_info != (PolygonInfo **) NULL);
  for (n=0; n < (ssize_t) GetOpenMPMaximumThreads(); n++)
    if (polygon_info[n] != (PolygonInfo *) NULL)
      polygon_info[n]=DestroyPolygonInfo(polygon_info[n]);
  return((PolygonInfo **) RelinquishMagickMemory(polygon_info));
}
/*
  AcquirePolygonThreadSet() builds one PolygonInfo per OpenMP thread from the
  given primitive path so each thread can rasterize independently.  Returns
  NULL on failure; on success the caller owns the set and must release it
  with DestroyPolygonThreadSet().
*/
static PolygonInfo **AcquirePolygonThreadSet(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info)
{
  PathInfo
    *restrict path_info;

  PolygonInfo
    **polygon_info;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo **) NULL)
    return((PolygonInfo **) NULL);
  /*
    Zero the pointer array so DestroyPolygonThreadSet() can safely skip
    slots that were never populated.
  */
  (void) ResetMagickMemory(polygon_info,0,number_threads*
    sizeof(*polygon_info));
  path_info=ConvertPrimitiveToPath(draw_info,primitive_info);
  if (path_info == (PathInfo *) NULL)
    return(DestroyPolygonThreadSet(polygon_info));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    polygon_info[i]=ConvertPathToPolygon(draw_info,path_info);
    if (polygon_info[i] == (PolygonInfo *) NULL)
      {
        /*
          Fix: release path_info before bailing out; the original code leaked
          it on a mid-loop ConvertPathToPolygon() failure.
        */
        path_info=(PathInfo *) RelinquishMagickMemory(path_info);
        return(DestroyPolygonThreadSet(polygon_info));
      }
  }
  path_info=(PathInfo *) RelinquishMagickMemory(path_info);
  return(polygon_info);
}
/*
  GetOpacityPixel() returns the fill opacity for point (x,y) and stores the
  stroke opacity in *stroke_opacity, by scanning the polygon's edge list.
  Opacities are in [0,1]; mid is half the (affine-scaled) stroke width.
  NOTE(review): this routine mutates polygon_info (edge destruction and
  per-edge scanline/highwater caches), so it assumes each thread owns its
  own PolygonInfo and that y is non-decreasing across calls on a scanline
  sweep — confirm against callers.
*/
static MagickRealType GetOpacityPixel(PolygonInfo *polygon_info,
  const MagickRealType mid,const MagickBooleanType fill,
  const FillRule fill_rule,const double x,const double y,
  MagickRealType *stroke_opacity)
{
  MagickRealType
    alpha,
    beta,
    distance,
    subpath_opacity;

  PointInfo
    delta;

  register EdgeInfo
    *p;

  register const PointInfo
    *q;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_opacity=0.0;
  subpath_opacity=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* Edges appear ordered by their top bound; once one starts below the
       scanline (with stroke margin), none of the rest can contribute. */
    if (y <= (p->bounds.y1-mid-0.5))
      break;
    if (y > (p->bounds.y2+mid+0.5))
      {
        /* Edge ends above the scanline: retire it so later pixels on this
           and following scanlines skip it. */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if ((x <= (p->bounds.x1-mid-0.5)) || (x > (p->bounds.x2+mid+0.5)))
      continue;
    /* Resume from the cached highwater index instead of rescanning all
       points of the edge. */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if (y <= (p->points[i-1].y-mid-0.5))
        break;
      if (y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != y)
        {
          p->scanline=y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta < 0.0)
        {
          /* Projection falls before the segment start: distance to q. */
          delta.x=x-q->x;
          delta.y=y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta > alpha)
            {
              /* Projection falls past the segment end: distance to q+1. */
              delta.x=x-(q+1)->x;
              delta.y=y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* Perpendicular (squared) distance to the segment line. */
              alpha=1.0/alpha;
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          alpha=mid+0.5;
          if ((*stroke_opacity < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_opacity=1.0;
              else
                {
                  /* Antialias the stroke edge: opacity falls off with the
                     distance past the stroke half-width. */
                  beta=1.0;
                  if (distance != 1.0)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_opacity < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_opacity=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_opacity >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_opacity=1.0;
          continue;
        }
      if (distance > 1.0)
        continue;
      if (beta == 0.0)
        {
          beta=1.0;
          if (distance != 1.0)
            beta=sqrt(distance);
        }
      /* Partial subpath coverage near the boundary (antialiased fill). */
      alpha=beta-1.0;
      if (subpath_opacity < (alpha*alpha))
        subpath_opacity=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_opacity >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if (y <= p->bounds.y1)
      break;
    if ((y > p->bounds.y2) || (x <= p->bounds.x1))
      continue;
    if (x > p->bounds.x2)
      {
        /* Point is entirely to the right of this edge: it crosses. */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
      if (y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* Cross-product sign test: is (x,y) left of segment q -> q+1? */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* Even-odd rule: inside when the winding count is odd. */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_opacity);
}
/*
  DrawPolygonPrimitive() rasterizes a polygon (or line, or single point) onto
  the image: it builds per-thread polygon state, clamps the affected region
  to the stroke-expanded bounding box, and composites fill and stroke colors
  per pixel.  Returns MagickTrue on success.
*/
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    fill,
    status;

  MagickRealType
    mid;

  PolygonInfo
    **restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start,
    stop,
    y;

  /*
    Compute bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates == 0)
    return(MagickTrue);
  polygon_info=AcquirePolygonThreadSet(draw_info,primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  /* Disabled debug aid: flip to 1 to visualize edge bounding rectangles. */
  if (0)
    DrawBoundingRectangles(image,draw_info,polygon_info[0]);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  /* mid is half the stroke width in device space. */
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  bounds=polygon_info[0]->edges[0].bounds;
  /* Union of all edge bounding boxes. */
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /* Expand by the stroke margin, then clamp to the image frame. */
  bounds.x1-=(mid+1.0);
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : (size_t) ceil(bounds.x1-0.5) >=
    image->columns ? (double) image->columns-1.0 : bounds.x1;
  bounds.y1-=(mid+1.0);
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : (size_t) ceil(bounds.y1-0.5) >=
    image->rows ? (double) image->rows-1.0 : bounds.y1;
  bounds.x2+=(mid+1.0);
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : (size_t) floor(bounds.x2+0.5) >=
    image->columns ? (double) image->columns-1.0 : bounds.x2;
  bounds.y2+=(mid+1.0);
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : (size_t) floor(bounds.y2+0.5) >=
    image->rows ? (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  exception=(&image->exception);
  start=(ssize_t) ceil(bounds.x1-0.5);
  stop=(ssize_t) floor(bounds.x2+0.5);
  image_view=AcquireCacheView(image);
  if (primitive_info->coordinates == 1)
    {
      /*
        Draw point.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
      for (y=(ssize_t) ceil(bounds.y1-0.5); y <= (ssize_t) floor(bounds.y2+0.5); y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        x=start;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop-x+1),
          1,exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /* Only the pixel at the rounded point coordinate gets the stroke
           color; the rest of the row is untouched. */
        for ( ; x <= stop; x++)
        {
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            (void) GetStrokeColor(draw_info,x,y,q);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          "    end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=(ssize_t) ceil(bounds.y1-0.5); y <= (ssize_t) floor(bounds.y2+0.5); y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickRealType
      fill_opacity,
      stroke_opacity;

    PixelPacket
      fill_color,
      stroke_color;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,start,y,(size_t) (stop-
      start+1),1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start; x <= stop; x++)
    {
      /*
        Fill and/or stroke.
      */
      fill_opacity=GetOpacityPixel(polygon_info[id],mid,fill,
        draw_info->fill_rule,(double) x,(double) y,&stroke_opacity);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* Hard threshold when antialiasing is off. */
          fill_opacity=fill_opacity > 0.25 ? 1.0 : 0.0;
          stroke_opacity=stroke_opacity > 0.25 ? 1.0 : 0.0;
        }
      (void) GetFillColor(draw_info,x,y,&fill_color);
      /* Convert coverage to an opacity value (QuantumRange == transparent
         in this legacy opacity convention). */
      fill_opacity=(MagickRealType) (QuantumRange-fill_opacity*(QuantumRange-
        fill_color.opacity));
      MagickCompositeOver(&fill_color,fill_opacity,q,(MagickRealType)
        q->opacity,q);
      (void) GetStrokeColor(draw_info,x,y,&stroke_color);
      stroke_opacity=(MagickRealType) (QuantumRange-stroke_opacity*
        (QuantumRange-stroke_color.opacity));
      /* Stroke is composited after (on top of) the fill. */
      MagickCompositeOver(&stroke_color,stroke_opacity,q,(MagickRealType)
        q->opacity,q);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/
/*
  LogPrimitiveInfo() writes a human-readable trace of the given primitive to
  the draw event log: simple primitives (point, color, matte, text, image)
  are logged as a single line; path-style primitives are logged point by
  point, flagging duplicate points and whether each subpath is closed.
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,
    q,
    point;

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  switch (primitive_info->primitive)
  {
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case MattePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "MattePrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /*
    Path-style primitive: walk the point list, one subpath at a time.
    p remembers the first point of the current subpath (to detect closure);
    q remembers the previous point (to detect duplicates).
  */
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    /* Fix: the original assigned point twice; a single assignment suffices. */
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /* Start of a new subpath. */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          "    begin open (%.20g)",(double) coordinates);
        p=point;
      }
    if ((fabs(q.x-point.x) > MagickEpsilon) ||
        (fabs(q.y-point.y) > MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "      %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "      %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /* Subpath complete: "last" means closed, "open" means not closed. */
    if ((fabs(p.x-point.x) > MagickEpsilon) ||
        (fabs(p.y-point.y) > MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end open (%.20g)",
        (double) coordinates);
  }
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "  begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "    affine: %g %g %g %g %g %g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  status=MagickTrue;
  exception=(&image->exception);
  /* Round the primitive's anchor point to integer pixel coordinates. */
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  image_view=AcquireCacheView(image);
  switch (primitive_info->primitive)
  {
    case PointPrimitive:
    {
      PixelPacket
        fill_color;

      PixelPacket
        *q;

      /* Single pixel: composite the fill color over it, bounds permitting. */
      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (PixelPacket *) NULL)
        break;
      (void) GetFillColor(draw_info,x,y,&fill_color);
      MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,q,
        (MagickRealType) q->opacity,q);
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case ColorPrimitive:
    {
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelPacket
            *q;

          /* Recolor just the anchor pixel. */
          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            break;
          (void) GetFillColor(draw_info,x,y,q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelPacket
            target;

          /* Recolor every pixel in the image similar to the anchor pixel. */
          (void) GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register PixelPacket
              *restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (IsColorSimilar(image,q,&target) == MagickFalse)
                {
                  q++;
                  continue;
                }
              (void) GetFillColor(draw_info,x,y,q);
              q++;
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          MagickPixelPacket
            target;

          /* Flood fill from the anchor; FillToBorder fills until the
             border color is met instead of until the target color ends. */
          (void) GetOneVirtualMagickPixel(image,x,y,&target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(MagickRealType) draw_info->border_color.red;
              target.green=(MagickRealType) draw_info->border_color.green;
              target.blue=(MagickRealType) draw_info->border_color.blue;
            }
          (void) FloodfillPaintImage(image,DefaultChannels,draw_info,&target,x,
            y,primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          /* Recolor every pixel of the image with the fill color. */
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register PixelPacket
              *restrict q;

            register ssize_t
              x;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              (void) GetFillColor(draw_info,x,y,q);
              q++;
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case MattePrimitive:
    {
      /* Same shapes of operation as ColorPrimitive, but only the opacity
         channel is written. */
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelPacket
            pixel;

          PixelPacket
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            break;
          (void) GetFillColor(draw_info,x,y,&pixel);
          SetPixelOpacity(q,pixel.opacity);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelPacket
            pixel,
            target;

          (void) GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register PixelPacket
              *restrict q;

            register ssize_t
              x;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (IsColorSimilar(image,q,&target) == MagickFalse)
                {
                  q++;
                  continue;
                }
              (void) GetFillColor(draw_info,x,y,&pixel);
              SetPixelOpacity(q,pixel.opacity);
              q++;
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          MagickPixelPacket
            target;

          (void) GetOneVirtualMagickPixel(image,x,y,&target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(MagickRealType) draw_info->border_color.red;
              target.green=(MagickRealType) draw_info->border_color.green;
              target.blue=(MagickRealType) draw_info->border_color.blue;
            }
          (void) FloodfillPaintImage(image,OpacityChannel,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          PixelPacket
            pixel;

          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register PixelPacket
              *restrict q;

            register ssize_t
              x;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              (void) GetFillColor(draw_info,x,y,&pixel);
              SetPixelOpacity(q,pixel.opacity);
              q++;
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case TextPrimitive:
    {
      char
        geometry[MaxTextExtent];

      DrawInfo
        *clone_info;

      /* Delegate to the annotation engine at the primitive's position. */
      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MaxTextExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status=AnnotateImage(image,clone_info);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    case ImagePrimitive:
    {
      AffineMatrix
        affine;

      char
        composite_geometry[MaxTextExtent];

      Image
        *composite_image;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      /* Read an image (inline data: URI or a filename), optionally resize
         it, then composite it at the primitive's position. */
      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=AcquireImageInfo();
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_image=ReadInlineImage(clone_info,primitive_info->text,
          &image->exception);
      else
        {
          (void) CopyMagickString(clone_info->filename,primitive_info->text,
            MaxTextExtent);
          composite_image=ReadImage(clone_info,&image->exception);
        }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_image == (Image *) NULL)
        break;
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
      y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          char
            geometry[MaxTextExtent];

          /*
            Resize image.
          */
          (void) FormatLocaleString(geometry,MaxTextExtent,"%gx%g!",
            primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          (void) TransformImage(&composite_image,(char *) NULL,geometry);
        }
      if (composite_image->matte == MagickFalse)
        (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel);
      if (draw_info->opacity != OpaqueOpacity)
        (void) SetImageOpacity(composite_image,draw_info->opacity);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MaxTextExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,
        &image->exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      /* Plain over-composite can skip the affine resampling path. */
      if (draw_info->compose == OverCompositeOp)
        (void) DrawAffineImage(image,composite_image,&affine);
      else
        (void) CompositeImage(image,draw_info->compose,composite_image,
          geometry.x,geometry.y);
      composite_image=DestroyImage(composite_image);
      break;
    }
    default:
    {
      MagickRealType
        mid,
        scale;

      DrawInfo
        *clone_info;

      /* Path-style primitive (line, polygon, ellipse, bezier, ...). */
      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (draw_info->dash_pattern[0] != 0.0) &&
          ((scale*draw_info->stroke_width) > MagickEpsilon) &&
          (draw_info->stroke.opacity != (Quantum) TransparentOpacity))
        {
          /*
            Draw dash polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.opacity=(Quantum) TransparentOpacity;
          status=DrawPolygonPrimitive(image,clone_info,primitive_info);
          clone_info=DestroyDrawInfo(clone_info);
          (void) DrawDashPolygon(draw_info,primitive_info,image);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
      if ((mid > 1.0) &&
          (draw_info->stroke.opacity != (Quantum) TransparentOpacity))
        {
          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
          closed_path=
            (primitive_info[i-1].point.x == primitive_info[0].point.x) &&
            (primitive_info[i-1].point.y == primitive_info[0].point.y) ?
            MagickTrue : MagickFalse;
          i=(ssize_t) primitive_info[0].coordinates;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
               (primitive_info[i].primitive != UndefinedPrimitive))
            {
              /* Round caps/joins (or multi-subpath input): the plain
                 polygon rasterizer already produces the right result. */
              (void) DrawPolygonPrimitive(image,draw_info,primitive_info);
              break;
            }
          /* Fill first (with stroke disabled), then stroke as its own
             polygon so caps and joins are honored. */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.opacity=(Quantum) TransparentOpacity;
          status=DrawPolygonPrimitive(image,clone_info,primitive_info);
          clone_info=DestroyDrawInfo(clone_info);
          status|=DrawStrokePolygon(image,draw_info,primitive_info);
          break;
        }
      status=DrawPolygonPrimitive(image,draw_info,primitive_info);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"  end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
/*
  DrawRoundLinecap() draws a round cap at the given endpoint by rasterizing
  a degenerate 4-point polygon (all points within 10*MagickEpsilon of the
  endpoint); with a round join, the stroke rasterizer renders it as a dot.
*/
static void DrawRoundLinecap(Image *image,const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info)
{
  PrimitiveInfo
    linecap[5];

  register ssize_t
    i;

  /*
    Fix: copy primitive_info into all five entries (the original left
    linecap[4] uninitialized except for its primitive field, so its other
    members held indeterminate values).
  */
  for (i=0; i < 5; i++)
    linecap[i]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=(double) (10.0*MagickEpsilon);
  linecap[2].point.x+=(double) (10.0*MagickEpsilon);
  linecap[2].point.y+=(double) (10.0*MagickEpsilon);
  linecap[3].point.y+=(double) (10.0*MagickEpsilon);
  linecap[4].primitive=UndefinedPrimitive;  /* list terminator */
  (void) DrawPolygonPrimitive(image,draw_info,linecap);
}
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path,
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon: for each subpath, trace its stroke outline and
    fill that outline with the stroke color; add round caps on open paths.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "    begin draw-stroke-polygon");
  /* Render the stroke outline as a fill: fill = stroke color, no stroke. */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  clone_info->stroke.opacity=(Quantum) TransparentOpacity;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    stroke_polygon=TraceStrokePolygon(draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        /*
          Fix: the original passed a NULL trace (allocation failure) straight
          to DrawPolygonPrimitive(); fail the draw instead.
        */
        status=MagickFalse;
        break;
      }
    status=DrawPolygonPrimitive(image,clone_info,stroke_polygon);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == MagickFalse)
      break;  /* fix: do not let later subpaths mask an earlier failure */
    q=p+p->coordinates-1;
    closed_path=(q->point.x == p->point.x) && (q->point.y == p->point.y) ?
      MagickTrue : MagickFalse;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        DrawRoundLinecap(image,draw_info,p);
        DrawRoundLinecap(image,draw_info,q);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "    end draw-stroke-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  /*
    Reset the matrix to the identity transform: clear every term to zero,
    then set the diagonal scale factors to one.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  (void) ResetMagickMemory(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sy=1.0;
  affine_matrix->sx=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  const char
    *value;

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *options;

  /*
    Zero the structure, apply built-in defaults, then layer any settings
    carried by the image info (global options) on top.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) ResetMagickMemory(draw_info,0,sizeof(*draw_info));
  options=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  sans_exception=AcquireExceptionInfo();
  (void) QueryColorDatabase("#000F",&draw_info->fill,sans_exception);
  (void) QueryColorDatabase("#FFF0",&draw_info->stroke,sans_exception);
  draw_info->stroke_antialias=options->antialias;
  draw_info->stroke_width=1.0;
  draw_info->opacity=OpaqueOpacity;
  draw_info->fill_rule=EvenOddRule;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  if (options->font != (char *) NULL)
    draw_info->font=AcquireString(options->font);
  if (options->density != (char *) NULL)
    draw_info->density=AcquireString(options->density);
  draw_info->text_antialias=options->antialias;
  draw_info->pointsize=12.0;
  if (options->pointsize != 0.0)
    draw_info->pointsize=options->pointsize;
  draw_info->undercolor.opacity=(Quantum) TransparentOpacity;
  draw_info->border_color=options->border_color;
  draw_info->compose=OverCompositeOp;
  if (options->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(options->server_name);
  draw_info->render=MagickTrue;
  draw_info->debug=IsEventLogging();
  /*
    Per-option overrides.
  */
  value=GetImageOption(options,"encoding");
  if (value != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,value);
  value=GetImageOption(options,"kerning");
  if (value != (const char *) NULL)
    draw_info->kerning=InterpretLocaleValue(value,(char **) NULL);
  value=GetImageOption(options,"interline-spacing");
  if (value != (const char *) NULL)
    draw_info->interline_spacing=InterpretLocaleValue(value,(char **) NULL);
  draw_info->direction=UndefinedDirection;
  value=GetImageOption(options,"interword-spacing");
  if (value != (const char *) NULL)
    draw_info->interword_spacing=InterpretLocaleValue(value,(char **) NULL);
  value=GetImageOption(options,"direction");
  if (value != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,value);
  value=GetImageOption(options,"fill");
  if (value != (const char *) NULL)
    (void) QueryColorDatabase(value,&draw_info->fill,sans_exception);
  value=GetImageOption(options,"stroke");
  if (value != (const char *) NULL)
    (void) QueryColorDatabase(value,&draw_info->stroke,sans_exception);
  value=GetImageOption(options,"strokewidth");
  if (value != (const char *) NULL)
    draw_info->stroke_width=InterpretLocaleValue(value,(char **) NULL);
  value=GetImageOption(options,"undercolor");
  if (value != (const char *) NULL)
    (void) QueryColorDatabase(value,&draw_info->undercolor,sans_exception);
  value=GetImageOption(options,"gravity");
  if (value != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,value);
  sans_exception=DestroyExceptionInfo(sans_exception);
  draw_info->signature=MagickSignature;
  options=DestroyImageInfo(options);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the binomial coefficient C(n,k), i.e. n!/(k!(n-k)!).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
static inline MagickRealType Permutate(const ssize_t n,const ssize_t k)
{
MagickRealType
r;
register ssize_t
i;
r=1.0;
for (i=k+1; i <= n; i++)
r*=i;
for (i=1; i <= (n-k); i++)
r/=i;
return(r);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static void TraceArc(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  /*
    Trace an arc: an ellipse centered on the midpoint of the start-end
    segment, with radii reaching from that center to the start point.
  */
  PointInfo
    center,
    radii;

  center.x=0.5*(start.x+end.x);
  center.y=0.5*(start.y+end.y);
  radii.x=fabs(start.x-center.x);
  radii.y=fabs(start.y-center.y);
  TraceEllipse(primitive_info,center,radii,degrees);
}
static void TraceArcPath(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const MagickRealType angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  /*
    Trace an SVG-style elliptical arc from start to end with radii arc,
    x-axis rotation angle (degrees), and the standard large-arc/sweep
    flags.  The arc is approximated by a chain of cubic bezier segments,
    one per (at most) quarter turn, emitted into primitive_info.
  */
  MagickRealType
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  PointInfo
    center,
    points[3],
    radii;

  register MagickRealType
    cosine,
    sine;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  /*
    Coincident endpoints degenerate to a single point.
  */
  if ((start.x == end.x) && (start.y == end.y))
    {
      TracePoint(primitive_info,end);
      return;
    }
  /*
    Zero radii: per the SVG spec, fall back to a straight line.
  */
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((radii.x == 0.0) || (radii.y == 0.0))
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  /*
    Rotate the chord into the ellipse's own coordinate frame.
  */
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    {
      /* chord is vanishingly small relative to the radii */
      TraceLine(primitive_info,start,end);
      return;
    }
  /*
    Radii too small to span the endpoints: scale them up uniformly.
  */
  if (delta > 1.0)
    {
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  /*
    Map both endpoints onto the unit circle and solve for the arc center;
    the sign of factor selects which of the two candidate centers honors
    the large-arc/sweep flag combination.
  */
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  factor=1.0/(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  /*
    Sweep angle theta, wrapped so its sign matches the sweep direction.
  */
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=(MagickRealType) (2.0*MagickPI);
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=(MagickRealType) (2.0*MagickPI);
  /*
    One cubic bezier segment per quarter turn (or fraction thereof).
  */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    MagickEpsilon))));
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    /*
      gamma is the bezier handle length for this segment's subtended
      angle (the standard 8/3*sin^2(b/2)/sin(b) circular-arc formula).
    */
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /*
      Emit the segment's four control points, rotated and scaled back to
      user space; the first point chains from the previous segment.
    */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;  /* land exactly on the requested endpoint */
    TraceBezier(p,4);
    p+=p->coordinates;
  }
  /*
    Stamp the primitive type onto every generated coordinate.
  */
  primitive_info->coordinates=(size_t) (p-primitive_info);
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
static void TraceBezier(PrimitiveInfo *primitive_info,
  const size_t number_coordinates)
{
  /*
    Trace a bezier curve defined by the number_coordinates control points
    already stored in primitive_info, replacing them in place with a
    polyline approximation of the curve (Bernstein polynomial evaluation).
  */
  MagickRealType
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients: scale the sample count with the curve's
    extent (largest pairwise control-point separation), capped at
    BezierQuantum samples per control point.
  */
  quantum=number_coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (MagickRealType) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (MagickRealType) quantum)
        quantum=(size_t) alpha;
    }
  }
  quantum=(size_t) MagickMin((double) quantum/number_coordinates,
    (double) BezierQuantum);
  control_points=quantum*number_coordinates;
  coefficients=(MagickRealType *) AcquireQuantumMemory((size_t)
    number_coordinates,sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory((size_t) control_points,
    sizeof(*points));
  if ((coefficients == (MagickRealType *) NULL) ||
      (points == (PointInfo *) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Compute bezier points: coefficients[] holds the binomial coefficients
    of the Bernstein basis; alpha walks the basis terms incrementally as
    weight sweeps 0..1 over control_points samples.
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);  /* next Bernstein term, incrementally */
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys; write the samples back
    over the control points and close exactly on the final control point.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    TracePoint(p,points[i]);
    p+=p->coordinates;
  }
  TracePoint(p,end);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(MagickRealType *) RelinquishMagickMemory(coefficients);
}
static void TraceCircle(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  /*
    Trace a circle centered at start whose radius is the distance from
    start to end, as a full 360-degree ellipse with equal radii.
  */
  MagickRealType
    radius;

  PointInfo
    degrees,
    offset;

  radius=hypot((double) (end.x-start.x),(double) (end.y-start.y));
  offset.x=(double) radius;
  offset.y=(double) radius;
  degrees.x=0.0;
  degrees.y=360.0;
  TraceEllipse(primitive_info,start,offset,degrees);
}
static void TraceEllipse(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo stop,const PointInfo degrees)
{
  /*
    Trace an ellipse centered at start with radii stop, swept from
    degrees.x to degrees.y, as a segmented polyline in primitive_info.
  */
  MagickRealType
    delta,
    step,
    y;

  PointInfo
    angle,
    point;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  if ((stop.x == 0.0) && (stop.y == 0.0))
    {
      /* zero radii degenerate to a single point */
      TracePoint(primitive_info,start);
      return;
    }
  /*
    Pick an angular step fine enough for the larger radius; small
    ellipses keep the default PI/8 step.
  */
  delta=2.0/MagickMax(stop.x,stop.y);
  step=(MagickRealType) (MagickPI/8.0);
  if ((delta >= 0.0) && (delta < (MagickRealType) (MagickPI/8.0)))
    step=(MagickRealType) (MagickPI/(4*(MagickPI/delta/2+0.5)));
  angle.x=DegreesToRadians(degrees.x);
  y=degrees.y;
  while (y < degrees.x)
    y+=360.0;  /* normalize so the sweep is non-negative */
  angle.y=(double) (DegreesToRadians(y)-MagickEpsilon);
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*stop.x+start.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*stop.y+start.y;
    TracePoint(p,point);
    p+=p->coordinates;
  }
  /*
    Close the sweep exactly at the terminal angle.
  */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*stop.x+start.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*stop.y+start.y;
  TracePoint(p,point);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
static void TraceLine(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  /*
    Trace a line segment as a pair of points; a degenerate (zero length)
    segment collapses to a single point primitive.
  */
  MagickBooleanType
    degenerate;

  degenerate=((fabs(start.x-end.x) <= MagickEpsilon) &&
    (fabs(start.y-end.y) <= MagickEpsilon)) ? MagickTrue : MagickFalse;
  TracePoint(primitive_info,start);
  if (degenerate != MagickFalse)
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return;
    }
  TracePoint(primitive_info+1,end);
  (primitive_info+1)->primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
}
static size_t TracePath(PrimitiveInfo *primitive_info,const char *path)
{
  /*
    Parse an SVG path string and trace it into primitive_info.  Supported
    commands: A (arc), C (cubic bezier), H/V (horizontal/vertical line),
    L (line), M (moveto), Q/T (quadratic bezier), S (smooth cubic), and
    Z (closepath).  Uppercase commands take absolute coordinates,
    lowercase relative ones.  Returns the total coordinate count.
  */
  char
    token[MaxTextExtent];

  const char
    *p;

  int
    attribute,
    last_attribute;

  MagickRealType
    x,
    y;

  PointInfo
    end,
    points[4],
    point,
    start;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  attribute=0;
  point.x=0.0;
  point.y=0.0;
  start.x=0.0;
  start.y=0.0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        MagickBooleanType
          large_arc,
          sweep;

        MagickRealType
          angle;

        PointInfo
          arc;

        /*
          Compute arc points: rx ry rotation large-arc-flag sweep-flag x y,
          repeated while more numbers follow.
        */
        do
        {
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          arc.x=InterpretLocaleValue(token,(char **) NULL);
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          arc.y=InterpretLocaleValue(token,(char **) NULL);
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          angle=InterpretLocaleValue(token,(char **) NULL);
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          x=InterpretLocaleValue(token,(char **) NULL);
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          y=InterpretLocaleValue(token,(char **) NULL);
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          TraceArcPath(q,point,end,arc,angle,large_arc,sweep);
          q+=q->coordinates;
          point=end;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Compute bezier points: cubic curve with two control points and
          an endpoint; the current point is the first control point.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            x=InterpretLocaleValue(token,(char **) NULL);
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            y=InterpretLocaleValue(token,(char **) NULL);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          TraceBezier(q,4);
          q+=q->coordinates;
          point=end;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /*
          Horizontal lineto: only x changes.
        */
        do
        {
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          x=InterpretLocaleValue(token,(char **) NULL);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          TracePoint(q,point);
          q+=q->coordinates;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Lineto: one point per coordinate pair.
        */
        do
        {
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          x=InterpretLocaleValue(token,(char **) NULL);
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          y=InterpretLocaleValue(token,(char **) NULL);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          TracePoint(q,point);
          q+=q->coordinates;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Moveto: a non-initial M closes out the coordinates of the
          previous subpath and starts a new one.
        */
        if (q != primitive_info)
          {
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
          }
        i=0;
        do
        {
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          x=InterpretLocaleValue(token,(char **) NULL);
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          y=InterpretLocaleValue(token,(char **) NULL);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          if (i == 0)
            start=point;  /* remember the subpath origin for Z/z */
          i++;
          TracePoint(q,point);
          q+=q->coordinates;
          /*
            NOTE(review): after the i++ above, (i != 0) is always true, so
            every absolute-M point is traced twice -- presumably
            intentional to anchor the subpath; confirm against upstream.
          */
          if ((i != 0) && (attribute == (int) 'M'))
            {
              TracePoint(q,point);
              q+=q->coordinates;
            }
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Compute bezier points: quadratic curve with one control point;
          the current point is the first of the three bezier points.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            x=InterpretLocaleValue(token,(char **) NULL);
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            y=InterpretLocaleValue(token,(char **) NULL);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          TraceBezier(q,3);
          q+=q->coordinates;
          point=end;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Compute bezier points: smooth cubic -- the first control point
          is the reflection of the previous segment's second control
          point about the current point.
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            x=InterpretLocaleValue(token,(char **) NULL);
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            y=InterpretLocaleValue(token,(char **) NULL);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /*
            No preceding curve command: no control point to reflect.
          */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=points[2];
              points[1]=points[3];
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          TraceBezier(q,4);
          q+=q->coordinates;
          point=end;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Compute bezier points: smooth quadratic -- the control point is
          the reflection of the previous segment's control point.
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            x=InterpretLocaleValue(token,(char **) NULL);
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            y=InterpretLocaleValue(token,(char **) NULL);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          /*
            NOTE(review): points[3] is never assigned in the quadratic
            ('Q'/'T') cases, so this branch reads a value left over from
            an earlier cubic segment -- confirm against upstream.
          */
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=points[2];
              points[1]=points[3];
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          TraceBezier(q,3);
          q+=q->coordinates;
          point=end;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical lineto: only y changes.
        */
        do
        {
          GetMagickToken(p,&p,token);
          if (*token == ',')
            GetMagickToken(p,&p,token);
          y=InterpretLocaleValue(token,(char **) NULL);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          TracePoint(q,point);
          q+=q->coordinates;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Closepath: return to the subpath origin and finalize the
          current subpath's coordinate count.
        */
        point=start;
        TracePoint(q,point);
        q+=q->coordinates;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        z_count++;
        break;
      }
      default:
      {
        if (isalpha((int) ((unsigned char) attribute)) != 0)
          (void) FormatLocaleFile(stderr,"attribute not recognized: %c\n",
            attribute);
        break;
      }
    }
  }
  /*
    Finalize the last subpath, then walk backwards stamping the primitive
    type (and, for multi-subpath outlines, the fill method) onto every
    generated coordinate.
  */
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return(number_coordinates);
}
static void TraceRectangle(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  /*
    Trace a closed rectangle from two opposing corners: the four corners
    in order, then back to the starting corner to close the path.
  */
  PointInfo
    corners[5];

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  corners[0]=start;
  corners[1].x=start.x;
  corners[1].y=end.y;
  corners[2]=end;
  corners[3].x=end.x;
  corners[3].y=start.y;
  corners[4]=start;
  q=primitive_info;
  for (i=0; i < 5; i++)
  {
    TracePoint(q,corners[i]);
    q+=q->coordinates;
  }
  primitive_info->coordinates=(size_t) (q-primitive_info);
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    q->primitive=primitive_info->primitive;
    q--;
  }
}
static void TraceRoundRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  /*
    Trace a rectangle with rounded corners: four quarter ellipses (one
    per corner) joined in order, then closed back to the first point.
  */
  PointInfo
    degrees,
    offset,
    point;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  p=primitive_info;
  offset.x=fabs(end.x-start.x);
  offset.y=fabs(end.y-start.y);
  /*
    Clamp the corner radii to half the rectangle's extent.
  */
  if (arc.x > (0.5*offset.x))
    arc.x=0.5*offset.x;
  if (arc.y > (0.5*offset.y))
    arc.y=0.5*offset.y;
  /*
    Top-right corner (270-360 degrees).
  */
  point.x=start.x+offset.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  TraceEllipse(p,point,arc,degrees);
  p+=p->coordinates;
  /*
    Bottom-right corner (0-90 degrees).
  */
  point.x=start.x+offset.x-arc.x;
  point.y=start.y+offset.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  TraceEllipse(p,point,arc,degrees);
  p+=p->coordinates;
  /*
    Bottom-left corner (90-180 degrees).
  */
  point.x=start.x+arc.x;
  point.y=start.y+offset.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  TraceEllipse(p,point,arc,degrees);
  p+=p->coordinates;
  /*
    Top-left corner (180-270 degrees), then close on the first point.
  */
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  TraceEllipse(p,point,arc,degrees);
  p+=p->coordinates;
  TracePoint(p,primitive_info->point);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
static void TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const MagickRealType offset)
{
  /*
    Extend both ends of an open stroked path outward by offset along the
    path's terminal direction, producing square line caps.  The first and
    last vertices are moved in place.

    Fix: guard the dx*(distance+offset)/distance projections against a
    zero-length direction vector.  Previously a fully degenerate path
    (all vertices coincident) divided by zero, writing NaN coordinates,
    and the reverse scan below could also fall through with j == -1 and
    read primitive_info[-1].  When no usable direction exists the
    endpoint is simply left where it is.
  */
  MagickRealType
    distance;

  register MagickRealType
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  /*
    Find the first vertex that differs from the start point to establish
    the cap direction at the path's head.
  */
  dx=0.0;
  dy=0.0;
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  if (distance >= MagickEpsilon)
    {
      primitive_info[0].point.x=(double) (primitive_info[i].point.x+
        dx*(distance+offset)/distance);
      primitive_info[0].point.y=(double) (primitive_info[i].point.y+
        dy*(distance+offset)/distance);
    }
  /*
    Likewise at the path's tail, scanning backwards from the last vertex.
  */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  if (distance >= MagickEpsilon)
    {
      /* the guard also ensures the scan broke with a valid j >= 0 */
      primitive_info[number_vertices-1].point.x=(double)
        (primitive_info[j].point.x+dx*(distance+offset)/distance);
      primitive_info[number_vertices-1].point.y=(double)
        (primitive_info[j].point.y+dy*(distance+offset)/distance);
    }
}
static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info)
{
  /*
    Convert a polyline into a closed polygon tracing the outline of its
    stroke: offset each segment by half the stroke width on either side
    (paths path_p and path_q), joining consecutive segments per the
    linejoin setting (bevel, miter, or round).  Returns a newly allocated
    primitive array the caller must relinquish, or NULL on allocation
    failure.
  */
  typedef struct _LineSegment
  {
    double
      p,
      q;
  } LineSegment;

  LineSegment
    dx,
    dy,
    inverse_slope,
    slope,
    theta;

  MagickBooleanType
    closed_path;

  MagickRealType
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *path_p,
    *path_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    max_strokes,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  max_strokes=2*number_vertices+6*BezierQuantum+360;
  path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_p));
  path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_q));
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  /*
    NOTE(review): if only some of the three allocations succeed, the
    successful ones are not relinquished on this early return -- confirm
    whether upstream later added cleanup here.
  */
  if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL) ||
      (polygon_primitive == (PrimitiveInfo *) NULL))
    return((PrimitiveInfo *) NULL);
  (void) CopyMagickMemory(polygon_primitive,primitive_info,(size_t)
    number_vertices*sizeof(*polygon_primitive));
  /*
    A path is closed when its first and last points coincide.
  */
  closed_path=
    (primitive_info[number_vertices-1].point.x == primitive_info[0].point.x) &&
    (primitive_info[number_vertices-1].point.y == primitive_info[0].point.y) ?
    MagickTrue : MagickFalse;
  /*
    For joins that wrap around a closed path, append the second vertex so
    the closing corner gets joined too.
  */
  if ((draw_info->linejoin == RoundJoin) ||
      ((draw_info->linejoin == MiterJoin) && (closed_path != MagickFalse)))
    {
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.  Skip leading
    vertices that coincide with the start point.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    n=(ssize_t) number_vertices-1L;
  /*
    Near-vertical and near-horizontal segments substitute +/-1/epsilon
    for the otherwise-infinite slope or inverse slope.
  */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) <= MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) <= MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /*
    mid is half the stroke width in device space; miterlimit is the
    squared cutoff distance for miter joins.
  */
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  miterlimit=(MagickRealType) (draw_info->miterlimit*draw_info->miterlimit*
    mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  /*
    Offset the first segment perpendicular to its direction to seed the
    two sides of the stroke outline.
  */
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
    NOTE(review): the seed statements below index path_q with p and
    path_p with q; this looks transposed but is harmless here because
    p == q == 0 and both are incremented once.
  */
  p=0;
  q=0;
  path_q[p++]=box_q[0];
  path_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    if (dot_product < 0.25)
      continue;  /* skip sub-half-pixel segments */
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) <= MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon :
              1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon :
              -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    /*
      Offset this segment on both sides of the centerline.
    */
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /*
      box_p[4]/box_q[4] are the intersections of the offset lines of the
      two adjacent segments (the miter points); parallel segments reuse
      the shared endpoint.
    */
    if (fabs((double) (slope.p-slope.q)) <= MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    /*
      Grow the outline buffers when a round join could overflow them.
      NOTE(review): only q is checked here although p advances too, and
      the original path_p/path_q pointers may leak if ResizeQuantumMemory
      fails -- confirm against ResizeQuantumMemory's contract.
    */
    if (q >= (ssize_t) (max_strokes-6*BezierQuantum-360))
      {
        max_strokes+=6*BezierQuantum+360;
        path_p=(PointInfo *) ResizeQuantumMemory(path_p,(size_t) max_strokes,
          sizeof(*path_p));
        path_q=(PointInfo *) ResizeQuantumMemory(path_q,(size_t) max_strokes,
          sizeof(*path_q));
        if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL))
          {
            polygon_primitive=(PrimitiveInfo *)
              RelinquishMagickMemory(polygon_primitive);
            return((PrimitiveInfo *) NULL);
          }
      }
    /*
      The cross product's sign tells which side of the path turns
      outward; emit the join on that side (bevel/miter/round) and the
      miter or bevel on the other.
    */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_q[q++]=box_q[1];
          path_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          /*
            Sweep a circular arc of radius mid around the joint vertex.
          */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=(MagickRealType) (2.0*MagickPI);
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          path_q[q].x=box_q[1].x;
          path_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(MagickRealType) (j*(theta.q-theta.p)/arc_segments);
            path_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          path_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_p[p++]=box_p[1];
          path_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          /*
            Sweep a circular arc of radius mid around the joint vertex.
          */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=(MagickRealType) (2.0*MagickPI);
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          path_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(MagickRealType) (j*(theta.q-theta.p)/arc_segments);
            path_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          path_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /*
      Advance: segment q becomes segment p for the next iteration.
    */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  path_p[p++]=box_p[1];
  path_q[q++]=box_q[1];
  /*
    Trace stroked polygon: one side forward, the other side reversed, so
    the outline forms a single closed polygon.  closed_path is used as
    0/1 in the size arithmetic (relies on MagickTrue == 1).
  */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  path_p=(PointInfo *) RelinquishMagickMemory(path_p);
  path_q=(PointInfo *) RelinquishMagickMemory(path_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
leet_cc_fmt_plug.c | /*
* Cracker for leet.cc hashes.
*
* hsh = bin2hex(hash("sha512", $password . $salt, true) ^ hash("whirlpool", $salt . $password, true))
* $salt == username
*
* Input hash format: username:hash
*
* This software is Copyright (c) 2016, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_leet;
#elif FMT_REGISTERS_H
john_register_one(&fmt_leet);
#else
#include "arch.h"
#include "openssl_local_overrides.h"
#include <openssl/opensslv.h>
#include <string.h>
#if (AC_BUILT && HAVE_WHIRLPOOL) || \
(!AC_BUILT && OPENSSL_VERSION_NUMBER >= 0x10000000 && !HAVE_NO_SSL_WHIRLPOOL)
#include <openssl/whrlpool.h>
#define WP_TYPE "OpenSSL"
#define sph_whirlpool_context WHIRLPOOL_CTX
#define sph_whirlpool_init(a) WHIRLPOOL_Init(a)
#define sph_whirlpool(a,b,c) WHIRLPOOL_Update(a,b,c)
#define sph_whirlpool_close(b,a) WHIRLPOOL_Final(a,b)
#else
#define WP_TYPE "SPH"
#include "sph_whirlpool.h"
#endif
#include "sha2.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
//#undef SIMD_COEF_64
//#undef SIMD_PARA_SHA512
#ifdef _OPENMP
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 256
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 128 // tuned on Core i7-6600U
#endif
#endif
#include <omp.h>
#endif
#include "simd-intrinsics.h"
#include "memdbg.h"
#ifdef SIMD_COEF_64
#define SHA512_TYPE SHA512_ALGORITHM_NAME
#else
#define SHA512_TYPE "32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#ifdef SIMD_COEF_64
#define PLAINTEXT_LENGTH (111-32)
#define MAX_SALT_LEN 32
#else
#define PLAINTEXT_LENGTH 125
#define MAX_SALT_LEN 256
#endif
#define FORMAT_LABEL "leet"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SHA-512(" SHA512_TYPE ") + Whirlpool(" WP_TYPE "/" ARCH_BITS_STR ")"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 64
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(uint64_t)
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
/* Test vectors in "username$hash" form: the part before '$' is the salt
 * (user name), the part after is 128 hex digits (64 raw bytes). */
static struct fmt_tests leet_tests[] = {
	{"salt$f86036a85e3ff84e73bf10769011ecdbccbf5aaed9df0240310776b42f5bb8776e612ab15a78bbfc39e867448a08337d97427e182e72922bbaa903ee75b2bfd4", "password"},
	{"Babeface$3e6380026fc262465934fd5352659c874e611cbf3229cdbf1407c3bae4c6f0b9c437470d202bccc65cf82faf883d299f1ab30ed841cd8f2472c58f4f05ac6ca3", "john"},
	{"user$b8baf965f515e41c9bf4bc31f0652f27b746c3155f79bc39d2ba8557a8e4a803fd4c0418d577957044bd403d98847750231cb9f03fb213dcddf73304180309dc", "ripper"},
	{"harvey$581e6f9aee99df55bb815bb608707a640a8deae3bad343d0421822518f2c9d8a053221356894628e30f70bf91d36ca2a7300407ec6686fefaa46cbad07b0f78e", "openwall"},
	{NULL}
};
/* Per-candidate plaintext buffers and their cached lengths (set in
 * leet_set_key(), consumed by crypt_all()). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
/* First 64 bits of SHA-512(pw.salt) XOR Whirlpool(salt.pw), one per
 * candidate (see crypt_all()); cmp_exact() re-checks all 64 bytes. */
static uint64_t (*crypt_out)[1];
static struct custom_salt {
	int saltlen;                      /* bytes used in salt[] */
	unsigned char salt[MAX_SALT_LEN]; /* raw salt (the user name), not NUL-terminated */
} *cur_salt;
/* Allocate the per-candidate key/length/hash buffers.  Under OpenMP the
 * key budget is scaled by the thread count times OMP_SCALE. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	threads *= OMP_SCALE;
	self->params.max_keys_per_crypt *= threads;
#endif
	saved_key = mem_calloc_align(sizeof(*saved_key),
	                             self->params.max_keys_per_crypt,
	                             MEM_ALIGN_WORD);
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	crypt_out = mem_calloc_align(sizeof(*crypt_out),
	                             self->params.max_keys_per_crypt,
	                             MEM_ALIGN_WORD);
}
/* Release the buffers allocated in init() (reverse order of allocation). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
}
// salt (username) is added to the ciphertext in the prepare function
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q;
p = ciphertext;
q = strchr(p, '$'); // end of salt
if (!q)
return 0;
if (q - p > 256)
return 0;
if (q - p == 0)
return 0;
q = strrchr(ciphertext, '$') + 1;
if (strlen(q) != BINARY_SIZE * 2)
goto err;
if (!ishex(q))
goto err;
return 1;
err:
return 0;
}
/* Join the login field and the hash field into the canonical
 * "username$hash" ciphertext (the salt is the user name).  Falls back to
 * the raw hash field whenever the merged form does not validate. */
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
	char *merged;

	if (!split_fields[0])
		return split_fields[1];
	if (strnlen(split_fields[1], BINARY_SIZE * 2 + 1) != BINARY_SIZE * 2)
		return split_fields[1];

	merged = mem_alloc(strlen(split_fields[0]) + strlen(split_fields[1]) + 2);
	sprintf(merged, "%s$%s", split_fields[0], split_fields[1]);
	if (valid(merged, self))
		return merged;

	MEM_FREE(merged);
	return split_fields[1];
}
/*
 * Extract the salt (everything before the last '$') into a static
 * custom_salt.  valid() is expected to have bounded the salt length
 * already, but clamp defensively so a bad ciphertext can never overflow
 * the fixed-size salt buffer.  memcpy replaces strncpy: the salt is raw
 * bytes, not a NUL-terminated string (the struct is zeroed first).
 */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *p, *q;
	size_t len;

	memset(&cs, 0, sizeof(cs));
	p = ciphertext;
	q = strrchr(ciphertext, '$');
	len = q - p;
	if (len > sizeof(cs.salt)) /* defensive clamp, see above */
		len = sizeof(cs.salt);
	memcpy(cs.salt, p, len);
	cs.saltlen = (int)len;
	return (void *)&cs;
}
/* Decode the 128 hex digits after the last '$' into BINARY_SIZE raw
 * bytes.  The union forces uint64_t alignment on the static buffer. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		uint64_t dummy;
	} buf;
	unsigned char *dst = buf.c;
	char *src = strrchr(ciphertext, '$') + 1;
	int i;

	for (i = 0; i < BINARY_SIZE; i++, src += 2)
		dst[i] = (atoi16[ARCH_INDEX(src[0])] << 4) |
		          atoi16[ARCH_INDEX(src[1])];

	return dst;
}
/* using our own binary_hash_x() functions allows us to avoid BE / LE issues */
/* Each returns the low PH_MASK_x bits of the stored 64-bit partial hash. */
static int binary_hash_0(void *binary) { return *((uint64_t *)binary) & PH_MASK_0; }
static int binary_hash_1(void *binary) { return *((uint64_t *)binary) & PH_MASK_1; }
static int binary_hash_2(void *binary) { return *((uint64_t *)binary) & PH_MASK_2; }
static int binary_hash_3(void *binary) { return *((uint64_t *)binary) & PH_MASK_3; }
static int binary_hash_4(void *binary) { return *((uint64_t *)binary) & PH_MASK_4; }
static int binary_hash_5(void *binary) { return *((uint64_t *)binary) & PH_MASK_5; }
static int binary_hash_6(void *binary) { return *((uint64_t *)binary) & PH_MASK_6; }
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
/* Remember the current salt for subsequent crypt_all()/cmp_exact() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/*
 * Compute the per-candidate partial hash:
 *   crypt_out[i] = first 64 bits of
 *       SHA-512(password . salt) XOR Whirlpool(salt . password)
 * Only 64 bits are kept here; cmp_exact() re-verifies with full digests.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
		sph_whirlpool_context wctx;
		int i;
		union {
			unsigned char buf[BINARY_SIZE];
			uint64_t p64[1];
		} output1[MAX_KEYS_PER_CRYPT], output2;
#ifdef SIMD_COEF_64
		/* Not sure why JTR_ALIGN(MEM_ALIGN_SIMD) does not work here
		 * but if used, it cores travis-ci, so we use mem_align instead */
		unsigned char _in[8*16*MAX_KEYS_PER_CRYPT+MEM_ALIGN_SIMD];
		unsigned char _out[8*8*MAX_KEYS_PER_CRYPT+MEM_ALIGN_SIMD];
		uint64_t *in = mem_align(_in, MEM_ALIGN_SIMD);
		uint64_t *out = mem_align(_out, MEM_ALIGN_SIMD);
		/* Build one flat 128-byte SHA-512 input block per lane:
		 * key . salt . 0x80, zero padding, bit length in the final
		 * 64-bit word (message must fit a single block — this is why
		 * PLAINTEXT_LENGTH/MAX_SALT_LEN are reduced in SIMD builds). */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			char *cp = &((char*)in)[128*i];
			memcpy(cp, saved_key[index+i], saved_len[index+i]);
			memcpy(&cp[saved_len[index+i]], cur_salt->salt, cur_salt->saltlen);
			cp[saved_len[index+i]+cur_salt->saltlen] = 0x80;
			in[i*16+15] = (saved_len[index+i]+cur_salt->saltlen)<<3;
			memset(&cp[saved_len[index+i]+cur_salt->saltlen+1], 0, 120-(saved_len[index+i]+cur_salt->saltlen+1));
		}
		SIMDSHA512body(in, out, NULL, SSEi_FLAT_IN);
		/* Extract the first 64 digest bits of each SIMD lane. */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
#if ARCH_LITTLE_ENDIAN==1
			output1[i].p64[0] = JOHNSWAP64(out[((i/SIMD_COEF_64)*8*SIMD_COEF_64+i%SIMD_COEF_64)]);
#else
			output1[i].p64[0] = out[((i/SIMD_COEF_64)*8*SIMD_COEF_64+i%SIMD_COEF_64)];
#endif
#else
		SHA512_CTX sctx;

		/* Scalar path: one candidate per crypt (MAX_KEYS_PER_CRYPT==1). */
		SHA512_Init(&sctx);
		SHA512_Update(&sctx, saved_key[index], saved_len[index]);
		SHA512_Update(&sctx, cur_salt->salt, cur_salt->saltlen);
		SHA512_Final(output1[0].buf, &sctx);
#endif
		/* Whirlpool(salt . password), then XOR the two 64-bit prefixes. */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			sph_whirlpool_init(&wctx);
			sph_whirlpool(&wctx, cur_salt->salt, cur_salt->saltlen);
			sph_whirlpool(&wctx, saved_key[index+i], saved_len[index+i]);
			sph_whirlpool_close(&wctx, output2.buf);
			crypt_out[index+i][0] = output1[i].p64[0] ^ output2.p64[0];
		}
	}
	return count;
}
/* Return 1 if any computed candidate shares its 64-bit partial hash with
 * the target binary. */
static int cmp_all(void *binary, int count)
{
	const uint64_t target = ((uint64_t *)binary)[0];
	int i;

	for (i = 0; i < count; i++) {
		if (crypt_out[i][0] == target)
			return 1;
	}
	return 0;
}
/* Compare the stored 64-bit partial hash of one candidate to the target. */
static int cmp_one(void *binary, int index)
{
	return ((uint64_t*)binary)[0] == crypt_out[index][0];
}
/* Recompute the full 64-byte SHA-512 XOR Whirlpool digest for one
 * candidate (scalar code regardless of SIMD build) and compare it against
 * the binary decoded from the ciphertext. */
static int cmp_exact(char *source, int index)
{
	// don't worry about SIMD here.
	// we already are 64 bit 'sure'. This extra check
	// is not really needed, but does not hurt much
	SHA512_CTX sctx;
	int i;
	void *bin = get_binary(source);
	sph_whirlpool_context wctx;
	unsigned char output1[BINARY_SIZE], output2[BINARY_SIZE];

	/* SHA-512(password . salt) */
	SHA512_Init(&sctx);
	SHA512_Update(&sctx, saved_key[index], saved_len[index]);
	SHA512_Update(&sctx, cur_salt->salt, cur_salt->saltlen);
	SHA512_Final(output1, &sctx);
	/* Whirlpool(salt . password) */
	sph_whirlpool_init(&wctx);
	sph_whirlpool(&wctx, cur_salt->salt, cur_salt->saltlen);
	sph_whirlpool(&wctx, saved_key[index], saved_len[index]);
	sph_whirlpool_close(&wctx, output2);
	/* XOR of the two digests is the stored hash */
	for (i = 0; i < BINARY_SIZE; ++i)
		output1[i] ^= output2[i];
	return !memcmp(output1, bin, BINARY_SIZE);
}
/* Store one candidate password (truncated to PLAINTEXT_LENGTH) and cache
 * its length; strnzcpyn NUL-terminates and returns the copied length. */
static void leet_set_key(char *key, int index)
{
	saved_len[index] =
		strnzcpyn(saved_key[index], key, sizeof(saved_key[index]));
}
/* Return the stored candidate password for the given index. */
static char *get_key(int index)
{
	return saved_key[index];
}
// Public domain hash function by DJ Bernstein
/* NOTE(review): only saltlen/4 bytes are mixed in although `s` advances
 * byte-wise — possibly meant to treat the salt as dwords; verify against
 * sibling formats.  Only affects salt-table distribution, not results. */
static int salt_hash(void *salt)
{
	unsigned int hash = 5381;
	struct custom_salt *fck = (struct custom_salt *)salt;
	unsigned char *s = fck->salt;
	int length = fck->saltlen / 4;

	while (length) {
		hash = ((hash << 5) + hash) ^ *s++;
		length--;
	}
	return hash & (SALT_HASH_SIZE - 1);
}
/* Format descriptor wiring the callbacks above into the JtR core. */
struct fmt_main fmt_leet = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0, /* minimum plaintext length */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			NULL, /* no tunable cost names */
		},
		{ NULL }, /* no ciphertext signature prefixes */
		leet_tests
	}, {
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			NULL /* no tunable cost value functions */
		},
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			binary_hash_5,
			binary_hash_6
		},
		salt_hash,
		NULL, /* no salt_compare */
		set_salt,
		leet_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
bit_vector_functions.h | #ifndef BIT_VECTOR_FUNCTIONS_H
#define BIT_VECTOR_FUNCTIONS_H
#include <vector>
#include <bitset>
#include "helper/confusion.h"
#include "config.h"
#include "io_and_allocation.hpp"
#include "updates_and_measures.cuh"
using std::vector;
// Hamming distance between the Boolean product A*B and the reference bit
// matrix C: the number of (row, col) positions where they disagree.
// Rows of C are packed 32 per word; vecId/vecLane below unpack that tiling.
template<typename bit_vector_t, typename index_t>
size_t computeHammingDistanceCPU(
    const vector<bit_vector_t> &Ab,
    const vector<bit_vector_t> &Bb,
    const vector<bit_vector_t> &Cb,
    const index_t height,
    const index_t width)
{
    size_t mismatches = 0;

    #pragma omp parallel for reduction(+:mismatches)
    for (index_t col = 0; col < width; ++col) {
        const uint32_t colBits = Bb[col];
        for (index_t row = 0; row < height; ++row) {
            const int predicted = (Ab[row] & colBits) ? 1 : 0;
            const int reference = (Cb[row / 32 * width + col] >> (row % 32)) & 1;
            mismatches += predicted ^ reference;
        }
    }
    return mismatches;
}
// Number of factor columns actually used: OR all rows of Ab together and
// popcount the result.
template<typename bit_vector_t>
int nonzeroDimension(vector<bit_vector_t>& Ab)
{
    bit_vector_t used = 0;
    for (size_t r = 0; r < Ab.size(); ++r)
        used |= Ab[r];

    return std::bitset<std::numeric_limits<bit_vector_t>::digits>(used).count();
}
// Full confusion matrix (TP/TN/FP/FN) of the Boolean product A*B against
// the reference bit matrix C (rows of C packed 32 per word).
template<typename bit_vector_t, typename index_t>
confusion_matrix computeErrorsCPU(
    const vector<bit_vector_t> &Ab,
    const vector<bit_vector_t> &Bb,
    const vector<bit_vector_t> &Cb,
    const index_t height,
    const index_t width)
{
    size_t true_positives = 0;
    size_t true_negatives = 0;
    size_t false_positives = 0;
    size_t false_negatives = 0;

    #pragma omp parallel for reduction(+:true_positives) \
                             reduction(+:true_negatives) \
                             reduction(+:false_positives) \
                             reduction(+:false_negatives)
    for (index_t col = 0; col < width; ++col) {
        const uint32_t colBits = Bb[col];
        for (index_t row = 0; row < height; ++row) {
            const bool predicted = (Ab[row] & colBits) != 0;
            const bool actual = ((Cb[row / 32 * width + col] >> (row % 32)) & 1) != 0;

            if (predicted)
                actual ? ++true_positives : ++false_positives;
            else
                actual ? ++false_negatives : ++true_negatives;
        }
    }
    return confusion_matrix(true_positives, true_negatives, false_positives, false_negatives);
}
// Count positions where both the Boolean product A*B and the reference
// matrix C are 1 (true positives).
template<typename bit_vector_t, typename index_t>
size_t computeTruePositiveCPU(
    const vector<bit_vector_t> &Ab,
    const vector<bit_vector_t> &Bb,
    const vector<bit_vector_t> &Cb,
    const index_t height,
    const index_t width)
{
    size_t hits = 0;

    #pragma omp parallel for reduction(+:hits)
    for (index_t col = 0; col < width; ++col) {
        const uint32_t colBits = Bb[col];
        for (index_t row = 0; row < height; ++row) {
            const int reference = (Cb[row / 32 * width + col] >> (row % 32)) & 1;
            if (reference && (Ab[row] & colBits))
                ++hits;
        }
    }
    return hits;
}
// Sum of per-column Jaccard indices TP / (TP + FP + FN) between the
// Boolean product A*B and the reference matrix C.
template<typename bit_vector_t, typename index_t>
float computeJaccardCPU(
    const vector<bit_vector_t> &Ab,
    const vector<bit_vector_t> &Bb,
    const vector<bit_vector_t> &Cb,
    const index_t height,
    const index_t width)
{
    float total = 0;

    #pragma omp parallel for reduction(+:total)
    for (index_t col = 0; col < width; ++col) {
        const uint32_t colBits = Bb[col];
        size_t tp = 0, fp = 0, fn = 0;
        for (index_t row = 0; row < height; ++row) {
            const bool predicted = (Ab[row] & colBits) != 0;
            const bool actual = ((Cb[row / 32 * width + col] >> (row % 32)) & 1) != 0;
            if (predicted && actual)
                ++tp;
            else if (predicted)
                ++fp;
            else if (actual)
                ++fn;
        }
        total += (float) tp / (tp + fp + fn);
    }
    return total;
}
// Weighted distance between A*B and C, accumulated through the
// error_measure() penalty function.
template<typename bit_factor_t, typename bit_matrix_t, typename index_t, typename error_t>
error_t computeDistanceCPU(
    const vector<bit_factor_t> &Ab,
    const vector<bit_factor_t> &Bb,
    const vector<bit_matrix_t> &Cb,
    const index_t height,
    const index_t width,
    const error_t weight)
{
    error_t total = 0;

    #pragma omp parallel for reduction(+:total)
    for (index_t row = 0; row < height; ++row) {
        const uint32_t rowBits = Ab[row];
        for (index_t col = 0; col < width; ++col) {
            const int predicted = (rowBits & Bb[col]) ? 1 : 0;
            const int reference = (Cb[row / 32 * width + col] >> (row % 32)) & 1;
            total += error_measure(predicted, reference, weight);
        }
    }
    return total;
}
// Fraction of set bits in each row of the packed bit matrix C.
template<typename bit_vector_t, typename index_t, typename error_t = float>
vector<error_t> computeDensitiesRows(
    const vector<bit_vector_t> &Cb,
    const index_t height,
    const index_t width)
{
    vector<error_t> density_rows(height);

    #pragma omp parallel for
    for (index_t row = 0; row < height; ++row) {
        size_t ones = 0;
        for (index_t col = 0; col < width; ++col)
            ones += (Cb[row / 32 * width + col] >> (row % 32)) & 1;
        density_rows[row] = (error_t) ones / width;
    }
    return density_rows;
}
// Fraction of set bits in each column of the packed bit matrix C.
template<typename bit_vector_t, typename index_t, typename error_t = float>
vector<error_t> computeDensitiesCols(
    const vector<bit_vector_t> &Cb,
    const index_t height,
    const index_t width)
{
    vector<error_t> density_cols(width);

    #pragma omp parallel for
    for (index_t col = 0; col < width; ++col) {
        size_t ones = 0;
        for (index_t row = 0; row < height; ++row)
            ones += (Cb[row / 32 * width + col] >> (row % 32)) & 1;
        density_cols[col] = (error_t) ones / height;
    }
    return density_cols;
}
// Per-row inverse density width / popcount(row); a zero count is bumped
// to 1 to avoid division by zero.
template<typename bit_vector_t, typename index_t, typename error_t = float>
vector<error_t> computeInverseDensitiesRows(
    const vector<bit_vector_t> &Cb,
    const index_t height,
    const index_t width)
{
    vector<error_t> inverse_density_rows(height);

    #pragma omp parallel for
    for (index_t row = 0; row < height; ++row) {
        size_t ones = 0;
        for (index_t col = 0; col < width; ++col)
            ones += (Cb[row / 32 * width + col] >> (row % 32)) & 1;
        if (ones == 0)
            ones = 1; // guard against division by zero
        inverse_density_rows[row] = (error_t) width / ones;
    }
    return inverse_density_rows;
}
// Per-column inverse density height / popcount(column); a zero count is
// bumped to 1 to avoid division by zero.
template<typename bit_vector_t, typename index_t, typename error_t = float>
vector<error_t> computeInverseDensitiesCols(
    const vector<bit_vector_t> &Cb,
    const index_t height,
    const index_t width)
{
    vector<error_t> inverse_density_cols(width);

    #pragma omp parallel for
    for (index_t col = 0; col < width; ++col) {
        size_t ones = 0;
        for (index_t row = 0; row < height; ++row)
            ones += (Cb[row / 32 * width + col] >> (row % 32)) & 1;
        if (ones == 0)
            ones = 1; // guard against division by zero
        inverse_density_cols[col] = (error_t) height / ones;
    }
    return inverse_density_cols;
}
// FIX: updateColumnPart is defined *below* this function and no declaration
// is visible here.  Because its arguments are std:: / builtin types, ADL at
// instantiation cannot find it either, so the call is ill-formed on
// conforming two-phase-lookup compilers.  Provide a forward declaration.
template<typename bit_vector_t, typename index_t>
void updateColumnPart(
    vector<bit_vector_t> &Ab,
    const index_t size_A,
    const uint8_t factorDim,
    const uint8_t column,
    const float density,
    const index_t startline,
    const index_t numlines,
    const uint32_t seed);

// Re-randomize one factor column over all rows of Ab (delegates to
// updateColumnPart covering the whole range).
template<typename bit_vector_t, typename index_t>
void updateWholeColumn(
    vector<bit_vector_t> &Ab,
    const index_t size_A,
    const uint8_t factorDim,
    const uint8_t column,
    const float density,
    const uint32_t seed)
{
    // FIX: pass index_t(0), not the int literal 0 — mixing them makes
    // template argument deduction for index_t ambiguous when index_t != int.
    updateColumnPart(Ab, size_A, factorDim, column, density, index_t(0), size_A, seed);
}
// Randomly (re)initialize bit `column` for `numlines` rows of Ab starting
// at `startline` (wrapping modulo size_A).  Each row draws a Bernoulli
// sample with probability getInitChance(density, factorDim), seeded
// deterministically per row.
template<typename bit_vector_t, typename index_t>
void updateColumnPart(
    vector<bit_vector_t> &Ab,
    const index_t size_A,
    const uint8_t factorDim,
    const uint8_t column,
    const float density,
    const index_t startline,
    const index_t numlines,
    const uint32_t seed)
{
    const double threshold = getInitChance(density, factorDim);

    #pragma omp for
    for (index_t line = 0; line < numlines; ++line) {
        const index_t row = (startline + line) % size_A;

        fast_kiss_state32_t state;
        state = get_initial_fast_kiss_state32(seed + row);

        if (fast_kiss32(state) < threshold * UINT32_MAX)
            Ab[row] |= 1 << column;    // draw a one
        else
            Ab[row] &= ~(1 << column); // draw a zero
    }
}
/**
 * For one factor bit k, decide row-by-row whether clearing (A_i_0) or
 * setting (A_i_1) bit k of A yields fewer total errors against C, write
 * the winner back into Ab, and accumulate the winner's confusion counts.
 * `transpose` selects how the packed bit matrix C is indexed.
 * NOTE(review): uses `#pragma omp for`, so this must be called from inside
 * an existing parallel region; each thread returns its own partial
 * confusion_matrix — confirm the caller combines them.
 */
template<bool transpose, typename bit_vector_t, typename index_t>
confusion_matrix optimizeWholeColumn(
    vector<bit_vector_t> &Ab,
    const index_t size_A,
    const vector<bit_vector_t> &Bb,
    const index_t size_B,
    const vector<bit_vector_t> &Cb,
    const uint8_t factorDim,
    const uint8_t k)
{
    confusion_matrix confusion_new;

    #pragma omp for
    for (index_t i = 0; i < size_A; ++i) {
        // candidate rows: bit k cleared vs. bit k set
        const bit_vector_t A_i_0 = Ab[i] & ~(1 << k);
        const bit_vector_t A_i_1 = Ab[i] | (1 << k);

        confusion_matrix confusion_0;
        confusion_matrix confusion_1;
        for(index_t j=0; j < size_B; ++j) {
            const index_t vecId = transpose ? j / 32 * size_A + i : i / 32 * size_B + j;
            const index_t vecLane = transpose ? j % 32 : i % 32;
            const int C_ij = (Cb[vecId] >> vecLane) & 1;

            const int product_0 = (A_i_0 & Bb[j]) ? 1 : 0;
            const int product_1 = (A_i_1 & Bb[j]) ? 1 : 0;

            confusion_0.TP += C_ij & product_0;
            confusion_1.TP += C_ij & product_1;
            confusion_0.FN += C_ij & !product_0;
            confusion_1.FN += C_ij & !product_1;
            confusion_0.FP += (!C_ij) & product_0;
            confusion_1.FP += (!C_ij) & product_1;
        }
        // ties (<=) favor the cleared bit, i.e. the sparser factor
        if(confusion_0.total_error() <= confusion_1.total_error()) {
            Ab[i] = A_i_0;
            confusion_new.TP += confusion_0.TP;
            confusion_new.FN += confusion_0.FN;
            confusion_new.FP += confusion_0.FP;
        }
        else {
            Ab[i] = A_i_1;
            confusion_new.TP += confusion_1.TP;
            confusion_new.FN += confusion_1.FN;
            confusion_new.FP += confusion_1.FP;
        }
    }
    return confusion_new;
}
/**
 * Simulated-annealing row update optimizing a (FN-weighted) Jaccard score.
 * For each of `numlines` rows (wrapping from `startline`), evaluate three
 * candidates: the current row A_i, a freshly drawn row A_i_draw, and the
 * current row with some bits flipped (A_i_flip = A_i ^ A_i_draw).  The
 * best candidate is accepted through the metro() criterion and the global
 * confusion counts are updated by the difference.
 * NOTE(review): A_i_draw (the mask from get_flip_mask_many) is itself used
 * as a candidate row, not only as a flip mask — presumably intentional
 * ("draw a new row"); confirm against the GPU kernel counterpart.
 * NOTE(review): `confusion_new` is a *reference* bound to confusion_draw;
 * the later `confusion_new = confusion_flip;` assignments copy flip's
 * counts INTO confusion_draw.  Behavior is correct because confusion_draw
 * is not read afterwards, but the aliasing is fragile.
 * NOTE(review): runs under `#pragma omp for` — must be called inside a
 * parallel region; each thread returns its own partial update.
 */
template<bool transpose, typename bit_vector_t, typename index_t>
confusion_matrix updateLinesJaccardCPU(vector<bit_vector_t> &Ab,
    const index_t size_A,
    const vector<bit_vector_t> &Bb,
    const index_t size_B,
    const vector<bit_vector_t> &Cb,
    const uint8_t factorDim,
    const index_t startline,
    const index_t numlines,
    const uint32_t seed,
    const float temperature,
    const float flipManyChance,
    const uint32_t flipManyDepth,
    const confusion_matrix confusion)
{
    confusion_matrix confusion_update;

    #pragma omp for
    for(index_t id=0; id < numlines; ++id) {
        const index_t i = (startline + id) % size_A;

        fast_kiss_state32_t state;
        state = get_initial_fast_kiss_state32(seed + id);

        const bit_vector_t A_i = Ab[i];
        const bit_vector_t A_i_draw = get_flip_mask_many(factorDim, state, flipManyDepth);
        const bit_vector_t A_i_flip = A_i ^ A_i_draw;

        confusion_matrix confusion_old;
        confusion_matrix confusion_draw;
        confusion_matrix confusion_flip;
        // score all three candidates against every column of B
        for(index_t j=0; j < size_B; ++j) {
            const index_t vecId = transpose ? j / 32 * size_A + i : i / 32 * size_B + j;
            const index_t vecLane = transpose ? j % 32 : i % 32;
            const int C_ij = (Cb[vecId] >> vecLane) & 1;

            const int product_old = (A_i & Bb[j]) ? 1 : 0;
            const int product_draw = (A_i_draw & Bb[j]) ? 1 : 0;
            const int product_flip = (A_i_flip & Bb[j]) ? 1 : 0;

            confusion_old.TP += C_ij & product_old;
            confusion_draw.TP += C_ij & product_draw;
            confusion_flip.TP += C_ij & product_flip;
            confusion_old.FN += C_ij & !product_old;
            confusion_draw.FN += C_ij & !product_draw;
            confusion_flip.FN += C_ij & !product_flip;
            confusion_old.FP += (!C_ij) & product_old;
            confusion_draw.FP += (!C_ij) & product_draw;
            confusion_flip.FP += (!C_ij) & product_flip;
        }
        // global TP adjusted by this row's candidate; FN weighted 3x
        const size_t all_tp_draw = confusion.TP - confusion_old.TP + confusion_draw.TP;
        const size_t all_tp_flip = confusion.TP - confusion_old.TP + confusion_flip.TP;
        const float jaccard_old = 1.0f * confusion.TP / (confusion.TP + 3*confusion_old.FN + confusion_old.FP);
        const float jaccard_draw = 1.0f * all_tp_draw / (all_tp_draw + 3*confusion_draw.FN + confusion_draw.FP);
        const float jaccard_flip = 1.0f * all_tp_flip / (all_tp_flip + 3*confusion_flip.FN + confusion_flip.FP);

        bit_vector_t A_i_new = A_i_draw;
        float jaccard_new = jaccard_draw;
        confusion_matrix& confusion_new = confusion_draw;
        if(jaccard_draw > jaccard_old) {
            // draw already improves; flip wins only if even better
            if(jaccard_flip > jaccard_draw) {
                A_i_new = A_i_flip;
                jaccard_new = jaccard_flip;
                confusion_new = confusion_flip;
            }
        } else {
            if(jaccard_flip > jaccard_old) {
                A_i_new = A_i_flip;
                jaccard_new = jaccard_flip;
                confusion_new = confusion_flip;
            } else {
                // neither candidate improves: pick one at random and let
                // the metro() acceptance below decide
                const uint32_t coin = fast_kiss32(state) % 2;
                if(coin) {
                    A_i_new = A_i_flip;
                    jaccard_new = jaccard_flip;
                    confusion_new = confusion_flip;
                }
            }
        }
        // Metropolis acceptance on the score delta
        if (metro(state, jaccard_old - jaccard_new, temperature)) {
            Ab[i] = A_i_new;
            confusion_update.TP += confusion_new.TP - confusion_old.TP;
            confusion_update.FP += confusion_new.FP - confusion_old.FP;
            confusion_update.FN += confusion_new.FN - confusion_old.FN;
        }
    }
    return confusion_update;
}
/**
 * Simulated-annealing row update minimizing the weighted error measure.
 * For each of `numlines` rows (wrapping from `startline`), propose a
 * randomly flipped row and accept it via the metro() criterion based on
 * the error delta over all columns of B.  Returns the accumulated error
 * change of this thread's rows.
 * NOTE(review): runs under `#pragma omp for` — must be called inside a
 * parallel region (see the commented-out standalone pragma below).
 */
template<bool transpose, typename bit_vector_t, typename index_t, typename error_t>
int vectorMatrixMultCompareLineCPU(vector<bit_vector_t> &Ab,
    const index_t size_A,
    const vector<bit_vector_t> &Bb,
    const index_t size_B,
    const vector<bit_vector_t> &Cb,
    const uint8_t factorDim,
    const index_t startline,
    const index_t numlines,
    const uint32_t seed,
    const float temperature,
    const float flipManyChance,
    const uint32_t flipManyDepth,
    const error_t weight)
{
    error_t error_update = 0;

    #pragma omp for
    // #pragma omp parallel for reduction(+:error_update)
    for(index_t id=0; id < numlines; ++id) {
        const index_t i = (startline + id) % size_A;

        fast_kiss_state32_t state;
        state = get_initial_fast_kiss_state32(seed + id);

        const bit_vector_t A_i = Ab[i];
        // proposal: flip a random subset of factor bits
        bit_vector_t A_i_changed = Ab[i] ^ get_flip_mask(factorDim, state, flipManyChance, flipManyDepth);

        // error delta of the proposal over all columns
        error_t error = 0;
        for(index_t j=0; j < size_B; ++j) {
            const index_t vecId = transpose ? j / 32 * size_A + i : i / 32 * size_B + j;
            const index_t vecLane = transpose ? j % 32 : i % 32;
            const int C_ij = (Cb[vecId] >> vecLane) & 1;

            const int product_old = (A_i & Bb[j]) ? 1 : 0;
            const int product_new = (A_i_changed & Bb[j]) ? 1 : 0;

            error += error_measure(product_new, C_ij, weight)
                   - error_measure(product_old, C_ij, weight);
        }
        // Metropolis acceptance (negative delta always accepted)
        if (metro(state, error, temperature, size_B)) {
            Ab[i] = A_i_changed;
            error_update += error;
        }
    }
    return error_update;
}
/* Minimal coordinate pair (row x_, column y_) for COO sparse output. */
template <typename index_t>
struct coo {
    coo(index_t x, index_t y) : x_{x}, y_{y} {}

    index_t x_;
    index_t y_;
};
// Sparse (COO) Boolean matrix product: emit a coordinate for every (i, j)
// where row i of A and column j of B share at least one factor bit.  The
// ordered insert keeps the output sorted by row despite the parallel loop.
template <typename bit_vector_t, typename index_t>
vector<coo<index_t>> computeProductCOO(
    const vector<bit_vector_t> &Ab,
    const vector<bit_vector_t> &Bb,
    const index_t height,
    const index_t width)
{
    vector<coo<index_t>> result;

    #pragma omp parallel for ordered schedule(static,1)
    for (index_t i = 0; i < height; ++i) {
        const bit_vector_t rowBits = Ab[i];

        vector<coo<index_t>> rowEntries;
        for (index_t j = 0; j < width; ++j) {
            if (rowBits & Bb[j])
                rowEntries.emplace_back(i, j);
        }

        #pragma omp ordered
        result.insert(result.end(), rowEntries.begin(), rowEntries.end());
    }
    return result;
}
#endif
|
alifold.c | /*
* minimum free energy folding
* for a set of aligned sequences
*
* c Ivo Hofacker
*
* Vienna RNA package
*/
/**
*** \file alifold.c
**/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include <limits.h>
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/datastructures/basic.h"
#include "ViennaRNA/mfe.h"
#include "ViennaRNA/fold.h"
#include "ViennaRNA/eval.h"
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/ribo.h"
#include "ViennaRNA/gquad.h"
#include "ViennaRNA/alifold.h"
#include "ViennaRNA/utils/alignments.h"
#include "ViennaRNA/loops/all.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
#define MAXSECTORS 500 /* dimension for a backtrack array */
/* some backward compatibility stuff */
PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL;
PRIVATE int backward_compat = 0;
#ifdef _OPENMP
#pragma omp threadprivate(backward_compat_compound, backward_compat)
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE float
wrap_alifold(const char **strings,
char *structure,
vrna_param_t *parameters,
int is_constrained,
int is_circular);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
/*
* ###########################################
* # deprecated functions below #
*###########################################
*/
/* Shared backend for the deprecated alifold()/circalifold() wrappers:
 * builds a comparative fold compound from the alignment, computes the MFE,
 * and optionally backtracks the consensus structure into `structure`.
 * Keeps a thread-local reference to the compound for the old global-state
 * API (free_alifold_arrays() etc.). */
PRIVATE float
wrap_alifold(const char **strings,
             char *structure,
             vrna_param_t *parameters,
             int is_constrained,
             int is_circular)
{
  vrna_fold_compound_t *vc;
  vrna_param_t *P;
  float mfe;

#ifdef _OPENMP
  /* Explicitly turn off dynamic threads */
  omp_set_dynamic(0);
#endif

  /* we need the parameter structure for hard constraints */
  if (parameters) {
    P = vrna_params_copy(parameters);
  } else {
    vrna_md_t md;
    set_model_details(&md);
    md.temperature = temperature;
    P = vrna_params(&md);
  }

  P->model_details.circ = is_circular;

  vc = vrna_fold_compound_comparative(strings, &(P->model_details), VRNA_OPTION_DEFAULT);

  if (parameters) {
    /* replace params if necessary */
    free(vc->params);
    vc->params = P;
  } else {
    /* P only carried the model details into the compound; presumably the
     * compound keeps its own copy — it is freed here either way */
    free(P);
  }

  /* handle hard constraints in pseudo dot-bracket format if passed via simple interface */
  if (is_constrained && structure)
    vrna_constraints_add(vc, (const char *)structure, VRNA_CONSTRAINT_DB_DEFAULT);

  /* replace the thread-local compound kept for the deprecated API */
  if (backward_compat_compound && backward_compat)
    vrna_fold_compound_free(backward_compat_compound);

  backward_compat_compound = vc;
  backward_compat = 1;

  /* call mfe() function without backtracking */
  mfe = vrna_mfe(vc, NULL);

  /* backtrack structure */
  if (structure && vc->params->model_details.backtrack) {
    char *ss;
    int length;
    sect bt_stack[MAXSECTORS];
    vrna_bp_stack_t *bp;

    length = vc->length;
    /* add a guess of how many G's may be involved in a G quadruplex */
    bp = (vrna_bp_stack_t *)vrna_alloc(sizeof(vrna_bp_stack_t) * (4 * (1 + length / 2)));

    vrna_backtrack_from_intervals(vc, bp, bt_stack, 0);

    ss = vrna_db_from_bp_stack(bp, length);
    strncpy(structure, ss, length + 1);
    free(ss);

    /* expose the pair list through the deprecated global `base_pair` */
    if (base_pair)
      free(base_pair);

    base_pair = bp;
  }

  return mfe;
}
/* Release the thread-local backward-compatibility fold compound, if any. */
PUBLIC void
free_alifold_arrays(void)
{
  if (!backward_compat || !backward_compat_compound)
    return;

  vrna_fold_compound_free(backward_compat_compound);
  backward_compat_compound  = NULL;
  backward_compat           = 0;
}
/* Deprecated: consensus MFE fold of an alignment (linear RNAs), honoring
 * the global `fold_constrained` flag.  Thin wrapper over wrap_alifold(). */
PUBLIC float
alifold(const char **strings,
        char *structure)
{
  return wrap_alifold(strings, structure, NULL, fold_constrained, 0);
}
/* Deprecated: consensus MFE fold of an alignment of circular RNAs,
 * honoring the global `fold_constrained` flag. */
PUBLIC float
circalifold(const char **strings,
            char *structure)
{
  return wrap_alifold(strings, structure, NULL, fold_constrained, 1);
}
/* Refresh the energy parameters of the backward-compatibility compound
 * from the current global model settings.  No-op when none exists. */
PUBLIC void
update_alifold_params(void)
{
  vrna_fold_compound_t *v;
  vrna_md_t md;

  if (!(backward_compat_compound && backward_compat))
    return;

  v = backward_compat_compound;
  free(v->params); /* free(NULL) is a safe no-op */
  set_model_details(&md);
  v->params = vrna_params(&md);
}
/* Evaluate the free energy of a consensus structure on an alignment with
 * G-quadruplex support enabled.  energy[0] receives the energy, energy[1]
 * the covariance contribution; returns energy[0] (or INF/100 on error). */
PUBLIC float
energy_of_ali_gquad_structure(const char **sequences,
                              const char *structure,
                              int n_seq,
                              float *energy)
{
  vrna_fold_compound_t *vc;
  vrna_md_t md;

  if (sequences[0] == NULL) {
    vrna_message_warning("energy_of_ali_gquad_structure: "
                         "no sequences in alignment!");
    return (float)(INF / 100.);
  }

  set_model_details(&md);
  md.gquad = 1;

  vc        = vrna_fold_compound_comparative(sequences, &md, VRNA_OPTION_EVAL_ONLY);
  energy[0] = vrna_eval_structure(vc, structure);
  energy[1] = vrna_eval_covar_structure(vc, structure);
  vrna_fold_compound_free(vc);

  return energy[0];
}
/* Evaluate the free energy of a consensus structure on an alignment.
 * energy[0] receives the energy, energy[1] the covariance contribution;
 * returns energy[0] (or INF/100 when the alignment is empty). */
PUBLIC float
energy_of_alistruct(const char **sequences,
                    const char *structure,
                    int n_seq,
                    float *energy)
{
  vrna_fold_compound_t *vc;
  vrna_md_t md;

  if (sequences[0] == NULL) {
    vrna_message_warning("energy_of_alistruct(): "
                         "no sequences in alignment!");
    return (float)(INF / 100.);
  }

  set_model_details(&md);

  vc        = vrna_fold_compound_comparative(sequences, &md, VRNA_OPTION_EVAL_ONLY);
  energy[0] = vrna_eval_structure(vc, structure);
  energy[1] = vrna_eval_covar_structure(vc, structure);
  vrna_fold_compound_free(vc);

  return energy[0];
}
#endif
|
ParFriends.h | /****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.6 -------------------------------------------------*/
/* date: 6/15/2017 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc --------------------------*/
/****************************************************************/
/*
Copyright (c) 2010-2017, The Regents of the University of California
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef _PAR_FRIENDS_H_
#define _PAR_FRIENDS_H_
#include "mpi.h"
#include <iostream>
#include <cstdarg>
#include "SpParMat.h"
#include "SpParMat3D.h"
#include "SpParHelper.h"
#include "MPIType.h"
#include "Friends.h"
#include "OptBuf.h"
#include "mtSpGEMM.h"
#include "MultiwayMerge.h"
#include <unistd.h>
#include <type_traits>
namespace combblas {
template <class IT, class NT, class DER>
class SpParMat;
/*************************************************************************************************/
/**************************** FRIEND FUNCTIONS FOR PARALLEL CLASSES ******************************/
/*************************************************************************************************/
/**
** Concatenate all the FullyDistVec<IT,NT> objects into a single one
**/
/**
 * Concatenate all FullyDistVec<IT,NT> objects in `vecs` into a single
 * vector, redistributing elements to their new owners via MPI_Alltoallv.
 * All inputs must live on the same communication grid.
 *
 * FIXES:
 *  - the single-input shortcut returned vecs[1] (out-of-bounds when
 *    size() == 1); it must return vecs[0].
 *  - recvdatabuf was never freed (memory leak).
 */
template <typename IT, typename NT>
FullyDistVec<IT,NT> Concatenate ( std::vector< FullyDistVec<IT,NT> > & vecs)
{
	if(vecs.size() < 1)
	{
		SpParHelper::Print("Warning: Nothing to concatenate, returning empty ");
		return FullyDistVec<IT,NT>();
	}
	else if (vecs.size() < 2)
	{
		return vecs[0];	/* FIX: was vecs[1], an out-of-bounds read */
	}
	else
	{
		typename std::vector< FullyDistVec<IT,NT> >::iterator it = vecs.begin();
		std::shared_ptr<CommGrid> commGridPtr = it->getcommgrid();
		MPI_Comm World = commGridPtr->GetWorld();

		IT nglen = it->TotalLength();	// new global length
		IT cumloclen = it->MyLocLength();	// existing cumulative local lengths
		++it;
		/* verify grid compatibility and total up the lengths */
		for(; it != vecs.end(); ++it)
		{
			if(*(commGridPtr) != *(it->getcommgrid()))
			{
				SpParHelper::Print("Grids are not comparable for FullyDistVec<IT,NT>::EWiseApply\n");
				MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
			}
			nglen += it->TotalLength();
			cumloclen += it->MyLocLength();
		}
		FullyDistVec<IT,NT> ConCat (commGridPtr, nglen, NT());
		int nprocs = commGridPtr->GetSize();

		/* bucket every local element by its owner in the concatenated vector */
		std::vector< std::vector< NT > > data(nprocs);
		std::vector< std::vector< IT > > inds(nprocs);
		IT gloffset = 0;
		for(it = vecs.begin(); it != vecs.end(); ++it)
		{
			IT loclen = it->LocArrSize();
			for(IT i=0; i < loclen; ++i)
			{
				IT locind;
				IT loffset = it->LengthUntil();
				int owner = ConCat.Owner(gloffset+loffset+i, locind);
				data[owner].push_back(it->arr[i]);
				inds[owner].push_back(locind);
			}
			gloffset += it->TotalLength();
		}

		/* exchange counts, then compute send/receive displacements */
		int * sendcnt = new int[nprocs];
		int * sdispls = new int[nprocs];
		for(int i=0; i<nprocs; ++i)
			sendcnt[i] = (int) data[i].size();

		int * rdispls = new int[nprocs];
		int * recvcnt = new int[nprocs];
		MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);  // share the request counts
		sdispls[0] = 0;
		rdispls[0] = 0;
		for(int i=0; i<nprocs-1; ++i)
		{
			sdispls[i+1] = sdispls[i] + sendcnt[i];
			rdispls[i+1] = rdispls[i] + recvcnt[i];
		}
		IT totrecv = std::accumulate(recvcnt,recvcnt+nprocs,static_cast<IT>(0));

		/* flatten and exchange the values */
		NT * senddatabuf = new NT[cumloclen];
		for(int i=0; i<nprocs; ++i)
		{
			std::copy(data[i].begin(), data[i].end(), senddatabuf+sdispls[i]);
			std::vector<NT>().swap(data[i]);	// delete data vectors
		}
		NT * recvdatabuf = new NT[totrecv];
		MPI_Alltoallv(senddatabuf, sendcnt, sdispls, MPIType<NT>(), recvdatabuf, recvcnt, rdispls, MPIType<NT>(), World);  // send data
		delete [] senddatabuf;

		/* flatten and exchange the destination indices */
		IT * sendindsbuf = new IT[cumloclen];
		for(int i=0; i<nprocs; ++i)
		{
			std::copy(inds[i].begin(), inds[i].end(), sendindsbuf+sdispls[i]);
			std::vector<IT>().swap(inds[i]);	// delete inds vectors
		}
		IT * recvindsbuf = new IT[totrecv];
		MPI_Alltoallv(sendindsbuf, sendcnt, sdispls, MPIType<IT>(), recvindsbuf, recvcnt, rdispls, MPIType<IT>(), World);  // send new inds
		DeleteAll(sendindsbuf, sendcnt, sdispls);

		/* scatter the received values into their local slots */
		for(int i=0; i<nprocs; ++i)
		{
			for(int j = rdispls[i]; j < rdispls[i] + recvcnt[i]; ++j)
			{
				ConCat.arr[recvindsbuf[j]] = recvdatabuf[j];
			}
		}
		DeleteAll(recvindsbuf, recvcnt, rdispls);
		delete [] recvdatabuf;	/* FIX: was leaked */

		return ConCat;
	}
}
/**
 * Verify that two distributed matrices may legally be multiplied:
 * inner dimensions must agree and the operands must not alias.
 * Aborts the MPI job on violation; returns true only when both checks pass.
 **/
template <typename MATRIXA, typename MATRIXB>
bool CheckSpGEMMCompliance(const MATRIXA & A, const MATRIXB & B)
{
	// Inner dimensions: cols(A) must equal rows(B)
	if(A.getncol() != B.getnrow())
	{
		std::ostringstream msg;
		msg << "Can not multiply, dimensions does not match"<< std::endl;
		msg << A.getncol() << " != " << B.getnrow() << std::endl;
		SpParHelper::Print(msg.str());
		MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
		return false;
	}
	// Aliasing: in-place A*A is unsupported; caller must copy one operand first
	const void * addrA = static_cast<const void*>(&A);
	const void * addrB = static_cast<const void*>(&B);
	if(addrA == addrB)
	{
		std::ostringstream msg;
		msg << "Can not multiply, inputs alias (make a temporary copy of one of them first)"<< std::endl;
		SpParHelper::Print(msg.str());
		MPI_Abort(MPI_COMM_WORLD, MATRIXALIAS);
		return false;
	}
	return true;
}
// Combined logic for prune, recovery, and select (MCL-style column sparsification).
// A is modified in place: entries <= hardThreshold are pruned; columns that lose
// too much mass are "recovered" (top-recoverNum entries kept) and overly dense
// columns are "selected" (top-selectNum entries kept).
// Lambdas replace std::bind2nd, which was deprecated in C++11 and removed in C++17.
template <typename IT, typename NT, typename DER>
void MCLPruneRecoverySelect(SpParMat<IT,NT,DER> & A, NT hardThreshold, IT selectNum, IT recoverNum, NT recoverPct, int kselectVersion)
{
#ifdef TIMING
	double t0, t1;
#endif
	// Prune and create a new pruned matrix (entries <= hardThreshold removed)
	SpParMat<IT,NT,DER> PrunedA = A.Prune([hardThreshold](NT val){ return val <= hardThreshold; }, false);

	// column-wise statistics of the pruned matrix
	FullyDistVec<IT,NT> colSums = PrunedA.Reduce(Column, std::plus<NT>(), 0.0);
	FullyDistVec<IT,NT> nnzPerColumnUnpruned = A.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;});
	FullyDistVec<IT,NT> nnzPerColumn = PrunedA.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;});

	// per-column prune threshold, initialized to the global hard threshold
	FullyDistVec<IT,NT> pruneCols(nnzPerColumn);
	pruneCols = hardThreshold;
	PrunedA.FreeMemory();

	// candidate recovery columns: fewer than recoverNum surviving nonzeros
	FullyDistSpVec<IT,NT> recoverCols(nnzPerColumn, [recoverNum](NT val){ return val < static_cast<NT>(recoverNum); });
	// recover only when nnzs in unpruned columns are greater than nnzs in pruned column
	recoverCols = EWiseApply<NT>(recoverCols, nnzPerColumnUnpruned,
				     [](NT spval, NT dval){return spval;},
				     [](NT spval, NT dval){return dval > spval;},
				     false, NT());
	recoverCols = recoverPct;
	// columns with nnz < r AND sum < recoverPct (pct)
	recoverCols = EWiseApply<NT>(recoverCols, colSums,
				     [](NT spval, NT dval){return spval;},
				     [](NT spval, NT dval){return dval < spval;},
				     false, NT());
	IT nrecover = recoverCols.getnnz();
	if(nrecover > 0)
	{
#ifdef TIMING
		t0=MPI_Wtime();
#endif
		// per-column k-th largest value becomes that column's prune threshold
		A.Kselect(recoverCols, recoverNum, kselectVersion);
#ifdef TIMING
		t1=MPI_Wtime();
		mcl_kselecttime += (t1-t0);
#endif
		pruneCols.Set(recoverCols);
#ifdef COMBBLAS_DEBUG
		std::ostringstream outs;
		outs << "Number of columns needing recovery: " << nrecover << std::endl;
		SpParHelper::Print(outs.str());
#endif
	}

	if(selectNum>0)
	{
		// remaining columns (not in recoverCols) will be up for selection
		FullyDistSpVec<IT,NT> selectCols = EWiseApply<NT>(recoverCols, colSums,
								  [](NT spval, NT dval){return spval;},
								  [](NT spval, NT dval){return spval==-1;},
								  true, static_cast<NT>(-1));
		selectCols = selectNum;
		selectCols = EWiseApply<NT>(selectCols, nnzPerColumn,
					    [](NT spval, NT dval){return spval;},
					    [](NT spval, NT dval){return dval > spval;},
					    false, NT());
		IT nselect = selectCols.getnnz();

		if(nselect > 0 )
		{
#ifdef TIMING
			t0=MPI_Wtime();
#endif
			A.Kselect(selectCols, selectNum, kselectVersion);  // PrunedA would also work
#ifdef TIMING
			t1=MPI_Wtime();
			mcl_kselecttime += (t1-t0);
#endif
			pruneCols.Set(selectCols);
#ifdef COMBBLAS_DEBUG
			std::ostringstream outs;
			outs << "Number of columns needing selection: " << nselect << std::endl;
			SpParHelper::Print(outs.str());
#endif
#ifdef TIMING
			t0=MPI_Wtime();
#endif
			SpParMat<IT,NT,DER> selectedA = A.PruneColumn(pruneCols, std::less<NT>(), false);
#ifdef TIMING
			t1=MPI_Wtime();
			mcl_prunecolumntime += (t1-t0);
#endif
			if(recoverNum>0 )  // recovery can be attempted after selection
			{
				FullyDistVec<IT,NT> nnzPerColumn1 = selectedA.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;});
				FullyDistVec<IT,NT> colSums1 = selectedA.Reduce(Column, std::plus<NT>(), 0.0);
				selectedA.FreeMemory();

				// selected columns with nnz < recoverNum (r)
				selectCols = recoverNum;
				selectCols = EWiseApply<NT>(selectCols, nnzPerColumn1,
							    [](NT spval, NT dval){return spval;},
							    [](NT spval, NT dval){return dval < spval;},
							    false, NT());

				// selected columns with sum < recoverPct (pct)
				selectCols = recoverPct;
				selectCols = EWiseApply<NT>(selectCols, colSums1,
							    [](NT spval, NT dval){return spval;},
							    [](NT spval, NT dval){return dval < spval;},
							    false, NT());

				IT n_recovery_after_select = selectCols.getnnz();
				if(n_recovery_after_select>0)
				{
					// mclExpandVector2 does it on the original vector
					// mclExpandVector1 does it one pruned vector
#ifdef TIMING
					t0=MPI_Wtime();
#endif
					A.Kselect(selectCols, recoverNum, kselectVersion);  // Kselect on PrunedA might give different result
#ifdef TIMING
					t1=MPI_Wtime();
					mcl_kselecttime += (t1-t0);
#endif
					pruneCols.Set(selectCols);
#ifdef COMBBLAS_DEBUG
					std::ostringstream outs1;
					// fixed: previously printed nselect instead of the recovery count
					outs1 << "Number of columns needing recovery after selection: " << n_recovery_after_select << std::endl;
					SpParHelper::Print(outs1.str());
#endif
				}
			}
		}
	}

	// final prune, applied in place using the per-column thresholds
#ifdef TIMING
	t0=MPI_Wtime();
#endif
	A.PruneColumn(pruneCols, std::less<NT>(), true);
#ifdef TIMING
	t1=MPI_Wtime();
	mcl_prunecolumntime += (t1-t0);
#endif

	// Add loops for empty columns
	if(recoverNum<=0 )  // if recoverNum>0, recovery would have added nonzeros in empty columns
	{
		FullyDistVec<IT,NT> nnzPerColumnA = A.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;});
		FullyDistSpVec<IT,NT> emptyColumns(nnzPerColumnA, [](NT val){ return val == static_cast<NT>(0.0); });
		emptyColumns = 1.00;
		//Ariful: We need a selective AddLoops function with a sparse vector
		//A.AddLoops(emptyColumns);
	}
}
/**
 * Estimate the number of floating-point operations a SUMMA SpGEMM of A*B would
 * perform, by broadcasting the operand blocks and summing local FLOP counts.
 * @param clearA  if true, free A's local storage once counting is done
 * @param clearB  if true, free B's local storage once counting is done
 * @return global FLOP count (sum over all processes)
 **/
template <typename SR, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
IU EstimateFLOP
		(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false)
{
	int stages, dummy;  // last two parameters of ProductGrid are ignored for Synch multiplication
	std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);

	IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
	IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages);
	SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld());

	// Remotely fetched matrices are stored as pointers
	UDERA * ARecv;
	UDERB * BRecv;
	IU local_flops = 0;

	int Aself = (A.commGrid)->GetRankInProcRow();
	int Bself = (B.commGrid)->GetRankInProcCol();

	for(int i = 0; i < stages; ++i)
	{
		std::vector<IU> ess;
		if(i == Aself)
		{
			ARecv = A.spSeq;  // shallow-copy
		}
		else
		{
			ess.resize(UDERA::esscount);
			for(int j=0; j< UDERA::esscount; ++j)
			{
				ess[j] = ARecvSizes[j][i];  // essentials of the ith matrix in this row
			}
			ARecv = new UDERA();  // first, create the object
		}
		SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);  // then, receive its elements

		ess.clear();
		if(i == Bself)
		{
			BRecv = B.spSeq;  // shallow-copy
		}
		else
		{
			ess.resize(UDERB::esscount);
			for(int j=0; j< UDERB::esscount; ++j)
			{
				ess[j] = BRecvSizes[j][i];
			}
			BRecv = new UDERB();
		}
		SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);  // then, receive its elements

		local_flops += EstimateLocalFLOP<SR>
				(*ARecv, *BRecv,  // parameters themselves
				 i != Aself,      // 'delete A' condition
				 i != Bself);     // 'delete B' condition
	}
	if(clearA && A.spSeq != NULL) {
		delete A.spSeq;
		A.spSeq = NULL;
	}
	if(clearB && B.spSeq != NULL) {
		delete B.spSeq;
		B.spSeq = NULL;
	}

	SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
	SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);

	IU global_flops = 0;
	// MPIType<IU>() matches the actual width of IU; the hard-coded
	// MPI_LONG_LONG_INT was only correct when IU happened to be 64-bit.
	MPI_Allreduce(&local_flops, &global_flops, 1, MPIType<IU>(), MPI_SUM, A.getcommgrid()->GetWorld());
	return global_flops;
}
/**
 * Memory-efficient SpGEMM: broadcasts A multiple times (#phases) in order to
 * save storage in the output; only uses 1/phases of C memory if the
 * threshold/max limits are proper. Each phase computes a column-slice of C,
 * sparsifies it with MCLPruneRecoverySelect, and concatenates the slices.
 * Parameters:
 *  - computationKernel: 1 means hash-based, 2 means heap-based
 *    (any other value now falls back to heap-based instead of using an
 *     uninitialized pointer)
 *  - perProcessMemory: available memory per process in GB; if > 0, the number
 *    of phases is re-estimated from it
 */
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,NUO,UDERO> MemEfficientSpGEMM (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B,
					   int phases, NUO hardThreshold, IU selectNum, IU recoverNum, NUO recoverPct, int kselectVersion, int computationKernel, int64_t perProcessMemory)
{
	typedef typename UDERA::LocalIT LIA;
	typedef typename UDERB::LocalIT LIB;
	typedef typename UDERO::LocalIT LIC;

	int myrank;
	MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
	if(A.getncol() != B.getnrow())
	{
		std::ostringstream outs;
		outs << "Can not multiply, dimensions does not match"<< std::endl;
		outs << A.getncol() << " != " << B.getnrow() << std::endl;
		SpParHelper::Print(outs.str());
		MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
		return SpParMat< IU,NUO,UDERO >();
	}
	if(phases <1 || phases >= A.getncol())
	{
		SpParHelper::Print("MemEfficientSpGEMM: The value of phases is too small or large. Resetting to 1.\n");
		phases = 1;
	}

	int stages, dummy;  // last two parameters of ProductGrid are ignored for Synch multiplication
	std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);

	double t0, t1;  // reused TIMING scratch (inner scopes declare their own)
#ifdef TIMING
	MPI_Barrier(A.getcommgrid()->GetWorld());
	t0 = MPI_Wtime();
#endif
	if(perProcessMemory>0)  // estimate the number of phases permitted by memory
	{
		int p;
		MPI_Comm World = GridC->GetWorld();
		MPI_Comm_size(World,&p);

		int64_t perNNZMem_in = sizeof(IU)*2 + sizeof(NU1);
		int64_t perNNZMem_out = sizeof(IU)*2 + sizeof(NUO);

		// max nnz(A) in a process
		int64_t lannz = A.getlocalnnz();
		int64_t gannz;
		MPI_Allreduce(&lannz, &gannz, 1, MPIType<int64_t>(), MPI_MAX, World);
		int64_t inputMem = gannz * perNNZMem_in * 4;  // for four copies (two for SUMMA)

		// max nnz(A^2) stored by SUMMA in a process
		int64_t asquareNNZ = EstPerProcessNnzSUMMA(A,B, false);
		int64_t asquareMem = asquareNNZ * perNNZMem_out * 2;  // an extra copy in multiway merge and in selection/recovery step

		// estimate kselect memory
		int64_t d = ceil( (asquareNNZ * sqrt(p))/ B.getlocalcols() );  // average nnz per column in A^2 (overestimate: asquareNNZ is based on unmerged matrices)
		// this is equivalent to (asquareNNZ * p) / B.getcol()
		int64_t k = std::min(int64_t(std::max(selectNum, recoverNum)), d );
		int64_t kselectmem = B.getlocalcols() * k * 8 * 3;

		// estimate output memory
		int64_t outputNNZ = (B.getlocalcols() * k)/sqrt(p);
		int64_t outputMem = outputNNZ * perNNZMem_in * 2;

		// inputMem + outputMem + asquareMem/phases + kselectmem/phases < memory
		int64_t remainingMem = perProcessMemory*1000000000 - inputMem - outputMem;
		if(remainingMem > 0)
		{
			phases = 1 + (asquareMem+kselectmem) / remainingMem;
		}

		if(myrank==0)
		{
			if(remainingMem < 0)
			{
				std::cout << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n Warning: input and output memory requirement is greater than per-process avaiable memory. Keeping phase to the value supplied at the command line. The program may go out of memory and crash! \n !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" << std::endl;
			}
#ifdef SHOW_MEMORY_USAGE
			int64_t maxMemory = kselectmem/phases + inputMem + outputMem + asquareMem / phases;
			if(maxMemory>1000000000)
				std::cout << "phases: " << phases << ": per process memory: " << perProcessMemory << " GB asquareMem: " << asquareMem/1000000000.00 << " GB" << " inputMem: " << inputMem/1000000000.00 << " GB" << " outputMem: " << outputMem/1000000000.00 << " GB" << " kselectmem: " << kselectmem/1000000000.00 << " GB" << std::endl;
			else
				std::cout << "phases: " << phases << ": per process memory: " << perProcessMemory << " GB asquareMem: " << asquareMem/1000000.00 << " MB" << " inputMem: " << inputMem/1000000.00 << " MB" << " outputMem: " << outputMem/1000000.00 << " MB" << " kselectmem: " << kselectmem/1000000.00 << " MB" << std::endl;
#endif
		}
	}

#ifdef TIMING
	MPI_Barrier(A.getcommgrid()->GetWorld());
	t1 = MPI_Wtime();
	mcl_symbolictime += (t1-t0);
#endif

	LIA C_m = A.spSeq->getnrow();
	LIB C_n = B.spSeq->getncol();

	// Split B column-wise into #phases pieces; one piece is multiplied per phase
	std::vector< UDERB > PiecesOfB;
	UDERB CopyB = *(B.spSeq);  // we allow alias matrices as input because of this local copy
	CopyB.ColSplit(phases, PiecesOfB);  // CopyB's memory is destroyed at this point
	MPI_Barrier(GridC->GetWorld());

	LIA ** ARecvSizes = SpHelper::allocate2D<LIA>(UDERA::esscount, stages);
	LIB ** BRecvSizes = SpHelper::allocate2D<LIB>(UDERB::esscount, stages);

	static_assert(std::is_same<LIA, LIB>::value, "local index types for both input matrices should be the same");
	static_assert(std::is_same<LIA, LIC>::value, "local index types for input and output matrices should be the same");

	SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());

	// Remotely fetched matrices are stored as pointers
	UDERA * ARecv;
	UDERB * BRecv;
	std::vector< UDERO > toconcatenate;  // one pruned column-slice of C per phase

	int Aself = (A.commGrid)->GetRankInProcRow();
	int Bself = (B.commGrid)->GetRankInProcCol();

	for(int p = 0; p< phases; ++p)
	{
		SpParHelper::GetSetSizes( PiecesOfB[p], BRecvSizes, (B.commGrid)->GetColWorld());
		std::vector< SpTuples<LIC,NUO> *> tomerge;
		for(int i = 0; i < stages; ++i)
		{
			std::vector<LIA> ess;
			if(i == Aself)  ARecv = A.spSeq;  // shallow-copy
			else
			{
				ess.resize(UDERA::esscount);
				for(int j=0; j< UDERA::esscount; ++j)
					ess[j] = ARecvSizes[j][i];  // essentials of the ith matrix in this row
				ARecv = new UDERA();  // first, create the object
			}

#ifdef TIMING
			MPI_Barrier(A.getcommgrid()->GetWorld());
			t0 = MPI_Wtime();
#endif
			SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);  // then, receive its elements
#ifdef TIMING
			MPI_Barrier(A.getcommgrid()->GetWorld());
			t1 = MPI_Wtime();
			mcl_Abcasttime += (t1-t0);
#endif
			ess.clear();

			if(i == Bself)  BRecv = &(PiecesOfB[p]);  // shallow-copy
			else
			{
				ess.resize(UDERB::esscount);
				for(int j=0; j< UDERB::esscount; ++j)
					ess[j] = BRecvSizes[j][i];
				BRecv = new UDERB();
			}
#ifdef TIMING
			MPI_Barrier(A.getcommgrid()->GetWorld());
			double t2=MPI_Wtime();
#endif
			SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);  // then, receive its elements
#ifdef TIMING
			MPI_Barrier(A.getcommgrid()->GetWorld());
			double t3=MPI_Wtime();
			mcl_Bbcasttime += (t3-t2);
#endif

#ifdef TIMING
			MPI_Barrier(A.getcommgrid()->GetWorld());
			double t4=MPI_Wtime();
#endif
			SpTuples<LIC,NUO> * C_cont;
			if(computationKernel == 1) C_cont = LocalSpGEMMHash<SR, NUO>(*ARecv, *BRecv,i != Aself, i != Bself, false);  // Hash SpGEMM without per-column sorting
			else C_cont = LocalSpGEMM<SR, NUO>(*ARecv, *BRecv,i != Aself, i != Bself);  // heap-based (also the fallback for unknown kernel ids; was previously uninitialized)
#ifdef TIMING
			MPI_Barrier(A.getcommgrid()->GetWorld());
			double t5=MPI_Wtime();
			mcl_localspgemmtime += (t5-t4);
#endif
			if(!C_cont->isZero())
				tomerge.push_back(C_cont);
			else
				delete C_cont;
		}  // all stages executed

#ifdef SHOW_MEMORY_USAGE
		int64_t gcnnz_unmerged, lcnnz_unmerged = 0;
		for(size_t i = 0; i < tomerge.size(); ++i)
		{
			lcnnz_unmerged += tomerge[i]->getnnz();
		}
		MPI_Allreduce(&lcnnz_unmerged, &gcnnz_unmerged, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD);
		int64_t summa_memory = gcnnz_unmerged*20;
		if(myrank==0)
		{
			if(summa_memory>1000000000)
				std::cout << p+1 << ". unmerged: " << summa_memory/1000000000.00 << "GB " ;
			else
				std::cout << p+1 << ". unmerged: " << summa_memory/1000000.00 << " MB " ;
		}
#endif

#ifdef TIMING
		MPI_Barrier(A.getcommgrid()->GetWorld());
		double t6=MPI_Wtime();
#endif
		// TODO: MultiwayMerge can directly return UDERO in order to avoid the extra copy
		SpTuples<LIC,NUO> * OnePieceOfC_tuples;
		if(computationKernel == 1) OnePieceOfC_tuples = MultiwayMergeHash<SR>(tomerge, C_m, PiecesOfB[p].getncol(), true, false);
		else OnePieceOfC_tuples = MultiwayMerge<SR>(tomerge, C_m, PiecesOfB[p].getncol(), true);  // fallback mirrors the kernel choice above

#ifdef SHOW_MEMORY_USAGE
		int64_t gcnnz_merged, lcnnz_merged ;
		lcnnz_merged = OnePieceOfC_tuples->getnnz();
		MPI_Allreduce(&lcnnz_merged, &gcnnz_merged, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD);
		int64_t merge_memory = gcnnz_merged*2*20;
		if(myrank==0)
		{
			if(merge_memory>1000000000)
				std::cout << " merged: " << merge_memory/1000000000.00 << "GB " ;
			else
				std::cout << " merged: " << merge_memory/1000000.00 << " MB " ;
		}
#endif

#ifdef TIMING
		MPI_Barrier(A.getcommgrid()->GetWorld());
		double t7=MPI_Wtime();
		mcl_multiwaymergetime += (t7-t6);
#endif
		UDERO * OnePieceOfC = new UDERO(* OnePieceOfC_tuples, false);
		delete OnePieceOfC_tuples;

		SpParMat<IU,NUO,UDERO> OnePieceOfC_mat(OnePieceOfC, GridC);
		MCLPruneRecoverySelect(OnePieceOfC_mat, hardThreshold, selectNum, recoverNum, recoverPct, kselectVersion);

#ifdef SHOW_MEMORY_USAGE
		int64_t gcnnz_pruned, lcnnz_pruned ;
		lcnnz_pruned = OnePieceOfC_mat.getlocalnnz();
		MPI_Allreduce(&lcnnz_pruned, &gcnnz_pruned, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD);
		int64_t prune_memory = gcnnz_pruned*2*20;
		if(myrank==0)
		{
			if(prune_memory>1000000000)
				std::cout << "Prune: " << prune_memory/1000000000.00 << "GB " << std::endl ;
			else
				std::cout << "Prune: " << prune_memory/1000000.00 << " MB " << std::endl ;
		}
#endif

		// ABAB: Change this to accept pointers to objects
		toconcatenate.push_back(OnePieceOfC_mat.seq());
	}

	UDERO * C = new UDERO(0,C_m, C_n,0);
	C->ColConcatenate(toconcatenate);  // ABAB: Change this to accept a vector of pointers to pointers to DER objects

	SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
	SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);  // fixed: was deallocated with UDERA::esscount
	return SpParMat<IU,NUO,UDERO> (C, GridC);
}
/**
 * Estimate how many phases MemEfficientSpGEMM needs so that the intermediate
 * product fits in perProcessMemory (GB per process).
 * Only the input-matrix and unmerged-product footprints enter the final
 * formula; the kselect/output terms are computed for the commented-out
 * alternative model and diagnostics.
 * @return at least 1; falls back to 1 (with a warning) when the inputs alone
 *         already exceed the memory budget (previously this divided by a
 *         non-positive remainingMem).
 **/
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
int CalculateNumberOfPhases (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B,
			     NUO hardThreshold, IU selectNum, IU recoverNum, NUO recoverPct, int kselectVersion, int64_t perProcessMemory){
	int myrank;
	MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
	int stages, dummy;  // last two parameters of ProductGrid are ignored for Synch multiplication
	std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);

	int p;
	MPI_Comm World = GridC->GetWorld();
	MPI_Comm_size(World,&p);

	int64_t perNNZMem_in = sizeof(IU)*2 + sizeof(NU1);
	int64_t perNNZMem_out = sizeof(IU)*2 + sizeof(NUO);

	// max nnz(A) in a process
	int64_t lannz = A.getlocalnnz();
	int64_t gannz;
	MPI_Allreduce(&lannz, &gannz, 1, MPIType<int64_t>(), MPI_MAX, World);
	int64_t inputMem = gannz * perNNZMem_in * 4;  // for four copies (two for SUMMA)

	// max nnz(A^2) stored by SUMMA in a process
	int64_t asquareNNZ = EstPerProcessNnzSUMMA(A,B, false);
	int64_t asquareMem = asquareNNZ * perNNZMem_out * 2;  // an extra copy in multiway merge and in selection/recovery step

	//inputMem + outputMem + asquareMem/phases + kselectmem/phases < memory
	int64_t remainingMem = perProcessMemory*1000000000 - inputMem;  // if each phase result is discarded
	int phases = 1;
	if(remainingMem > 0)
	{
		phases = 1 + asquareMem / remainingMem;
	}
	else if(myrank == 0)
	{
		std::cout << "Warning: CalculateNumberOfPhases: inputs alone exceed per-process memory; returning 1 phase." << std::endl;
	}
	return phases;
}
/**
 * Parallel C = A*B routine that uses a double buffered broadcasting scheme
 * @pre { Input matrices, A and B, should not alias }
 * Most memory efficient version available. Total stages: 2*sqrt(p)
 * Memory requirement during first sqrt(p) stages: <= (3/2)*(nnz(A)+nnz(B))+(1/2)*nnz(C)
 * Memory requirement during second sqrt(p) stages: <= nnz(A)+nnz(B)+nnz(C)
 * Final memory requirement: nnz(C) if clearA and clearB are true
 *
 * Implementation outline: each operand's local block is Split() into two
 * halves (A1/A2, B1/B2); a full SUMMA pass is run over each half so only half
 * of each operand is in flight per pass, then partial results are merged.
 * If clearA/clearB is false, the halves are Merge()d back so the inputs are
 * left unchanged on exit.
 **/
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,NUO,UDERO> Mult_AnXBn_DoubleBuff
		(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false )
{
	if(!CheckSpGEMMCompliance(A,B) )
	{
		return SpParMat< IU,NUO,UDERO >();
	}
	typedef typename UDERA::LocalIT LIA;
	typedef typename UDERB::LocalIT LIB;
	typedef typename UDERO::LocalIT LIC;
	static_assert(std::is_same<LIA, LIB>::value, "local index types for both input matrices should be the same");
	static_assert(std::is_same<LIA, LIC>::value, "local index types for input and output matrices should be the same");

	int stages, dummy;  // last two parameters of ProductGrid are ignored for Synch multiplication
	std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
	LIA C_m = A.spSeq->getnrow();
	LIB C_n = B.spSeq->getncol();

	// Split each local block into two halves for the double-buffered passes.
	// B is transposed before Split (and the halves transposed back) so the
	// split happens along the dimension the column-by-column kernel expects.
	UDERA * A1seq = new UDERA();
	UDERA * A2seq = new UDERA();
	UDERB * B1seq = new UDERB();
	UDERB * B2seq = new UDERB();
	(A.spSeq)->Split( *A1seq, *A2seq);
	const_cast< UDERB* >(B.spSeq)->Transpose();
	(B.spSeq)->Split( *B1seq, *B2seq);
	// Transpose back for the column-by-column algorithm
	const_cast< UDERB* >(B1seq)->Transpose();
	const_cast< UDERB* >(B2seq)->Transpose();

	LIA ** ARecvSizes = SpHelper::allocate2D<LIA>(UDERA::esscount, stages);
	LIB ** BRecvSizes = SpHelper::allocate2D<LIB>(UDERB::esscount, stages);

	SpParHelper::GetSetSizes( *A1seq, ARecvSizes, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( *B1seq, BRecvSizes, (B.commGrid)->GetColWorld());

	// Remotely fetched matrices are stored as pointers
	UDERA * ARecv;
	UDERB * BRecv;
	std::vector< SpTuples<LIC,NUO> *> tomerge;  // partial products from both passes

	int Aself = (A.commGrid)->GetRankInProcRow();
	int Bself = (B.commGrid)->GetRankInProcCol();

	// First pass: SUMMA over the first halves (A1, B1)
	for(int i = 0; i < stages; ++i)
	{
		std::vector<LIA> ess;
		if(i == Aself)
		{
			ARecv = A1seq;  // shallow-copy
		}
		else
		{
			ess.resize(UDERA::esscount);
			for(int j=0; j< UDERA::esscount; ++j)
			{
				ess[j] = ARecvSizes[j][i];  // essentials of the ith matrix in this row
			}
			ARecv = new UDERA();  // first, create the object
		}
		SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);  // then, receive its elements
		ess.clear();

		if(i == Bself)
		{
			BRecv = B1seq;  // shallow-copy
		}
		else
		{
			ess.resize(UDERB::esscount);
			for(int j=0; j< UDERB::esscount; ++j)
			{
				ess[j] = BRecvSizes[j][i];
			}
			BRecv = new UDERB();
		}
		SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);  // then, receive its elements

		// before activating this remove transposing B1seq
		/*
		SpTuples<LIC,NUO> * C_cont = MultiplyReturnTuples<SR, NUO>
				(*ARecv, *BRecv,  // parameters themselves
				 false, true,     // transpose information (B is transposed)
				 i != Aself,      // 'delete A' condition
				 i != Bself);     // 'delete B' condition
		*/
		SpTuples<LIC,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>
				(*ARecv, *BRecv,  // parameters themselves
				 i != Aself,      // 'delete A' condition
				 i != Bself);     // 'delete B' condition

		if(!C_cont->isZero())
			tomerge.push_back(C_cont);
		else
			delete C_cont;
	}

	// First halves are no longer needed when the inputs may be destroyed
	if(clearA) delete A1seq;
	if(clearB) delete B1seq;

	// Set the new dimensions (essentials of the second halves)
	SpParHelper::GetSetSizes( *A2seq, ARecvSizes, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( *B2seq, BRecvSizes, (B.commGrid)->GetColWorld());

	// Start the second round: SUMMA over the second halves (A2, B2)
	for(int i = 0; i < stages; ++i)
	{
		std::vector<LIA> ess;
		if(i == Aself)
		{
			ARecv = A2seq;  // shallow-copy
		}
		else
		{
			ess.resize(UDERA::esscount);
			for(int j=0; j< UDERA::esscount; ++j)
			{
				ess[j] = ARecvSizes[j][i];  // essentials of the ith matrix in this row
			}
			ARecv = new UDERA();  // first, create the object
		}
		SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);  // then, receive its elements
		ess.clear();

		if(i == Bself)
		{
			BRecv = B2seq;  // shallow-copy
		}
		else
		{
			ess.resize(UDERB::esscount);
			for(int j=0; j< UDERB::esscount; ++j)
			{
				ess[j] = BRecvSizes[j][i];
			}
			BRecv = new UDERB();
		}
		SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);  // then, receive its elements

		// before activating this remove transposing B2seq
		/*
		SpTuples<LIC,NUO> * C_cont = MultiplyReturnTuples<SR, NUO>
				(*ARecv, *BRecv,  // parameters themselves
				 false, true,     // transpose information (B is transposed)
				 i != Aself,      // 'delete A' condition
				 i != Bself);     // 'delete B' condition
		*/
		SpTuples<LIC,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>
				(*ARecv, *BRecv,  // parameters themselves
				 i != Aself,      // 'delete A' condition
				 i != Bself);     // 'delete B' condition

		if(!C_cont->isZero())
			tomerge.push_back(C_cont);
		else
			delete C_cont;
	}

	SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
	SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);

	// Either free the inputs or stitch the halves back together so the
	// caller's matrices are unchanged.
	if(clearA)
	{
		delete A2seq;
		delete A.spSeq;
		A.spSeq = NULL;
	}
	else
	{
		(A.spSeq)->Merge(*A1seq, *A2seq);
		delete A1seq;
		delete A2seq;
	}
	if(clearB)
	{
		delete B2seq;
		delete B.spSeq;
		B.spSeq = NULL;
	}
	else
	{
		// halves were transposed back above; re-transpose so Merge sees the
		// same orientation B.spSeq had when it was Split
		B1seq->Transpose();
		B2seq->Transpose();
		(B.spSeq)->Merge(*B1seq, *B2seq);
		delete B1seq;
		delete B2seq;
		const_cast< UDERB* >(B.spSeq)->Transpose();  // transpose back to original
	}

	UDERO * C = new UDERO(MergeAll<SR>(tomerge, C_m, C_n,true), false);
	return SpParMat<IU,NUO,UDERO> (C, GridC);  // return the result object
}
/**
 * Parallel C = A*B routine that uses only MPI-1 features
 * Relies on simple blocking broadcast
 * @pre { Input matrices, A and B, should not alias }
 * @param clearA  free A's local storage after the multiply
 * @param clearB  free B's local storage after the multiply
 * @return the product as a new SpParMat on the product grid
 **/
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU, NUO, UDERO> Mult_AnXBn_Synch
		(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false )
{
	if(!CheckSpGEMMCompliance(A,B) )
	{
		return SpParMat< IU,NUO,UDERO >();
	}
	int stages, dummy;  // last two parameters of ProductGrid are ignored for Synch multiplication
	std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
	IU C_m = A.spSeq->getnrow();
	IU C_n = B.spSeq->getncol();

	//const_cast< UDERB* >(B.spSeq)->Transpose(); // do not transpose for colum-by-column multiplication

	IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
	IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages);

	SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld());

	// Remotely fetched matrices are stored as pointers
	UDERA * ARecv;
	UDERB * BRecv;
	std::vector< SpTuples<IU,NUO> *> tomerge;

	int Aself = (A.commGrid)->GetRankInProcRow();
	int Bself = (B.commGrid)->GetRankInProcCol();

	for(int i = 0; i < stages; ++i)
	{
		std::vector<IU> ess;
		if(i == Aself)
		{
			ARecv = A.spSeq;  // shallow-copy
		}
		else
		{
			ess.resize(UDERA::esscount);
			for(int j=0; j< UDERA::esscount; ++j)
			{
				ess[j] = ARecvSizes[j][i];  // essentials of the ith matrix in this row
			}
			ARecv = new UDERA();  // first, create the object
		}
		SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);  // then, receive its elements
		ess.clear();

		if(i == Bself)
		{
			BRecv = B.spSeq;  // shallow-copy
		}
		else
		{
			ess.resize(UDERB::esscount);
			for(int j=0; j< UDERB::esscount; ++j)
			{
				ess[j] = BRecvSizes[j][i];
			}
			BRecv = new UDERB();
		}
		SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);  // then, receive its elements

		SpTuples<IU,NUO> * C_cont = LocalSpGEMMHash<SR, NUO>
				(*ARecv, *BRecv,  // parameters themselves
				 i != Aself,      // 'delete A' condition
				 i != Bself);     // 'delete B' condition

		if(!C_cont->isZero())
			tomerge.push_back(C_cont);
		else
			delete C_cont;  // fix: empty partial results were previously leaked
#ifdef COMBBLAS_DEBUG
		std::ostringstream outs;
		outs << i << "th SUMMA iteration"<< std::endl;
		SpParHelper::Print(outs.str());
#endif
	}
	if(clearA && A.spSeq != NULL)
	{
		delete A.spSeq;
		A.spSeq = NULL;
	}
	if(clearB && B.spSeq != NULL)
	{
		delete B.spSeq;
		B.spSeq = NULL;
	}

	SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
	SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);

	SpTuples<IU,NUO> * C_tuples = MultiwayMerge<SR>(tomerge, C_m, C_n,false);
	UDERO * C = new UDERO(*C_tuples, false);
	delete C_tuples;

	//if(!clearB)
	//	const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original

	return SpParMat<IU,NUO,UDERO> (C, GridC);  // return the result object
}
/**
 * SUMMA SpGEMM with communication/computation overlap: the non-blocking
 * broadcasts of stage i are posted before the local multiplication of
 * stage i-1 is performed, hiding broadcast latency behind computation.
 * @param[in,out] A first operand; its local storage is freed when clearA is true
 * @param[in,out] B second operand; its local storage is freed when clearB is true
 * @return C = A*B distributed over the product grid
 *         (an empty matrix if CheckSpGEMMCompliance fails)
 **/
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU, NUO, UDERO> Mult_AnXBn_Overlap
(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false )
{
    int myrank;
    MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
    if(!CheckSpGEMMCompliance(A,B) )
    {
        return SpParMat< IU,NUO,UDERO >();
    }
    int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication
    std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
    IU C_m = A.spSeq->getnrow();
    IU C_n = B.spSeq->getncol();
    // do not transpose B: column-by-column multiplication is used here
    IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
    IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages);
    SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());
    SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld());
    // Remotely fetched matrices are stored as pointers, one slot per stage
    UDERA ** ARecv = new UDERA* [stages];
    UDERB ** BRecv = new UDERB* [stages];
    Arr<IU,NU1> Aarrinfo = A.seqptr()->GetArrays();
    Arr<IU,NU2> Barrinfo = B.seqptr()->GetArrays();
    // one request vector per stage so each stage's broadcasts can be completed independently
    std::vector< std::vector<MPI_Request> > ABCastIndarrayReq;
    std::vector< std::vector<MPI_Request> > ABCastNumarrayReq;
    std::vector< std::vector<MPI_Request> > BBCastIndarrayReq;
    std::vector< std::vector<MPI_Request> > BBCastNumarrayReq;
    for(int i = 0; i < stages; i++){
        ABCastIndarrayReq.push_back( std::vector<MPI_Request>(Aarrinfo.indarrs.size(), MPI_REQUEST_NULL) );
        ABCastNumarrayReq.push_back( std::vector<MPI_Request>(Aarrinfo.numarrs.size(), MPI_REQUEST_NULL) );
        BBCastIndarrayReq.push_back( std::vector<MPI_Request>(Barrinfo.indarrs.size(), MPI_REQUEST_NULL) );
        BBCastNumarrayReq.push_back( std::vector<MPI_Request>(Barrinfo.numarrs.size(), MPI_REQUEST_NULL) );
    }
    int Aself = (A.commGrid)->GetRankInProcRow();
    int Bself = (B.commGrid)->GetRankInProcCol();
    std::vector< SpTuples<IU,NUO> *> tomerge;
    for(int i = 0; i < stages; ++i){
        std::vector<IU> ess;
        if(i == Aself) ARecv[i] = A.spSeq; // shallow-copy
        else{
            ess.resize(UDERA::esscount);
            for(int j=0; j< UDERA::esscount; ++j) ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row
            ARecv[i] = new UDERA(); // first, create the object
        }
        SpParHelper::IBCastMatrix(GridC->GetRowWorld(), *(ARecv[i]), ess, i, ABCastIndarrayReq[i], ABCastNumarrayReq[i]); // then, receive its elements
        ess.clear();
        if(i == Bself) BRecv[i] = B.spSeq; // shallow-copy
        else{
            ess.resize(UDERB::esscount);
            for(int j=0; j< UDERB::esscount; ++j) ess[j] = BRecvSizes[j][i];
            BRecv[i] = new UDERB();
        }
        SpParHelper::IBCastMatrix(GridC->GetColWorld(), *(BRecv[i]), ess, i, BBCastIndarrayReq[i], BBCastNumarrayReq[i]); // then, receive its elements
        if(i > 0){
            // broadcasts of stage i-1 must complete before its pieces are multiplied
            MPI_Waitall(ABCastIndarrayReq[i-1].size(), ABCastIndarrayReq[i-1].data(), MPI_STATUSES_IGNORE);
            MPI_Waitall(ABCastNumarrayReq[i-1].size(), ABCastNumarrayReq[i-1].data(), MPI_STATUSES_IGNORE);
            MPI_Waitall(BBCastIndarrayReq[i-1].size(), BBCastIndarrayReq[i-1].data(), MPI_STATUSES_IGNORE);
            MPI_Waitall(BBCastNumarrayReq[i-1].size(), BBCastNumarrayReq[i-1].data(), MPI_STATUSES_IGNORE);
            SpTuples<IU,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>
                (*(ARecv[i-1]), *(BRecv[i-1]), // parameters themselves
                i-1 != Aself, // 'delete A' condition
                i-1 != Bself); // 'delete B' condition
            if(!C_cont->isZero()) tomerge.push_back(C_cont);
            // merge eagerly so the number of pending partial results stays small
            SpTuples<IU,NUO> * C_tuples = MultiwayMerge<SR>(tomerge, C_m, C_n,true);
            std::vector< SpTuples<IU,NUO> *>().swap(tomerge);
            tomerge.push_back(C_tuples);
        }
#ifdef COMBBLAS_DEBUG
        std::ostringstream outs;
        outs << i << "th SUMMA iteration"<< std::endl;
        SpParHelper::Print(outs.str());
#endif
    }
    // drain the final stage's broadcasts and multiply its pieces
    MPI_Waitall(ABCastIndarrayReq[stages-1].size(), ABCastIndarrayReq[stages-1].data(), MPI_STATUSES_IGNORE);
    MPI_Waitall(ABCastNumarrayReq[stages-1].size(), ABCastNumarrayReq[stages-1].data(), MPI_STATUSES_IGNORE);
    MPI_Waitall(BBCastIndarrayReq[stages-1].size(), BBCastIndarrayReq[stages-1].data(), MPI_STATUSES_IGNORE);
    MPI_Waitall(BBCastNumarrayReq[stages-1].size(), BBCastNumarrayReq[stages-1].data(), MPI_STATUSES_IGNORE);
    SpTuples<IU,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>
        (*(ARecv[stages-1]), *(BRecv[stages-1]), // parameters themselves
        stages-1 != Aself, // 'delete A' condition
        stages-1 != Bself); // 'delete B' condition
    if(!C_cont->isZero()) tomerge.push_back(C_cont);
    if(clearA && A.spSeq != NULL) {
        delete A.spSeq;
        A.spSeq = NULL;
    }
    if(clearB && B.spSeq != NULL) {
        delete B.spSeq;
        B.spSeq = NULL;
    }
    // BUGFIX: ARecv/BRecv were allocated with new[]; freeing them with plain
    // delete is undefined behavior, so use the array form
    delete [] ARecv;
    delete [] BRecv;
    SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
    SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
    // the last parameter to MultiwayMerge deletes the tomerge arrays
    SpTuples<IU,NUO> * C_tuples = MultiwayMerge<SR>(tomerge, C_m, C_n,true);
    std::vector< SpTuples<IU,NUO> *>().swap(tomerge);
    UDERO * C = new UDERO(*C_tuples, false);
    delete C_tuples;
    return SpParMat<IU,NUO,UDERO> (C, GridC); // return the result object
}
/**
* Estimate the maximum nnz needed to store in a process from all stages of SUMMA before reduction
* @pre { Input matrices, A and B, should not alias }
**/
template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
int64_t EstPerProcessNnzSUMMA(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool hashEstimate)
{
    // NOTE(review): hashEstimate is currently unused; the hash-based estimator
    // is always taken (see @OGUZ-EDIT below) — confirm whether a non-hash
    // path should be restored based on this flag.
    typedef typename UDERA::LocalIT LIA;
    typedef typename UDERB::LocalIT LIB;
    static_assert(std::is_same<LIA, LIB>::value, "local index types for both input matrices should be the same");
    int64_t nnzC_SUMMA = 0;
    if(A.getncol() != B.getnrow())
    {
        std::ostringstream outs;
        outs << "Can not multiply, dimensions does not match"<< std::endl;
        outs << A.getncol() << " != " << B.getnrow() << std::endl;
        SpParHelper::Print(outs.str());
        MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
        return nnzC_SUMMA;
    }
    int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication
    std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
    MPI_Barrier(GridC->GetWorld());
    LIA ** ARecvSizes = SpHelper::allocate2D<LIA>(UDERA::esscount, stages);
    LIB ** BRecvSizes = SpHelper::allocate2D<LIB>(UDERB::esscount, stages);
    SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());
    SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld());
    // Remotely fetched matrices are stored as pointers
    UDERA * ARecv;
    UDERB * BRecv;
    int Aself = (A.commGrid)->GetRankInProcRow();
    int Bself = (B.commGrid)->GetRankInProcCol();
    for(int i = 0; i < stages; ++i)
    {
        std::vector<LIA> ess;
        if(i == Aself)
        {
            ARecv = A.spSeq; // shallow-copy
        }
        else
        {
            ess.resize(UDERA::esscount);
            for(int j=0; j< UDERA::esscount; ++j)
            {
                ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row
            }
            ARecv = new UDERA(); // first, create the object
        }
        SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements
        ess.clear();
        if(i == Bself)
        {
            BRecv = B.spSeq; // shallow-copy
        }
        else
        {
            ess.resize(UDERB::esscount);
            for(int j=0; j< UDERB::esscount; ++j)
            {
                ess[j] = BRecvSizes[j][i];
            }
            BRecv = new UDERB();
        }
        SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements
        // no need to keep entries of colnnzC in larger precision
        // because colnnzC is of length nzc and estimates nnzs per column
        // @OGUZ-EDIT Using hash spgemm for estimation
        LIB* flopC = estimateFLOP(*ARecv, *BRecv);
        LIB* colnnzC = estimateNNZ_Hash(*ARecv, *BRecv, flopC);
        // guard against an empty local piece (no DCSC allocated)
        LIB nzc = (BRecv->GetDCSC() == NULL) ? 0 : BRecv->GetDCSC()->nzc;
        // BUGFIX: the per-column estimates must be folded into the running
        // total; previously colnnzC was computed and immediately discarded,
        // which made the function always return 0.
        int64_t nnzC_stage = 0;
        if(colnnzC != NULL)
        {
#ifdef THREADED
#pragma omp parallel for reduction (+:nnzC_stage)
#endif
            for(LIB k = 0; k < nzc; ++k)
            {
                nnzC_stage += colnnzC[k];
            }
        }
        nnzC_SUMMA += nnzC_stage;
        if (flopC) delete [] flopC;
        if(colnnzC) delete [] colnnzC;
        // delete received data
        if(i != Aself)
            delete ARecv;
        if(i != Bself)
            delete BRecv;
    }
    SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
    SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
    // the estimate is the maximum over all processes (worst-case memory need)
    int64_t nnzC_SUMMA_max = 0;
    MPI_Allreduce(&nnzC_SUMMA, &nnzC_SUMMA_max, 1, MPIType<int64_t>(), MPI_MAX, GridC->GetWorld());
    return nnzC_SUMMA_max;
}
/// Validates that matrix A and vector x can participate in an SpMV:
/// A's column count must equal x's global length and both must live on
/// the same communication grid. Aborts the MPI job on violation.
template <typename MATRIX, typename VECTOR>
void CheckSpMVCompliance(const MATRIX & A, const VECTOR & x)
{
    const bool dimsAgree = (A.getncol() == x.TotalLength());
    if(!dimsAgree)
    {
        std::ostringstream outs;
        outs << "Can not multiply, dimensions does not match"<< std::endl;
        outs << A.getncol() << " != " << x.TotalLength() << std::endl;
        SpParHelper::Print(outs.str());
        MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
    }
    const bool sameGrid = ( *(A.getcommgrid()) == *(x.getcommgrid()) );
    if(!sameGrid)
    {
        std::cout << "Grids are not comparable for SpMV" << std::endl;
        MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
    }
}
template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf);
/// Convenience overload of the index-valued SpMV that supplies a
/// default-constructed (unoptimized) communication buffer.
template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue)
{
    typedef typename promote_trait<NUM,IU>::T_promote T_promote;
    OptBuf<int32_t, T_promote> scratch;  // no graph500 optimization
    return SpMV<SR>(A, x, indexisvalue, scratch);
}
/**
* Step 1 of the sparse SpMV algorithm
* @param[in,out] trxlocnz, lenuntil,trxinds,trxnums { set or allocated }
* @param[in] indexisvalue
**/
/**
 * Step 1 of the sparse SpMV algorithm: exchange the local piece of x with the
 * diagonal-complement process so indices are expressed in matrix (sqrt(p)-piece)
 * coordinates.
 * @param[in,out] trxlocnz, lenuntil, trxinds, trxnums { set or allocated }
 * @param[in] indexisvalue when true, trxnums is NOT allocated (values are
 *            reconstructed from indices downstream)
 **/
template<typename IU, typename NV>
void TransposeVector(MPI_Comm & World, const FullyDistSpVec<IU,NV> & x, int32_t & trxlocnz, IU & lenuntil, int32_t * & trxinds, NV * & trxnums, bool indexisvalue)
{
    int32_t xlocnz = (int32_t) x.getlocnnz();
    int32_t roffst = (int32_t) x.RowLenUntil(); // since trxinds is int32_t
    int32_t roffset;
    IU luntil = x.LengthUntil();
    int diagneigh = x.commGrid->GetComplementRank();
    MPI_Status status;
    MPI_Sendrecv(&roffst, 1, MPIType<int32_t>(), diagneigh, TROST, &roffset, 1, MPIType<int32_t>(), diagneigh, TROST, World, &status);
    MPI_Sendrecv(&xlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, &trxlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, World, &status);
    MPI_Sendrecv(&luntil, 1, MPIType<IU>(), diagneigh, TRLUT, &lenuntil, 1, MPIType<IU>(), diagneigh, TRLUT, World, &status);
    // ABAB: Important observation is that local indices (given by x.ind) is 32-bit addressible
    // Copy them to 32 bit integers and transfer that to save 50% of off-node bandwidth
    trxinds = new int32_t[trxlocnz];
    int32_t * temp_xind = new int32_t[xlocnz];
#ifdef THREADED
#pragma omp parallel for
#endif
    for(int i=0; i< xlocnz; ++i)
        temp_xind[i] = (int32_t) x.ind[i];
    MPI_Sendrecv(temp_xind, xlocnz, MPIType<int32_t>(), diagneigh, TRI, trxinds, trxlocnz, MPIType<int32_t>(), diagneigh, TRI, World, &status);
    delete [] temp_xind;
    if(!indexisvalue)
    {
        trxnums = new NV[trxlocnz];
        MPI_Sendrecv(const_cast<NV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NV>(), diagneigh, TRX, World, &status);
    }
    // fullydist indexing (p pieces) -> matrix indexing (sqrt(p) pieces)
    // FIX: std::bind2nd was deprecated in C++11 and removed in C++17; use a lambda
    std::transform(trxinds, trxinds+trxlocnz, trxinds,
                   [roffset](int32_t loc) { return loc + roffset; });
}
/**
* Step 2 of the sparse SpMV algorithm
* @param[in,out] trxinds, trxnums { deallocated }
* @param[in,out] indacc, numacc { allocated }
* @param[in,out] accnz { set }
* @param[in] trxlocnz, lenuntil, indexisvalue
**/
// Step 2 of the sparse SpMV algorithm: all-gather the transposed vector
// pieces along the processor column so every process owns the full segment
// it needs for the local multiply. trxinds/trxnums are freed here;
// indacc/numacc are allocated and accnz is set.
template<typename IU, typename NV>
void AllGatherVector(MPI_Comm & ColWorld, int trxlocnz, IU lenuntil, int32_t * & trxinds, NV * & trxnums,
int32_t * & indacc, NV * & numacc, int & accnz, bool indexisvalue)
{
int colneighs, colrank;
MPI_Comm_size(ColWorld, &colneighs);
MPI_Comm_rank(ColWorld, &colrank);
// gather every column neighbor's nonzero count to build Allgatherv displacements
int * colnz = new int[colneighs];
colnz[colrank] = trxlocnz;
MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld);
int * dpls = new int[colneighs](); // displacements (zero initialized pid)
std::partial_sum(colnz, colnz+colneighs-1, dpls+1);
accnz = std::accumulate(colnz, colnz+colneighs, 0);
indacc = new int32_t[accnz];
numacc = new NV[accnz];
// ABAB: Future issues here, colnz is of type int (MPI limitation)
// What if the aggregate vector size along the processor row/column is not 32-bit addressible?
// This will happen when n/sqrt(p) > 2^31
// Currently we can solve a small problem (scale 32) with 4096 processor
// For a medium problem (scale 35), we'll need 32K processors which gives sqrt(p) ~ 180
// 2^35 / 180 ~ 2^29 / 3 which is not an issue !
#ifdef TIMING
double t0=MPI_Wtime();
#endif
MPI_Allgatherv(trxinds, trxlocnz, MPIType<int32_t>(), indacc, colnz, dpls, MPIType<int32_t>(), ColWorld);
delete [] trxinds;
if(indexisvalue)
{
// values equal global indices: reconstruct them from indacc plus the
// column-wide offset broadcast from the top of the processor column
IU lenuntilcol;
if(colrank == 0) lenuntilcol = lenuntil;
MPI_Bcast(&lenuntilcol, 1, MPIType<IU>(), 0, ColWorld);
for(int i=0; i< accnz; ++i) // fill numerical values from indices
{
numacc[i] = indacc[i] + lenuntilcol;
}
}
else
{
// values were transferred explicitly; gather them the same way as indices
MPI_Allgatherv(trxnums, trxlocnz, MPIType<NV>(), numacc, colnz, dpls, MPIType<NV>(), ColWorld);
delete [] trxnums;
}
#ifdef TIMING
double t1=MPI_Wtime();
cblas_allgathertime += (t1-t0);
#endif
DeleteAll(colnz,dpls);
}
/**
* Step 3 of the sparse SpMV algorithm, with the semiring
* @param[in,out] optbuf {scratch space for all-to-all (fold) communication}
* @param[in,out] indacc, numacc {index and values of the input vector, deleted upon exit}
* @param[in,out] sendindbuf, sendnumbuf {index and values of the output vector, created}
**/
// Step 3 of the sparse SpMV algorithm: the node-local multiply under
// semiring SR, producing per-row-neighbor send buffers for the fold phase.
// indacc/numacc are deleted here; sendindbuf/sendnumbuf/sdispls are
// allocated (unless optbuf is active, in which case optbuf's preallocated
// buffers are filled instead and sendindbuf et al. are left untouched).
template<typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void LocalSpMV(const SpParMat<IU,NUM,UDER> & A, int rowneighs, OptBuf<int32_t, OVT > & optbuf, int32_t * & indacc, IVT * & numacc,
int32_t * & sendindbuf, OVT * & sendnumbuf, int * & sdispls, int * sendcnt, int accnz, bool indexisvalue, PreAllocatedSPA<OVT> & SPA)
{
if(optbuf.totmax > 0) // graph500 optimization enabled
{
if(A.spSeq->getnsplit() > 0)
{
// optbuf.{inds/nums/dspls} and sendcnt are all pre-allocated and only filled by dcsc_gespmv_threaded
generic_gespmv_threaded_setbuffers<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs);
}
else
{
generic_gespmv<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs, indexisvalue);
}
DeleteAll(indacc,numacc);
}
else
{
if(A.spSeq->getnsplit() > 0)
{
// sendindbuf/sendnumbuf/sdispls are all allocated and filled by dcsc_gespmv_threaded
int totalsent = generic_gespmv_threaded<SR> (*(A.spSeq), indacc, numacc, accnz, sendindbuf, sendnumbuf, sdispls, rowneighs, SPA);
DeleteAll(indacc, numacc);
// derive per-neighbor counts from the displacement array
for(int i=0; i<rowneighs-1; ++i)
sendcnt[i] = sdispls[i+1] - sdispls[i];
sendcnt[rowneighs-1] = totalsent - sdispls[rowneighs-1];
}
else
{
// default SpMSpV
std::vector< int32_t > indy;
std::vector< OVT > numy;
generic_gespmv<SR>(*(A.spSeq), indacc, numacc, accnz, indy, numy, SPA);
DeleteAll(indacc, numacc);
int32_t bufsize = indy.size(); // as compact as possible
sendindbuf = new int32_t[bufsize];
sendnumbuf = new OVT[bufsize];
int32_t perproc = A.getlocalrows() / rowneighs;
// indy is sorted by row index, so a single pass splits the output
// into contiguous per-neighbor runs while rebasing indices locally
int k = 0; // index to buffer
for(int i=0; i<rowneighs; ++i)
{
int32_t end_this = (i==rowneighs-1) ? A.getlocalrows(): (i+1)*perproc;
while(k < bufsize && indy[k] < end_this)
{
sendindbuf[k] = indy[k] - i*perproc;
sendnumbuf[k] = numy[k];
++sendcnt[i];
++k;
}
}
sdispls = new int[rowneighs]();
std::partial_sum(sendcnt, sendcnt+rowneighs-1, sdispls+1);
//#endif
}
}
}
// non threaded
// non threaded
// K-way merge of sorted (index,value) lists into a single sorted pair of
// vectors, combining duplicate indices with SR::add. Each input list i has
// listSizes[i] entries and is assumed sorted by index (heap-based merge).
template <typename SR, typename IU, typename OVT>
void MergeContributions(int* listSizes, std::vector<int32_t *> & indsvec, std::vector<OVT *> & numsvec, std::vector<IU>& mergedind, std::vector<OVT>& mergednum)
{
int nlists = indsvec.size();
// this condition is checked in the caller SpMV function.
// I am still putting it here for completeness
if(nlists == 1)
{
// simply copy data
int veclen = listSizes[0];
mergedind.resize(veclen);
mergednum.resize(veclen);
for(int i=0; i<veclen; i++)
{
mergedind[i] = indsvec[0][i];
mergednum[i] = numsvec[0][i];
}
return;
}
// seed the min-heap with the head element of every non-empty list
int32_t hsize = 0;
int32_t inf = std::numeric_limits<int32_t>::min();
int32_t sup = std::numeric_limits<int32_t>::max();
KNHeap< int32_t, int32_t > sHeap(sup, inf);
int * processed = new int[nlists]();
for(int i=0; i<nlists; ++i)
{
if(listSizes[i] > 0)
{
// key, list_id
sHeap.insert(indsvec[i][0], i);
++hsize;
}
}
int32_t key, locv;
// pop the first element separately so the main loop can always compare
// against mergedind.back() for duplicate detection
if(hsize > 0)
{
sHeap.deleteMin(&key, &locv);
mergedind.push_back( static_cast<IU>(key));
mergednum.push_back(numsvec[locv][0]); // nothing is processed yet
if( (++(processed[locv])) < listSizes[locv] )
sHeap.insert(indsvec[locv][processed[locv]], locv);
else
--hsize;
}
while(hsize > 0)
{
sHeap.deleteMin(&key, &locv);
if(mergedind.back() == static_cast<IU>(key))
{
// same index seen again: combine under the semiring addition
mergednum.back() = SR::add(mergednum.back(), numsvec[locv][processed[locv]]);
// ABAB: Benchmark actually allows us to be non-deterministic in terms of parent selection
// We can just skip this addition operator (if it's a max/min select)
}
else
{
mergedind.push_back(static_cast<IU>(key));
mergednum.push_back(numsvec[locv][processed[locv]]);
}
// advance the list we just consumed from; retire it when exhausted
if( (++(processed[locv])) < listSizes[locv] )
sHeap.insert(indsvec[locv][processed[locv]], locv);
else
--hsize;
}
DeleteAll(processed);
}
// Parallel K-way merge: partitions the index range [0, maxindex) into
// nsplits chunks, merges each chunk independently with the serial
// MergeContributions, then concatenates the per-chunk results.
template <typename SR, typename IU, typename OVT>
void MergeContributions_threaded(int * & listSizes, std::vector<int32_t *> & indsvec, std::vector<OVT *> & numsvec, std::vector<IU> & mergedind, std::vector<OVT> & mergednum, IU maxindex)
{
int nlists = indsvec.size();
// this condition is checked in the caller SpMV function.
// I am still putting it here for completeness
if(nlists == 1)
{
// simply copy data
int veclen = listSizes[0];
mergedind.resize(veclen);
mergednum.resize(veclen);
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<veclen; i++)
{
mergedind[i] = indsvec[0][i];
mergednum[i] = numsvec[0][i];
}
return;
}
int nthreads=1;
#ifdef THREADED
#pragma omp parallel
{
nthreads = omp_get_num_threads();
}
#endif
int nsplits = 4*nthreads; // oversplit for load balance
nsplits = std::min(nsplits, (int)maxindex);
// splitters[k][i] = position in list k where chunk i begins
// (found by binary search since each list is sorted by index)
std::vector< std::vector<int32_t> > splitters(nlists);
for(int k=0; k< nlists; k++)
{
splitters[k].resize(nsplits+1);
splitters[k][0] = static_cast<int32_t>(0);
#pragma omp parallel for
for(int i=1; i< nsplits; i++)
{
IU cur_idx = i * (maxindex/nsplits);
auto it = std::lower_bound (indsvec[k], indsvec[k] + listSizes[k], cur_idx);
splitters[k][i] = (int32_t) (it - indsvec[k]);
}
splitters[k][nsplits] = listSizes[k];
}
// ------ perform merge in parallel ------
std::vector<std::vector<IU>> indsBuf(nsplits);
std::vector<std::vector<OVT>> numsBuf(nsplits);
//TODO: allocate these vectors here before calling MergeContributions
#pragma omp parallel for schedule(dynamic)
for(int i=0; i< nsplits; i++)
{
// build per-chunk views (pointer + length) into each input list
std::vector<int32_t *> tIndsVec(nlists);
std::vector<OVT *> tNumsVec(nlists);
std::vector<int> tLengths(nlists);
for(int j=0; j< nlists; ++j)
{
tIndsVec[j] = indsvec[j] + splitters[j][i];
tNumsVec[j] = numsvec[j] + splitters[j][i];
tLengths[j]= splitters[j][i+1] - splitters[j][i];
}
MergeContributions<SR>(tLengths.data(), tIndsVec, tNumsVec, indsBuf[i], numsBuf[i]);
}
// ------ concatenate merged tuples processed by threads ------
// prefix-sum of chunk sizes gives each chunk's write offset
std::vector<IU> tdisp(nsplits+1);
tdisp[0] = 0;
for(int i=0; i<nsplits; ++i)
{
tdisp[i+1] = tdisp[i] + indsBuf[i].size();
}
mergedind.resize(tdisp[nsplits]);
mergednum.resize(tdisp[nsplits]);
#pragma omp parallel for schedule(dynamic)
for(int i=0; i< nsplits; i++)
{
std::copy(indsBuf[i].data() , indsBuf[i].data() + indsBuf[i].size(), mergedind.data() + tdisp[i]);
std::copy(numsBuf[i].data() , numsBuf[i].data() + numsBuf[i].size(), mergednum.data() + tdisp[i]);
}
}
/**
* This version is the most flexible sparse matrix X sparse vector [Used in KDT]
* It accepts different types for the matrix (NUM), the input vector (IVT) and the output vector (OVT)
* without relying on automatic type promotion
* Input (x) and output (y) vectors can be ALIASED because y is not written until the algorithm is done with x.
*/
// Full distributed sparse-matrix sparse-vector multiply under semiring SR:
// (1) transpose x to the diagonal complement, (2) all-gather along processor
// columns, (3) local multiply, (4) all-to-all fold along processor rows,
// (5) merge per-neighbor contributions into y. x and y may alias.
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y,
bool indexisvalue, OptBuf<int32_t, OVT > & optbuf, PreAllocatedSPA<OVT> & SPA)
{
CheckSpMVCompliance(A,x);
optbuf.MarkEmpty();
y.glen = A.getnrow(); // in case it is not set already
MPI_Comm World = x.commGrid->GetWorld();
MPI_Comm ColWorld = x.commGrid->GetColWorld();
MPI_Comm RowWorld = x.commGrid->GetRowWorld();
int accnz;
int32_t trxlocnz;
IU lenuntil;
int32_t *trxinds, *indacc;
IVT *trxnums, *numacc;
#ifdef TIMING
double t0=MPI_Wtime();
#endif
// step 1: move x's local piece to the diagonal-complement process
TransposeVector(World, x, trxlocnz, lenuntil, trxinds, trxnums, indexisvalue);
#ifdef TIMING
double t1=MPI_Wtime();
cblas_transvectime += (t1-t0);
#endif
// step 2: gather the full column segment (skip when there is only one grid row)
if(x.commGrid->GetGridRows() > 1)
{
AllGatherVector(ColWorld, trxlocnz, lenuntil, trxinds, trxnums, indacc, numacc, accnz, indexisvalue); // trxindS/trxnums deallocated, indacc/numacc allocated, accnz set
}
else
{
accnz = trxlocnz;
indacc = trxinds; // aliasing ptr
numacc = trxnums; // aliasing ptr
}
int rowneighs;
MPI_Comm_size(RowWorld, &rowneighs);
int * sendcnt = new int[rowneighs]();
int32_t * sendindbuf;
OVT * sendnumbuf;
int * sdispls;
#ifdef TIMING
double t2=MPI_Wtime();
#endif
// step 3: node-local multiply producing per-row-neighbor send buffers
LocalSpMV<SR>(A, rowneighs, optbuf, indacc, numacc, sendindbuf, sendnumbuf, sdispls, sendcnt, accnz, indexisvalue, SPA); // indacc/numacc deallocated, sendindbuf/sendnumbuf/sdispls allocated
#ifdef TIMING
double t3=MPI_Wtime();
cblas_localspmvtime += (t3-t2);
#endif
// single-column grid: no fold communication needed, copy straight into y
if(x.commGrid->GetGridCols() == 1)
{
y.ind.resize(sendcnt[0]);
y.num.resize(sendcnt[0]);
if(optbuf.totmax > 0 ) // graph500 optimization enabled
{
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<sendcnt[0]; i++)
{
y.ind[i] = optbuf.inds[i];
y.num[i] = optbuf.nums[i];
}
}
else
{
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<sendcnt[0]; i++)
{
y.ind[i] = sendindbuf[i];
y.num[i] = sendnumbuf[i];
}
DeleteAll(sendindbuf, sendnumbuf,sdispls);
}
delete [] sendcnt;
return;
}
// step 4: exchange counts, then fold contributions along the processor row
int * rdispls = new int[rowneighs];
int * recvcnt = new int[rowneighs];
MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld); // share the request counts
// receive displacements are exact whereas send displacements have slack
rdispls[0] = 0;
for(int i=0; i<rowneighs-1; ++i)
{
rdispls[i+1] = rdispls[i] + recvcnt[i];
}
int totrecv = std::accumulate(recvcnt,recvcnt+rowneighs,0);
int32_t * recvindbuf = new int32_t[totrecv];
OVT * recvnumbuf = new OVT[totrecv];
#ifdef TIMING
double t4=MPI_Wtime();
#endif
if(optbuf.totmax > 0 ) // graph500 optimization enabled
{
MPI_Alltoallv(optbuf.inds, sendcnt, optbuf.dspls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld);
MPI_Alltoallv(optbuf.nums, sendcnt, optbuf.dspls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld);
delete [] sendcnt;
}
else
{
MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld);
MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld);
DeleteAll(sendindbuf, sendnumbuf, sendcnt, sdispls);
}
#ifdef TIMING
double t5=MPI_Wtime();
cblas_alltoalltime += (t5-t4);
#endif
#ifdef TIMING
double t6=MPI_Wtime();
#endif
//MergeContributions<SR>(y,recvcnt, rdispls, recvindbuf, recvnumbuf, rowneighs);
// step 5: merge sorted per-neighbor segments of the receive buffer into y
// free memory of y, in case it was aliased
std::vector<IU>().swap(y.ind);
std::vector<OVT>().swap(y.num);
std::vector<int32_t *> indsvec(rowneighs);
std::vector<OVT *> numsvec(rowneighs);
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<rowneighs; i++)
{
indsvec[i] = recvindbuf+rdispls[i];
numsvec[i] = recvnumbuf+rdispls[i];
}
#ifdef THREADED
MergeContributions_threaded<SR>(recvcnt, indsvec, numsvec, y.ind, y.num, y.MyLocLength());
#else
MergeContributions<SR>(recvcnt, indsvec, numsvec, y.ind, y.num);
#endif
DeleteAll(recvcnt, rdispls,recvindbuf, recvnumbuf);
#ifdef TIMING
double t7=MPI_Wtime();
cblas_mergeconttime += (t7-t6);
#endif
}
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, PreAllocatedSPA<OVT> & SPA)
{
OptBuf< int32_t, OVT > optbuf = OptBuf< int32_t,OVT >();
SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA);
}
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue)
{
OptBuf< int32_t, OVT > optbuf = OptBuf< int32_t,OVT >();
PreAllocatedSPA<OVT> SPA;
SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA);
}
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, OptBuf<int32_t, OVT > & optbuf)
{
PreAllocatedSPA<OVT> SPA;
SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA);
}
/**
* Automatic type promotion is ONLY done here, all the callee functions (in Friends.h and below) are initialized with the promoted type
* If indexisvalues = true, then we do not need to transfer values for x (happens for BFS iterations with boolean matrices and integer rhs vectors)
**/
/// Value-returning SpMV for index-valued input vectors: constructs the
/// result vector (with automatically promoted value type) on the same
/// grid as x and delegates to the in-place overload.
template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf)
{
    typedef typename promote_trait<NUM,IU>::T_promote T_promote;
    // identity doesn't matter for sparse vectors
    FullyDistSpVec<IU, T_promote> result(x.getcommgrid(), A.getnrow());
    SpMV<SR>(A, x, result, indexisvalue, optbuf);
    return result;
}
/**
* Parallel dense SpMV
**/
// Distributed sparse-matrix DENSE-vector multiply under semiring SR:
// transpose x to the diagonal complement, all-gather it along processor
// columns, multiply locally against a SR::id()-initialized output, then
// reduce the per-row partial results to the owning processes of y.
template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
FullyDistVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistVec<IU,NUV> & x )
{
typedef typename promote_trait<NUM,NUV>::T_promote T_promote;
CheckSpMVCompliance(A, x);
MPI_Comm World = x.commGrid->GetWorld();
MPI_Comm ColWorld = x.commGrid->GetColWorld();
MPI_Comm RowWorld = x.commGrid->GetRowWorld();
// exchange the local dense piece with the diagonal-complement process
int xsize = (int) x.LocArrSize();
int trxsize = 0;
int diagneigh = x.commGrid->GetComplementRank();
MPI_Status status;
MPI_Sendrecv(&xsize, 1, MPI_INT, diagneigh, TRX, &trxsize, 1, MPI_INT, diagneigh, TRX, World, &status);
NUV * trxnums = new NUV[trxsize];
MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.arr)), xsize, MPIType<NUV>(), diagneigh, TRX, trxnums, trxsize, MPIType<NUV>(), diagneigh, TRX, World, &status);
// all-gather the transposed pieces along the processor column
int colneighs, colrank;
MPI_Comm_size(ColWorld, &colneighs);
MPI_Comm_rank(ColWorld, &colrank);
int * colsize = new int[colneighs];
colsize[colrank] = trxsize;
MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colsize, 1, MPI_INT, ColWorld);
int * dpls = new int[colneighs](); // displacements (zero initialized pid)
std::partial_sum(colsize, colsize+colneighs-1, dpls+1);
int accsize = std::accumulate(colsize, colsize+colneighs, 0);
NUV * numacc = new NUV[accsize];
MPI_Allgatherv(trxnums, trxsize, MPIType<NUV>(), numacc, colsize, dpls, MPIType<NUV>(), ColWorld);
delete [] trxnums;
// serial SpMV with dense vector
T_promote id = SR::id();
IU ysize = A.getlocalrows();
T_promote * localy = new T_promote[ysize];
std::fill_n(localy, ysize, id);
#ifdef THREADED
dcsc_gespmv_threaded<SR>(*(A.spSeq), numacc, localy);
#else
dcsc_gespmv<SR>(*(A.spSeq), numacc, localy);
#endif
DeleteAll(numacc,colsize, dpls);
// FullyDistVec<IT,NT>(shared_ptr<CommGrid> grid, IT globallen, NT initval, NT id)
FullyDistVec<IU, T_promote> y ( x.commGrid, A.getnrow(), id);
int rowneighs;
MPI_Comm_size(RowWorld, &rowneighs);
// reduce each segment of localy to the row neighbor that owns it;
// process i is the root for the segment [RowLenUntil(i), RowLenUntil(i+1))
IU begptr, endptr;
for(int i=0; i< rowneighs; ++i)
{
begptr = y.RowLenUntil(i);
if(i == rowneighs-1)
{
endptr = ysize;
}
else
{
endptr = y.RowLenUntil(i+1);
}
MPI_Reduce(localy+begptr, SpHelper::p2a(y.arr), endptr-begptr, MPIType<T_promote>(), SR::mpi_op(), i, RowWorld);
}
delete [] localy;
return y;
}
/**
* \TODO: Old version that is no longer considered optimal
* Kept for legacy purposes
* To be removed when other functionals are fully tested.
**/
/**
 * Legacy sparse-matrix sparse-vector multiply (kept for backward
 * compatibility; superseded by the OptBuf/SPA-based SpMV above).
 * Transposes x, all-gathers it along the processor column, multiplies
 * locally, folds with all-to-all along the row, and combines duplicates
 * with a SPA-like dense accumulator before compacting into y.
 * @return y = A*x with automatically promoted value type
 **/
template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,NUV> & x)
{
    typedef typename promote_trait<NUM,NUV>::T_promote T_promote;
    CheckSpMVCompliance(A, x);
    MPI_Comm World = x.commGrid->GetWorld();
    MPI_Comm ColWorld = x.commGrid->GetColWorld();
    MPI_Comm RowWorld = x.commGrid->GetRowWorld();
    // exchange the local sparse piece with the diagonal-complement process
    int xlocnz = (int) x.getlocnnz();
    int trxlocnz = 0;
    int roffst = x.RowLenUntil();
    int offset;
    int diagneigh = x.commGrid->GetComplementRank();
    MPI_Status status;
    MPI_Sendrecv(&xlocnz, 1, MPI_INT, diagneigh, TRX, &trxlocnz, 1, MPI_INT, diagneigh, TRX, World, &status);
    MPI_Sendrecv(&roffst, 1, MPI_INT, diagneigh, TROST, &offset, 1, MPI_INT, diagneigh, TROST, World, &status);
    IU * trxinds = new IU[trxlocnz];
    NUV * trxnums = new NUV[trxlocnz];
    MPI_Sendrecv(const_cast<IU*>(SpHelper::p2a(x.ind)), xlocnz, MPIType<IU>(), diagneigh, TRX, trxinds, trxlocnz, MPIType<IU>(), diagneigh, TRX, World, &status);
    MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NUV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NUV>(), diagneigh, TRX, World, &status);
    // fullydist indexing (n pieces) -> matrix indexing (sqrt(p) pieces)
    // FIX: std::bind2nd was deprecated in C++11 and removed in C++17; use a lambda
    std::transform(trxinds, trxinds+trxlocnz, trxinds,
                   [offset](IU loc) { return loc + offset; });
    // all-gather the transposed pieces along the processor column
    int colneighs, colrank;
    MPI_Comm_size(ColWorld, &colneighs);
    MPI_Comm_rank(ColWorld, &colrank);
    int * colnz = new int[colneighs];
    colnz[colrank] = trxlocnz;
    MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld);
    int * dpls = new int[colneighs](); // displacements (zero initialized pid)
    std::partial_sum(colnz, colnz+colneighs-1, dpls+1);
    int accnz = std::accumulate(colnz, colnz+colneighs, 0);
    IU * indacc = new IU[accnz];
    NUV * numacc = new NUV[accnz];
    // ABAB: Future issues here, colnz is of type int (MPI limitation)
    // What if the aggregate vector size along the processor row/column is not 32-bit addressible?
    MPI_Allgatherv(trxinds, trxlocnz, MPIType<IU>(), indacc, colnz, dpls, MPIType<IU>(), ColWorld);
    MPI_Allgatherv(trxnums, trxlocnz, MPIType<NUV>(), numacc, colnz, dpls, MPIType<NUV>(), ColWorld);
    DeleteAll(trxinds, trxnums);
    // serial SpMV with sparse vector (indices narrowed to int32_t for the kernel)
    std::vector< int32_t > indy;
    std::vector< T_promote > numy;
    int32_t * tmpindacc = new int32_t[accnz];
    for(int i=0; i< accnz; ++i) tmpindacc[i] = indacc[i];
    delete [] indacc;
    dcsc_gespmv<SR>(*(A.spSeq), tmpindacc, numacc, accnz, indy, numy); // actual multiplication
    DeleteAll(tmpindacc, numacc);
    DeleteAll(colnz, dpls);
    FullyDistSpVec<IU, T_promote> y ( x.commGrid, A.getnrow()); // identity doesn't matter for sparse vectors
    IU yintlen = y.MyRowLength();
    int rowneighs;
    MPI_Comm_size(RowWorld,&rowneighs);
    // bucket the output entries by owning row neighbor, rebasing indices locally
    std::vector< std::vector<IU> > sendind(rowneighs);
    std::vector< std::vector<T_promote> > sendnum(rowneighs);
    typename std::vector<int32_t>::size_type outnz = indy.size();
    for(typename std::vector<IU>::size_type i=0; i< outnz; ++i)
    {
        IU locind;
        int rown = y.OwnerWithinRow(yintlen, static_cast<IU>(indy[i]), locind);
        sendind[rown].push_back(locind);
        sendnum[rown].push_back(numy[i]);
    }
    // flatten the buckets into contiguous send buffers and fold with all-to-all
    IU * sendindbuf = new IU[outnz];
    T_promote * sendnumbuf = new T_promote[outnz];
    int * sendcnt = new int[rowneighs];
    int * sdispls = new int[rowneighs];
    for(int i=0; i<rowneighs; ++i)
        sendcnt[i] = sendind[i].size();
    int * rdispls = new int[rowneighs];
    int * recvcnt = new int[rowneighs];
    MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld); // share the request counts
    sdispls[0] = 0;
    rdispls[0] = 0;
    for(int i=0; i<rowneighs-1; ++i)
    {
        sdispls[i+1] = sdispls[i] + sendcnt[i];
        rdispls[i+1] = rdispls[i] + recvcnt[i];
    }
    int totrecv = std::accumulate(recvcnt,recvcnt+rowneighs,0);
    IU * recvindbuf = new IU[totrecv];
    T_promote * recvnumbuf = new T_promote[totrecv];
    for(int i=0; i<rowneighs; ++i)
    {
        std::copy(sendind[i].begin(), sendind[i].end(), sendindbuf+sdispls[i]);
        std::vector<IU>().swap(sendind[i]);  // free bucket memory eagerly
    }
    for(int i=0; i<rowneighs; ++i)
    {
        std::copy(sendnum[i].begin(), sendnum[i].end(), sendnumbuf+sdispls[i]);
        std::vector<T_promote>().swap(sendnum[i]);
    }
    MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<IU>(), recvindbuf, recvcnt, rdispls, MPIType<IU>(), RowWorld);
    MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<T_promote>(), recvnumbuf, recvcnt, rdispls, MPIType<T_promote>(), RowWorld);
    DeleteAll(sendindbuf, sendnumbuf);
    DeleteAll(sendcnt, recvcnt, sdispls, rdispls);
    // define a SPA-like data structure: dense accumulator + occupancy flags,
    // combining duplicate indices with SR::add
    IU ysize = y.MyLocLength();
    T_promote * localy = new T_promote[ysize];
    bool * isthere = new bool[ysize];
    std::vector<IU> nzinds; // nonzero indices
    std::fill_n(isthere, ysize, false);
    for(int i=0; i< totrecv; ++i)
    {
        if(!isthere[recvindbuf[i]])
        {
            localy[recvindbuf[i]] = recvnumbuf[i]; // initial assignment
            nzinds.push_back(recvindbuf[i]);
            isthere[recvindbuf[i]] = true;
        }
        else
        {
            localy[recvindbuf[i]] = SR::add(localy[recvindbuf[i]], recvnumbuf[i]);
        }
    }
    DeleteAll(isthere, recvindbuf, recvnumbuf);
    // compact the accumulator into y's sorted sparse representation
    sort(nzinds.begin(), nzinds.end());
    int nnzy = nzinds.size();
    y.ind.resize(nnzy);
    y.num.resize(nnzy);
    for(int i=0; i< nnzy; ++i)
    {
        y.ind[i] = nzinds[i];
        y.num[i] = localy[nzinds[i]];
    }
    delete [] localy;
    return y;
}
// Aydin (June 2021):
// This currently duplicates the work of EWiseMult with exclude = true.
// It is nevertheless the right formulation: set difference must work even when
// the two matrices' value types have no valid multiplication operator between
// them, so callers of EWiseMult(..., exclude=true) should migrate here.
template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,NU1,UDERA> SetDifference(const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B)
{
	// Both operands must live on the same processor grid; abort otherwise.
	if(!(*(A.commGrid) == *(B.commGrid)))
	{
		std::cout << "Grids are not comparable for set difference" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return SpParMat< IU,NU1,UDERA >();
	}
	// Local set difference on the sequential pieces; ownership of the new
	// sequential matrix passes to the returned distributed matrix.
	UDERA * localDiff = new UDERA( SetDifference(*(A.spSeq), *(B.spSeq)) );
	return SpParMat<IU, NU1, UDERA>(localDiff, A.commGrid);
}
// Elementwise multiplication of two distributed sparse matrices.
// When exclude is true, entries of A whose position is nonzero in B are pruned
// instead of multiplied (the semantics are delegated to the sequential EWiseMult).
template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,typename promote_trait<NU1,NU2>::T_promote,typename promote_trait<UDERA,UDERB>::T_promote> EWiseMult
(const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B , bool exclude)
{
	typedef typename promote_trait<NU1,NU2>::T_promote N_promote;
	typedef typename promote_trait<UDERA,UDERB>::T_promote DER_promote;
	// Guard: operands must share a processor grid.
	if(!(*(A.commGrid) == *(B.commGrid)))
	{
		std::cout << "Grids are not comparable elementwise multiplication" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return SpParMat< IU,N_promote,DER_promote >();
	}
	// Multiply the local sequential pieces; the result object takes ownership.
	DER_promote * localProd = new DER_promote( EWiseMult(*(A.spSeq), *(B.spSeq), exclude) );
	return SpParMat<IU, N_promote, DER_promote>(localProd, A.commGrid);
}
// Extended elementwise apply over two distributed sparse matrices.
// __binary_op produces the output value; notB/defaultBVal control how missing
// B entries are treated (semantics delegated to the sequential EWiseApply).
template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation>
SpParMat<IU,RETT,RETDER> EWiseApply
(const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, bool notB, const NU2& defaultBVal)
{
	// Guard: operands must share a processor grid.
	if(!(*(A.commGrid) == *(B.commGrid)))
	{
		std::cout << "Grids are not comparable elementwise apply" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return SpParMat< IU,RETT,RETDER >();
	}
	RETDER * localResult = new RETDER( EWiseApply<RETT>(*(A.spSeq), *(B.spSeq), __binary_op, notB, defaultBVal) );
	return SpParMat<IU, RETT, RETDER>(localResult, A.commGrid);
}
// Extended elementwise apply with a selection predicate: do_op decides whether
// a position produces output; allowANulls/allowBNulls enable union semantics
// with ANullVal/BNullVal substituted for missing entries.
template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate>
SpParMat<IU,RETT,RETDER> EWiseApply
(const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect, const bool useExtendedBinOp)
{
	// Guard: operands must share a processor grid.
	if(!(*(A.commGrid) == *(B.commGrid)))
	{
		std::cout << "Grids are not comparable elementwise apply" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return SpParMat< IU,RETT,RETDER >();
	}
	RETDER * localResult = new RETDER( EWiseApply<RETT>(*(A.spSeq), *(B.spSeq), __binary_op, do_op, allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect) );
	return SpParMat<IU, RETT, RETDER>(localResult, A.commGrid);
}
// plain adapter
// Lets callers pass ordinary two-argument callbacks; they are wrapped into the
// extended (null-flag aware) form and forwarded to the extended overload above.
template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate>
SpParMat<IU,RETT,RETDER>
EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect = true)
{
	// Wrap both plain callbacks before dispatching to the extended overload.
	EWiseExtToPlainAdapter<RETT, NU1, NU2, _BinaryOperation> opAdapter(__binary_op);
	EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate> predAdapter(do_op);
	return EWiseApply<RETT, RETDER>(A, B, opAdapter, predAdapter,
		allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect, true);
}
// end adapter
/**
 * Elementwise multiply of a sparse vector V against a dense vector W.
 * if exclude is true, then we prune all entries W[i] != zero from V
 *   (i.e. keep only the entries of V whose dense counterpart equals `zero`)
 * if exclude is false, then we perform a proper elementwise multiplication,
 *   keeping entries where W[i] != zero and storing V[i] * W[i]
 * Aborts on grid mismatch or on differing global lengths.
 **/
template <typename IU, typename NU1, typename NU2>
FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote> EWiseMult
(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero)
{
typedef typename promote_trait<NU1,NU2>::T_promote T_promote;
if(*(V.commGrid) == *(W.commGrid))
{
FullyDistSpVec< IU, T_promote> Product(V.commGrid);
if(V.glen != W.glen)
{
std::cerr << "Vector dimensions don't match for EWiseMult\n";
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
}
else
{
Product.glen = V.glen;
IU size= V.getlocnnz();
if(exclude)
{
// Experimental threaded variant: split V's nonzeros into chunks, collect hits
// per chunk, then concatenate via a prefix sum of the chunk sizes.
#if defined(_OPENMP) && defined(CBLAS_EXPERIMENTAL) // not faster than serial
int actual_splits = cblas_splits * 1; // 1 is the parallel slackness
std::vector <IU> tlosizes (actual_splits, 0);
std::vector < std::vector<IU> > tlinds(actual_splits);
std::vector < std::vector<T_promote> > tlnums(actual_splits);
IU tlsize = size / actual_splits;
#pragma omp parallel for //schedule(dynamic, 1)
for(IU t = 0; t < actual_splits; ++t)
{
IU tlbegin = t*tlsize;
// Last chunk absorbs the remainder of the division.
IU tlend = (t==actual_splits-1)? size : (t+1)*tlsize;
for(IU i=tlbegin; i<tlend; ++i)
{
if(W.arr[V.ind[i]] == zero) // keep only those
{
tlinds[t].push_back(V.ind[i]);
tlnums[t].push_back(V.num[i]);
tlosizes[t]++;
}
}
}
// prefix_sum[t] is the output offset of chunk t; prefix_sum[actual_splits] is the total.
std::vector<IU> prefix_sum(actual_splits+1,0);
std::partial_sum(tlosizes.begin(), tlosizes.end(), prefix_sum.begin()+1);
Product.ind.resize(prefix_sum[actual_splits]);
Product.num.resize(prefix_sum[actual_splits]);
#pragma omp parallel for //schedule(dynamic, 1)
for(IU t=0; t< actual_splits; ++t)
{
std::copy(tlinds[t].begin(), tlinds[t].end(), Product.ind.begin()+prefix_sum[t]);
std::copy(tlnums[t].begin(), tlnums[t].end(), Product.num.begin()+prefix_sum[t]);
}
#else
// Serial path: keep entries of V whose dense counterpart is `zero` (set-minus style).
for(IU i=0; i<size; ++i)
{
if(W.arr[V.ind[i]] == zero) // keep only those
{
Product.ind.push_back(V.ind[i]);
Product.num.push_back(V.num[i]);
}
}
#endif
}
else
{
// Proper elementwise product: keep entries where W is nonzero and multiply.
for(IU i=0; i<size; ++i)
{
if(W.arr[V.ind[i]] != zero) // keep only those
{
Product.ind.push_back(V.ind[i]);
Product.num.push_back(V.num[i] * W.arr[V.ind[i]]);
}
}
}
}
return Product;
}
else
{
std::cout << "Grids are not comparable elementwise multiplication" << std::endl;
MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
return FullyDistSpVec< IU,T_promote>();
}
}
/**
 Threaded EWiseApply. Only called internally from EWiseApply.
 Strategy: partition the index space into contiguous per-thread ranges, let each
 thread collect its outputs into private buffers, then concatenate the buffers
 into Product using a prefix sum (tdisp) of the per-thread output counts.
 When allowVNulls is true the dense vector W drives the iteration (union-style);
 otherwise only V's nonzeros are visited.
 NOTE(review): relies on V.ind being sorted ascending (std::lower_bound) -- confirm.
**/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply_threaded
(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp)
{
typedef RET T_promote; //typedef typename promote_trait<NU1,NU2>::T_promote T_promote;
if(*(V.commGrid) == *(W.commGrid))
{
FullyDistSpVec< IU, T_promote> Product(V.commGrid);
if(V.TotalLength() != W.TotalLength())
{
std::ostringstream outs;
outs << "Vector dimensions don't match (" << V.TotalLength() << " vs " << W.TotalLength() << ") for EWiseApply (short version)\n";
SpParHelper::Print(outs.str());
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
}
else
{
int nthreads=1;
#ifdef _OPENMP
#pragma omp parallel
{
nthreads = omp_get_num_threads();
}
#endif
Product.glen = V.glen;
IU size= W.LocArrSize();
IU spsize = V.getlocnnz();
// temporary result vectors per thread
std::vector<std::vector<IU>> tProductInd(nthreads);
std::vector<std::vector<T_promote>> tProductVal(nthreads);
IU perthread; //chunk of tProductInd or tProductVal allocated to each thread
// The index space being split differs by mode: dense length vs. nnz of V.
if (allowVNulls)
perthread = size/nthreads;
else
perthread = spsize/nthreads;
#ifdef _OPENMP
#pragma omp parallel
#endif
{
int curthread = 0;
#ifdef _OPENMP
curthread = omp_get_thread_num();
#endif
// [tStartIdx, tNextIdx) is this thread's contiguous slice of the index space.
IU tStartIdx = perthread * curthread;
IU tNextIdx = perthread * (curthread+1);
if (allowVNulls)
{
// Last thread absorbs the remainder of the division.
if(curthread == nthreads-1) tNextIdx = size;
// get sparse part for the current thread
auto it = std::lower_bound (V.ind.begin(), V.ind.end(), tStartIdx);
IU tSpIdx = (IU) std::distance(V.ind.begin(), it);
// iterate over the dense vector
for(IU tIdx=tStartIdx; tIdx < tNextIdx; ++tIdx)
{
// V has an entry at this index: apply with both values present.
if(tSpIdx < spsize && V.ind[tSpIdx] < tNextIdx && V.ind[tSpIdx] == tIdx)
{
if (_doOp(V.num[tSpIdx], W.arr[tIdx], false, false))
{
tProductInd[curthread].push_back(tIdx);
tProductVal[curthread].push_back (_binary_op(V.num[tSpIdx], W.arr[tIdx], false, false));
}
tSpIdx++;
}
else
{
// V is null here: substitute Vzero (third flag marks the V-null case).
if (_doOp(Vzero, W.arr[tIdx], true, false))
{
tProductInd[curthread].push_back(tIdx);
tProductVal[curthread].push_back (_binary_op(Vzero, W.arr[tIdx], true, false));
}
}
}
}
else // iterate over the sparse vector
{
if(curthread == nthreads-1) tNextIdx = spsize;
for(IU tSpIdx=tStartIdx; tSpIdx < tNextIdx; ++tSpIdx)
{
if (_doOp(V.num[tSpIdx], W.arr[V.ind[tSpIdx]], false, false))
{
tProductInd[curthread].push_back( V.ind[tSpIdx]);
tProductVal[curthread].push_back (_binary_op(V.num[tSpIdx], W.arr[V.ind[tSpIdx]], false, false));
}
}
}
}
// tdisp[t] = output offset of thread t; tdisp[nthreads] = total output size.
std::vector<IU> tdisp(nthreads+1);
tdisp[0] = 0;
for(int i=0; i<nthreads; ++i)
{
tdisp[i+1] = tdisp[i] + tProductInd[i].size();
}
// copy results from temporary vectors
Product.ind.resize(tdisp[nthreads]);
Product.num.resize(tdisp[nthreads]);
#ifdef _OPENMP
#pragma omp parallel
#endif
{
int curthread = 0;
#ifdef _OPENMP
curthread = omp_get_thread_num();
#endif
std::copy(tProductInd[curthread].begin(), tProductInd[curthread].end(), Product.ind.data() + tdisp[curthread]);
std::copy(tProductVal[curthread].begin() , tProductVal[curthread].end(), Product.num.data() + tdisp[curthread]);
}
}
return Product;
}
else
{
std::cout << "Grids are not comparable for EWiseApply" << std::endl;
MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
return FullyDistSpVec< IU,T_promote>();
}
}
/**
 * Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret.
 * The binary operatiation is only performed if the binary predicate _doOp returns true for those elements. Otherwise the binary operation is not
 * performed and ret does not contain an element at that position.
 * More formally the operation is defined as:
 * if (_doOp(V[i], W[i]))
 *    ret[i] = _binary_op(V[i], W[i])
 * else
 *    // ret[i] is not set
 * Hence _doOp can be used to implement a filter on either of the vectors.
 *
 * The above is only defined if both V[i] and W[i] exist (i.e. an intersection). To allow a union operation (ex. when V[i] doesn't exist but W[i] does)
 * the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value.
 *
 * The type of each element of ret must not necessarily be related to the types of V or W, so the return type must be explicitly specified as a template parameter:
 * FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, retTrue, false, 0)
 *
 * When compiled with OpenMP this function simply dispatches to EWiseApply_threaded;
 * the serial code below is the fallback.
 **/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp)
{
#ifdef _OPENMP
return EWiseApply_threaded<RET>(V, W, _binary_op, _doOp, allowVNulls, Vzero, useExtendedBinOp);
#else
typedef RET T_promote; //typedef typename promote_trait<NU1,NU2>::T_promote T_promote;
if(*(V.commGrid) == *(W.commGrid))
{
FullyDistSpVec< IU, T_promote> Product(V.commGrid);
//FullyDistVec< IU, NU1> DV (V); // Ariful: I am not sure why it was there??
if(V.TotalLength() != W.TotalLength())
{
std::ostringstream outs;
outs << "Vector dimensions don't match (" << V.TotalLength() << " vs " << W.TotalLength() << ") for EWiseApply (short version)\n";
SpParHelper::Print(outs.str());
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
}
else
{
Product.glen = V.glen;
IU size= W.LocArrSize();
IU spsize = V.getlocnnz();
IU sp_iter = 0;  // cursor into V's nonzeros, advanced in lockstep with the dense scan
if (allowVNulls)
{
// iterate over the dense vector
for(IU i=0; i<size; ++i)
{
// V has an entry at i: apply with both values present.
if(sp_iter < spsize && V.ind[sp_iter] == i)
{
if (_doOp(V.num[sp_iter], W.arr[i], false, false))
{
Product.ind.push_back(i);
Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[i], false, false));
}
sp_iter++;
}
else
{
// V is null here: substitute Vzero (third flag marks the V-null case).
if (_doOp(Vzero, W.arr[i], true, false))
{
Product.ind.push_back(i);
Product.num.push_back(_binary_op(Vzero, W.arr[i], true, false));
}
}
}
}
else
{
// iterate over the sparse vector
for(sp_iter = 0; sp_iter < spsize; ++sp_iter)
{
if (_doOp(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false))
{
Product.ind.push_back(V.ind[sp_iter]);
Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false));
}
}
}
}
return Product;
}
else
{
std::cout << "Grids are not comparable for EWiseApply" << std::endl;
MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
return FullyDistSpVec< IU,T_promote>();
}
#endif
}
/**
 * Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret.
 * The binary operatiation is only performed if the binary predicate _doOp returns true for those elements. Otherwise the binary operation is not
 * performed and ret does not contain an element at that position.
 * More formally the operation is defined as:
 * if (_doOp(V[i], W[i]))
 *    ret[i] = _binary_op(V[i], W[i])
 * else
 *    // ret[i] is not set
 * Hence _doOp can be used to implement a filter on either of the vectors.
 *
 * The above is only defined if both V[i] and W[i] exist (i.e. an intersection). To allow a union operation (ex. when V[i] doesn't exist but W[i] does)
 * the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value.
 * !allowVNulls && !allowWNulls => intersection
 * !allowVNulls &&  allowWNulls => operate on all elements of V
 *  allowVNulls && !allowWNulls => operate on all elements of W
 *  allowVNulls &&  allowWNulls => union
 *
 * The type of each element of ret must not necessarily be related to the types of V or W, so the return type must be explicitly specified as a template parameter:
 * FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, ...)
 * For intersection, Vzero and Wzero are irrelevant
 * ABAB: \todo: Should allowIntersect be "false" for all SetDifference uses?
 *
 * Implementation: a classic sorted two-pointer merge over V.ind and W.ind,
 * followed by drain loops for whichever vector still has entries left.
 * NOTE(review): assumes both index arrays are sorted ascending -- confirm.
 **/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
(const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect, const bool useExtendedBinOp)
{
typedef RET T_promote; // typename promote_trait<NU1,NU2>::T_promote T_promote;
if(*(V.commGrid) == *(W.commGrid))
{
FullyDistSpVec< IU, T_promote> Product(V.commGrid);
if(V.glen != W.glen)
{
std::ostringstream outs;
outs << "Vector dimensions don't match (" << V.glen << " vs " << W.glen << ") for EWiseApply (full version)\n";
SpParHelper::Print(outs.str());
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
}
else
{
Product.glen = V.glen;
typename std::vector< IU  >::const_iterator indV = V.ind.begin();
typename std::vector< NU1 >::const_iterator numV = V.num.begin();
typename std::vector< IU  >::const_iterator indW = W.ind.begin();
typename std::vector< NU2 >::const_iterator numW = W.num.begin();
// Merge phase: advance whichever cursor has the smaller index.
while (indV < V.ind.end() && indW < W.ind.end())
{
if (*indV == *indW)
{
// overlap
if (allowIntersect)
{
if (_doOp(*numV, *numW, false, false))
{
Product.ind.push_back(*indV);
Product.num.push_back(_binary_op(*numV, *numW, false, false));
}
}
indV++; numV++;
indW++; numW++;
}
else if (*indV < *indW)
{
// V has value but W does not
if (allowWNulls)
{
if (_doOp(*numV, Wzero, false, true))
{
Product.ind.push_back(*indV);
Product.num.push_back(_binary_op(*numV, Wzero, false, true));
}
}
indV++; numV++;
}
else //(*indV > *indW)
{
// W has value but V does not
if (allowVNulls)
{
if (_doOp(Vzero, *numW, true, false))
{
Product.ind.push_back(*indW);
Product.num.push_back(_binary_op(Vzero, *numW, true, false));
}
}
indW++; numW++;
}
}
// clean up
// Drain the remainder of V (only meaningful when W nulls are allowed).
while (allowWNulls && indV < V.ind.end())
{
if (_doOp(*numV, Wzero, false, true))
{
Product.ind.push_back(*indV);
Product.num.push_back(_binary_op(*numV, Wzero, false, true));
}
indV++; numV++;
}
// Drain the remainder of W (only meaningful when V nulls are allowed).
while (allowVNulls && indW < W.ind.end())
{
if (_doOp(Vzero, *numW, true, false))
{
Product.ind.push_back(*indW);
Product.num.push_back(_binary_op(Vzero, *numW, true, false));
}
indW++; numW++;
}
}
return Product;
}
else
{
std::cout << "Grids are not comparable for EWiseApply" << std::endl;
MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
return FullyDistSpVec< IU,T_promote>();
}
}
// plain callback versions
// Sparse-vector x dense-vector adapter: wraps two-argument callbacks into the
// extended (null-flag aware) form, then forwards to the extended overload.
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero)
{
	EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation> opAdapter(_binary_op);
	EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate> predAdapter(_doOp);
	return EWiseApply<RET>(V, W, opAdapter, predAdapter, allowVNulls, Vzero, true);
}
// Sparse-vector x sparse-vector adapter: wraps two-argument callbacks into the
// extended (null-flag aware) form, then forwards to the extended overload.
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
(const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect = true)
{
	EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation> opAdapter(_binary_op);
	EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate> predAdapter(_doOp);
	return EWiseApply<RET>(V, W, opAdapter, predAdapter,
		allowVNulls, allowWNulls, Vzero, Wzero, allowIntersect, true);
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// sampling-based nnz estimation via SpMV
// @OGUZ-NOTE This is not based on SUMMA, do not use. Estimates the number of
// nonzeros in the final output matrix.
#define NROUNDS 5 // number of independent sampling rounds used by the nnz estimator
typedef std::array<float, NROUNDS> samparr_t; // one exponential sample per round
// Promote any (nonzero-type, sample-array) pair to the sample array, so that
// SpMV over the sampling semiring keeps propagating samparr_t values.
template <typename NZT>
struct promote_trait<NZT, samparr_t>
{
typedef samparr_t T_promote;
};
// Output handler for FullyDistVec<.., samparr_t>::ParallelWrite: streams the
// NROUNDS sample values of one vector entry, space-separated.
class SamplesSaveHandler
{
public:
	template<typename c, typename t, typename V>
	void save(std::basic_ostream<c, t> &os,
			  std::array<V, NROUNDS> &sample_vec,
			  int64_t index)
	{
		// `index` is unused; only the sample values are emitted.
		for (const V &sample : sample_vec)
			os << sample << " ";
	}
};
// Semiring for the sampling-based nnz estimator:
//   "add"      = elementwise minimum over the NROUNDS samples
//   "multiply" = pass the sample array through unchanged (matrix values ignored)
// The identity is an array of float-max so min() leaves any real sample intact.
template<typename NZT>
struct SelectMinxSR
{
	static samparr_t id()
	{
		samparr_t ident;
		ident.fill(std::numeric_limits<float>::max());
		return ident;
	}
	static bool returnedSAID()
	{
		return false;
	}
	static samparr_t
	add (const samparr_t &lhs, const samparr_t &rhs)
	{
		samparr_t res;
		for (int k = 0; k < NROUNDS; ++k)
			res[k] = std::min(lhs[k], rhs[k]);
		return res;
	}
	static samparr_t
	multiply (const NZT arg1, const samparr_t &arg2)
	{
		return arg2; // matrix nonzero value is irrelevant to the samples
	}
	static void axpy (const NZT a, const samparr_t &x, samparr_t &y)
	{
		y = add(y, multiply(a, x));
	}
	// Lazily create (once) a commutative MPI reduction op wrapping MPI_func.
	static MPI_Op mpi_op()
	{
		static MPI_Op mpiop;
		static bool exists = false;
		if (!exists)
		{
			MPI_Op_create(MPI_func, true, &mpiop);
			exists = true;
		}
		return mpiop;
	}
	// MPI user-op: elementwise-min reduce `len` sample arrays into inoutvec.
	static void
	MPI_func(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
	{
		samparr_t *in = static_cast<samparr_t *>(invec);
		samparr_t *inout = static_cast<samparr_t *>(inoutvec);
		for (int i = 0; i < *len; ++i)
			inout[i] = add(inout[i], in[i]);
	}
};
/**
 * Sampling-based estimate of nnz(A*B) computed with two SpMVs over the
 * min-sampling semiring (SelectMinxSR), following the exponential-sample
 * minimum-propagation scheme. @OGUZ-NOTE This is not based on SUMMA, do not use.
 *
 * Both matrices are transposed before the SpMVs and transposed back before
 * returning, so A and B are left unchanged (modulo the double transpose).
 *
 * Fixes vs. the previous version:
 *  - the inner "#pragma omp parallel for" was nested directly inside an active
 *    "#pragma omp parallel" region, so every thread executed the whole loop and
 *    raced on SetLocalElement; it is now a work-sharing "#pragma omp for".
 *  - each thread's random engine is seeded with its thread id; previously all
 *    default-constructed engines produced identical sequences.
 *  - OpenMP runtime calls are referenced only when THREADED is defined, so the
 *    function also compiles without OpenMP (the unused thread-count probe is gone).
 *
 * @return the estimated total number of nonzeros of A*B (summed over all ranks)
 */
template <typename IU, typename NU1, typename NU2,
		  typename UDERA, typename UDERB>
int64_t
EstPerProcessNnzSpMV(
	SpParMat<IU, NU1, UDERA> &A, SpParMat<IU, NU2, UDERB> &B
	)
{
	int myrank;
	MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
	float lambda = 1.0f;	// rate of the exponential sampling distribution

	if (myrank == 0)
		std::cout << "taking transposes." << std::endl;
	A.Transpose();
	B.Transpose();

	if (myrank == 0)
		std::cout << "setting initial samples." << std::endl;
	samparr_t sa;
	FullyDistVec<IU, samparr_t> samples_init(A.getcommgrid(), A.getncol(), sa);
	// Fill each local entry with NROUNDS independent exponential draws.
#ifdef THREADED
#pragma omp parallel
#endif
	{
		int tid = 0;
#ifdef THREADED
		tid = omp_get_thread_num();
#endif
		// Seed per thread so different threads draw different sequences.
		std::default_random_engine gen(tid + 1);
		std::exponential_distribution<float> exp_dist(lambda);
#ifdef THREADED
#pragma omp for
#endif
		for (IU i = 0; i < samples_init.LocArrSize(); ++i)
		{
			samparr_t tmp;
			for (auto it = tmp.begin(); it != tmp.end(); ++it)
				*it = exp_dist(gen);
			samples_init.SetLocalElement(i, tmp);
		}
	}

	if (myrank == 0)
		std::cout << "computing mid samples." << std::endl;
	FullyDistVec<IU, samparr_t> samples_mid =
		SpMV<SelectMinxSR<NU1> > (A, samples_init);

	if (myrank == 0)
		std::cout << "computing final samples." << std::endl;
	FullyDistVec<IU, samparr_t> samples_final =
		SpMV<SelectMinxSR<NU2> > (B, samples_mid);

	if (myrank == 0)
		std::cout << "computing nnz estimation." << std::endl;
	float nnzest = 0.0f;
	std::cout << myrank << "samples_final loc size: "
			  << samples_final.LocArrSize() << std::endl;
	const samparr_t *lsamples = samples_final.GetLocArr();
#ifdef THREADED
#pragma omp parallel for reduction (+:nnzest)
#endif
	for (IU i = 0; i < samples_final.LocArrSize(); ++i)
	{
		// Estimator for the size of each min-union: (NROUNDS-1) / sum(samples).
		float tmp = 0.0f;
		for (auto it = lsamples[i].begin(); it != lsamples[i].end(); ++it)
			tmp += *it;
		nnzest += static_cast<float>(NROUNDS - 1) / tmp;
	}

	if (myrank == 0)
		std::cout << "taking transposes again." << std::endl;
	int64_t nnzC_est = nnzest;
	int64_t nnzC_tot = 0;
	MPI_Allreduce(&nnzC_est, &nnzC_tot, 1, MPIType<int64_t>(), MPI_SUM,
				  (B.commGrid)->GetWorld());
	if (myrank == 0)
		std::cout << "sampling-based spmv est tot: " << nnzC_tot << std::endl;

	// revert back
	A.Transpose();
	B.Transpose();

	return nnzC_tot;
}
/**
 * 3D sparse matrix-matrix multiply: C = A*B over semiring SR for SpParMat3D
 * operands. Three phases: (1) 2D SUMMA within each layer, (2) an all-to-all
 * "3d-reduction" that redistributes column chunks of the layer result along
 * the fiber dimension, (3) a multiway merge of the received chunks.
 * Aborts on dimension mismatch.
 */
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDER1, typename UDER2>
SpParMat3D<IU,NUO,UDERO> Mult_AnXBn_SUMMA3D(SpParMat3D<IU,NU1,UDER1> & A, SpParMat3D<IU,NU2,UDER2> & B){
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
typedef typename UDERO::LocalIT LIC;
typedef typename UDER1::LocalIT LIA;  // NOTE(review): LIA/LIB are unused here
typedef typename UDER2::LocalIT LIB;
#ifdef TIMING
double t0, t1, t2, t3;
#endif
/*
 * Check if A and B are multipliable
 * */
if(A.getncol() != B.getnrow()){
std::ostringstream outs;
outs << "Can not multiply, dimensions does not match"<< std::endl;
outs << A.getncol() << " != " << B.getnrow() << std::endl;
SpParHelper::Print(outs.str());
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
}
/*
 * Calculate, accross fibers, which process should get how many columns after redistribution
 * */
vector<LIB> divisions3d;
// Calcuclate split boundaries as if all contents of the layer is being re-distributed along fiber
// These boundaries will be used later on
B.CalculateColSplitDistributionOfLayer(divisions3d);
#ifdef TIMING
t0 = MPI_Wtime();
#endif
/*
 * SUMMA Starts
 * */
int stages, dummy; // last two parameters of ProductGrid are ignored for this multiplication
std::shared_ptr<CommGrid> GridC = ProductGrid((A.GetLayerMat()->getcommgrid()).get(),
(B.GetLayerMat()->getcommgrid()).get(),
stages, dummy, dummy);
IU C_m = A.GetLayerMat()->seqptr()->getnrow();
IU C_n = B.GetLayerMat()->seqptr()->getncol();
// Per-stage essential-size tables so receivers can pre-allocate before the broadcasts.
IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERO::esscount, stages);
IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERO::esscount, stages);
SpParHelper::GetSetSizes( *(A.GetLayerMat()->seqptr()), ARecvSizes, (A.GetLayerMat()->getcommgrid())->GetRowWorld() );
SpParHelper::GetSetSizes( *(B.GetLayerMat()->seqptr()), BRecvSizes, (B.GetLayerMat()->getcommgrid())->GetColWorld() );
// Remotely fetched matrices are stored as pointers
// NOTE(review): ARecv is declared UDERO* but is assigned UDER1* values below;
// this only instantiates when UDERO and UDER1 are compatible -- confirm.
UDERO * ARecv;
UDER2 * BRecv;
std::vector< SpTuples<IU,NUO>  *> tomerge;
int Aself = (A.GetLayerMat()->getcommgrid())->GetRankInProcRow();
int Bself = (B.GetLayerMat()->getcommgrid())->GetRankInProcCol();
double Abcast_time = 0;
double Bbcast_time = 0;
double Local_multiplication_time = 0;
// One SUMMA stage per process column/row of the layer grid.
for(int i = 0; i < stages; ++i) {
std::vector<IU> ess;
if(i == Aself){
ARecv = A.GetLayerMat()->seqptr();	// shallow-copy
}
else{
ess.resize(UDER1::esscount);
for(int j=0; j<UDER1::esscount; ++j) {
ess[j] = ARecvSizes[j][i];		// essentials of the ith matrix in this row
}
ARecv = new UDER1();				// first, create the object
}
#ifdef TIMING
t2 = MPI_Wtime();
#endif
if (Aself != i) {
ARecv->Create(ess);
}
// Broadcast A's index and value arrays from the stage owner along the row.
Arr<IU,NU1> Aarrinfo = ARecv->GetArrays();
for(unsigned int idx = 0; idx < Aarrinfo.indarrs.size(); ++idx) {
MPI_Bcast(Aarrinfo.indarrs[idx].addr, Aarrinfo.indarrs[idx].count, MPIType<IU>(), i, GridC->GetRowWorld());
}
for(unsigned int idx = 0; idx < Aarrinfo.numarrs.size(); ++idx) {
MPI_Bcast(Aarrinfo.numarrs[idx].addr, Aarrinfo.numarrs[idx].count, MPIType<NU1>(), i, GridC->GetRowWorld());
}
#ifdef TIMING
t3 = MPI_Wtime();
Abcast_time += (t3-t2);
#endif
ess.clear();
if(i == Bself){
BRecv = B.GetLayerMat()->seqptr();	// shallow-copy
}
else{
ess.resize(UDER2::esscount);
for(int j=0; j<UDER2::esscount; ++j)	{
ess[j] = BRecvSizes[j][i];
}
BRecv = new UDER2();
}
MPI_Barrier(A.GetLayerMat()->getcommgrid()->GetWorld());
#ifdef TIMING
t2 = MPI_Wtime();
#endif
if (Bself != i) {
BRecv->Create(ess);
}
// Broadcast B's index and value arrays from the stage owner along the column.
Arr<IU,NU2> Barrinfo = BRecv->GetArrays();
for(unsigned int idx = 0; idx < Barrinfo.indarrs.size(); ++idx) {
MPI_Bcast(Barrinfo.indarrs[idx].addr, Barrinfo.indarrs[idx].count, MPIType<IU>(), i, GridC->GetColWorld());
}
for(unsigned int idx = 0; idx < Barrinfo.numarrs.size(); ++idx) {
MPI_Bcast(Barrinfo.numarrs[idx].addr, Barrinfo.numarrs[idx].count, MPIType<NU2>(), i, GridC->GetColWorld());
}
#ifdef TIMING
t3 = MPI_Wtime();
Bbcast_time += (t3-t2);
#endif
#ifdef TIMING
t2 = MPI_Wtime();
#endif
// Hash-based local SpGEMM; received pieces are freed inside when not self-owned.
SpTuples<IU,NUO> * C_cont = LocalSpGEMMHash<SR, NUO>
(*ARecv, *BRecv,	// parameters themselves
i != Aself, 	// 'delete A' condition
i != Bself, 	// 'delete B' condition
false);     	// not to sort each column
#ifdef TIMING
t3 = MPI_Wtime();
Local_multiplication_time += (t3-t2);
#endif
if(!C_cont->isZero()) tomerge.push_back(C_cont);
}
SpHelper::deallocate2D(ARecvSizes, UDER1::esscount);
SpHelper::deallocate2D(BRecvSizes, UDER2::esscount);
#ifdef TIMING
t2 = MPI_Wtime();
#endif
// Merge all per-stage partial results of this layer into one tuple list.
SpTuples<IU,NUO> * C_tuples = MultiwayMergeHash<SR>(tomerge, C_m, C_n, true, false); // Delete input arrays and do not sort
//SpTuples<IU,NUO> * C_tuples = MultiwayMergeHashSliding<SR>(tomerge, C_m, C_n, true, false); // Delete input arrays and do not sort
#ifdef TIMING
t3 = MPI_Wtime();
#endif
#ifdef TIMING
if(myrank == 0){
fprintf(stderr, "[SUMMA3D]\tAbcast_time: %lf\n", Abcast_time);
fprintf(stderr, "[SUMMA3D]\tBbcast_time: %lf\n", Bbcast_time);
fprintf(stderr, "[SUMMA3D]\tLocal_multiplication_time: %lf\n", Local_multiplication_time);
fprintf(stderr, "[SUMMA3D]\tMerge_layer_time: %lf\n", (t3-t2));
}
#endif
/*
 * SUMMA Ends
 * */
#ifdef TIMING
t1 = MPI_Wtime();
if(myrank == 0) fprintf(stderr, "[SUMMA3D]\tSUMMA time: %lf\n", (t1-t0));
#endif
/*
 * 3d-reduction starts
 * */
#ifdef TIMING
//MPI_Barrier(getcommgrid3D()->GetWorld());
t0 = MPI_Wtime();
#endif
// Opaque MPI datatype so whole tuples can travel through Alltoallv as raw bytes.
MPI_Datatype MPI_tuple;
MPI_Type_contiguous(sizeof(std::tuple<LIC,LIC,NUO>), MPI_CHAR, &MPI_tuple);
MPI_Type_commit(&MPI_tuple);
/*
 * Create a profile with information regarding data to be sent and received between layers
 * These memory allocation needs to be `int` specifically because some of these arrays would be used in communication
 * This is requirement is for MPI as MPI_Alltoallv takes pointer to integer exclusively as count and displacement
 * */
int * sendcnt = new int[A.getcommgrid3D()->GetGridLayers()];
int * sendprfl = new int[A.getcommgrid3D()->GetGridLayers()*3];
int * sdispls = new int[A.getcommgrid3D()->GetGridLayers()]();
int * recvcnt = new int[A.getcommgrid3D()->GetGridLayers()];
int * recvprfl = new int[A.getcommgrid3D()->GetGridLayers()*3];
int * rdispls = new int[A.getcommgrid3D()->GetGridLayers()]();
vector<IU> divisions3dPrefixSum(divisions3d.size());
divisions3dPrefixSum[0] = 0;
std::partial_sum(divisions3d.begin(), divisions3d.end()-1, divisions3dPrefixSum.begin()+1);
ColLexiCompare<IU,NUO> comp;
IU totsend = C_tuples->getnnz();
// Column-range binary search to find each layer's chunk of the (column-sorted) tuples.
// NOTE(review): relies on C_tuples being sorted in column-major order -- confirm.
#pragma omp parallel for
for(int i=0; i < A.getcommgrid3D()->GetGridLayers(); ++i){
IU start_col = divisions3dPrefixSum[i];
IU end_col = divisions3dPrefixSum[i] + divisions3d[i];
std::tuple<IU, IU, NUO> search_tuple_start(0, start_col, NUO());
std::tuple<IU, IU, NUO> search_tuple_end(0, end_col, NUO());
std::tuple<IU, IU, NUO>* start_it = std::lower_bound(C_tuples->tuples, C_tuples->tuples + C_tuples->getnnz(), search_tuple_start, comp);
std::tuple<IU, IU, NUO>* end_it = std::lower_bound(C_tuples->tuples, C_tuples->tuples + C_tuples->getnnz(), search_tuple_end, comp);
// This type casting is important from semantic point of view
sendcnt[i] = (int)(end_it - start_it);
sendprfl[i*3+0] = (int)(sendcnt[i]); // Number of nonzeros in ith chunk
sendprfl[i*3+1] = (int)(A.GetLayerMat()->seqptr()->getnrow()); // Number of rows in ith chunk
sendprfl[i*3+2] = (int)(divisions3d[i]); // Number of columns in ith chunk
}
std::partial_sum(sendcnt, sendcnt+A.getcommgrid3D()->GetGridLayers()-1, sdispls+1);
// Send profile ready. Now need to update the tuples to reflect correct column id after column split.
for(int i=0; i < A.getcommgrid3D()->GetGridLayers(); ++i){
#pragma omp parallel for schedule(static)
for(int j = 0; j < sendcnt[i]; j++){
std::get<1>(C_tuples->tuples[sdispls[i]+j]) = std::get<1>(C_tuples->tuples[sdispls[i]+j]) - divisions3dPrefixSum[i];
}
}
// Exchange per-chunk profiles (nnz, nrow, ncol) across the fiber, then the tuples.
MPI_Alltoall(sendprfl, 3, MPI_INT, recvprfl, 3, MPI_INT, A.getcommgrid3D()->GetFiberWorld());
for(int i = 0; i < A.getcommgrid3D()->GetGridLayers(); i++) recvcnt[i] = recvprfl[i*3];
std::partial_sum(recvcnt, recvcnt+A.getcommgrid3D()->GetGridLayers()-1, rdispls+1);
IU totrecv = std::accumulate(recvcnt,recvcnt+A.getcommgrid3D()->GetGridLayers(), static_cast<IU>(0));
std::tuple<LIC,LIC,NUO>* recvTuples = static_cast<std::tuple<LIC,LIC,NUO>*> (::operator new (sizeof(std::tuple<LIC,LIC,NUO>[totrecv])));
#ifdef TIMING
t2 = MPI_Wtime();
#endif
MPI_Alltoallv(C_tuples->tuples, sendcnt, sdispls, MPI_tuple, recvTuples, recvcnt, rdispls, MPI_tuple, A.getcommgrid3D()->GetFiberWorld());
delete C_tuples;
#ifdef TIMING
t3 = MPI_Wtime();
if(myrank == 0) fprintf(stderr, "[SUMMA3D]\tAlltoallv: %lf\n", (t3-t2));
#endif
// Wrap each received chunk as an SpTuples that aliases recvTuples (no copy).
vector<SpTuples<IU, NUO>*> recvChunks(A.getcommgrid3D()->GetGridLayers());
#pragma omp parallel for
for (int i = 0; i < A.getcommgrid3D()->GetGridLayers(); i++){
recvChunks[i] = new SpTuples<LIC, NUO>(recvcnt[i], recvprfl[i*3+1], recvprfl[i*3+2], recvTuples + rdispls[i], true, false);
}
// Free all memory except tempTuples; Because that memory is holding data of newly created local matrices after receiving.
DeleteAll(sendcnt, sendprfl, sdispls);
DeleteAll(recvcnt, recvprfl, rdispls);
MPI_Type_free(&MPI_tuple);
/*
 * 3d-reduction ends
 * */
#ifdef TIMING
t1 = MPI_Wtime();
if(myrank == 0) fprintf(stderr, "[SUMMA3D]\tReduction time: %lf\n", (t1-t0));
#endif
#ifdef TIMING
t0 = MPI_Wtime();
#endif
/*
 * 3d-merge starts
 * */
SpTuples<IU, NUO> * merged_tuples = MultiwayMergeHash<SR, IU, NUO>(recvChunks, recvChunks[0]->getnrow(), recvChunks[0]->getncol(), false, false); // Do not delete
#ifdef TIMING
t1 = MPI_Wtime();
if(myrank == 0) fprintf(stderr, "[SUMMA3D]\tMerge_fiber_time: %lf\n", (t1-t0));
#endif
//Create SpDCCol and delete merged_tuples;
UDERO * localResultant = new UDERO(*merged_tuples, false);
delete merged_tuples;
// Do not delete elements of recvChunks, because that would give segmentation fault due to double free
//delete [] recvTuples;
::operator delete(recvTuples);
for(int i = 0; i < recvChunks.size(); i++){
recvChunks[i]->tuples_deleted = true; // Temporary patch to avoid memory leak and segfault
delete recvChunks[i];
}
vector<SpTuples<IU,NUO>*>().swap(recvChunks);
/*
 * 3d-merge ends
 * */
// Assemble the distributed 3D result on a freshly built (equivalent) 3D grid.
std::shared_ptr<CommGrid3D> grid3d;
grid3d.reset(new CommGrid3D(A.getcommgrid3D()->GetWorld(), A.getcommgrid3D()->GetGridLayers(), A.getcommgrid3D()->GetGridRows(), A.getcommgrid3D()->GetGridCols(), A.isSpecial()));
SpParMat3D<IU, NUO, UDERO> C(localResultant, grid3d, A.isColSplit(), A.isSpecial());
return C;
}
/*
 * Compute C = Prune(A * B) for 3D-distributed sparse matrices, splitting the
 * multiplication into column-wise phases so intermediate products fit in memory.
 *
 * Parameters:
 * - A, B: 3D-distributed operands; aborts if A.ncol != B.nrow
 * - phases: requested number of phases; reset to 1 if out of range, and raised
 *   if the symbolic memory estimate (perProcessMemory > 0) demands it
 * - hardThreshold, selectNum, recoverNum, recoverPct, kselectVersion:
 *   forwarded to MCLPruneRecoverySelect after each phase
 * - computationKernel: 1 for hash-based, 2 for heap-based local SpGEMM/merge.
 *   Any other value now falls back to the hash-based kernels; previously such
 *   values left the result pointers uninitialized (undefined behavior).
 * - perProcessMemory: per-process memory budget in GB; <= 0 skips estimation
 * */
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat3D<IU, NUO, UDERO> MemEfficientSpGEMM3D(SpParMat3D<IU, NU1, UDERA> & A, SpParMat3D<IU, NU2, UDERB> & B,
    int phases, NUO hardThreshold, IU selectNum, IU recoverNum, NUO recoverPct, int kselectVersion, int computationKernel, int64_t perProcessMemory){
    int myrank;
    MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
    typedef typename UDERA::LocalIT LIA;
    typedef typename UDERB::LocalIT LIB;
    typedef typename UDERO::LocalIT LIC;
    /*
     * Check if A and B are multipliable
     * */
    if(A.getncol() != B.getnrow()){
        std::ostringstream outs;
        outs << "Can not multiply, dimensions does not match"<< std::endl;
        outs << A.getncol() << " != " << B.getnrow() << std::endl;
        SpParHelper::Print(outs.str());
        MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
    }
    /*
     * If provided number of phase is too low or too high then reset value of phase as 1
     * */
    if(phases < 1 || phases >= B.getncol()){
        SpParHelper::Print("[MemEfficientSpGEMM3D]\tThe value of phases is too small or large. Resetting to 1.\n");
        phases = 1;
    }
    double t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; // To time different parts of the function
#ifdef TIMING
    MPI_Barrier(B.getcommgrid3D()->GetWorld());
    t0 = MPI_Wtime();
#endif
    /*
     * If per process memory is provided then calculate number of phases
     * Otherwise, proceed to multiplication.
     * */
    if(perProcessMemory > 0) {
        int p, calculatedPhases;
        MPI_Comm_size(A.getcommgrid3D()->GetLayerWorld(),&p);
        int64_t perNNZMem_in = sizeof(IU)*2 + sizeof(NU1);   // bytes per input nonzero (row id, col id, value)
        int64_t perNNZMem_out = sizeof(IU)*2 + sizeof(NUO);  // bytes per output nonzero
        int64_t lannz = A.GetLayerMat()->getlocalnnz();
        int64_t gannz = 0;
        // Get maximum number of nnz owned by one process
        MPI_Allreduce(&lannz, &gannz, 1, MPIType<int64_t>(), MPI_MAX, A.getcommgrid3D()->GetWorld());
        //int64_t ginputMem = gannz * perNNZMem_in * 4; // Four pieces per process: one piece of own A and B, one piece of received A and B
        int64_t ginputMem = gannz * perNNZMem_in * 5; // One extra copy for safety
        // Estimate per layer nnz after multiplication. After this estimation each process would know an estimation of
        // how many nnz the corresponding layer will have after the layerwise operation.
        int64_t asquareNNZ = EstPerProcessNnzSUMMA(*(A.GetLayerMat()), *(B.GetLayerMat()), true);
        int64_t gasquareNNZ;
        MPI_Allreduce(&asquareNNZ, &gasquareNNZ, 1, MPIType<int64_t>(), MPI_MAX, A.getcommgrid3D()->GetFiberWorld());
        // Atmost two copies, one of a process's own, another received from fiber reduction
        int64_t gasquareMem = gasquareNNZ * perNNZMem_out * 2;
        // Calculate estimated average degree after multiplication
        int64_t d = ceil( ( ( gasquareNNZ / B.getcommgrid3D()->GetGridLayers() ) * sqrt(p) ) / B.GetLayerMat()->getlocalcols() );
        // Calculate per column nnz how left after k-select. Minimum of average degree and k-select parameters.
        int64_t k = std::min(int64_t(std::max(selectNum, recoverNum)), d );
        //estimate output memory
        int64_t postKselectOutputNNZ = ceil(( (B.GetLayerMat()->getlocalcols() / B.getcommgrid3D()->GetGridLayers() ) * k)/sqrt(p)); // If kselect is run
        int64_t postKselectOutputMem = postKselectOutputNNZ * perNNZMem_out * 2;
        double remainingMem = perProcessMemory*1000000000 - ginputMem - postKselectOutputMem;
        int64_t kselectMem = B.GetLayerMat()->getlocalcols() * k * sizeof(NUO) * 3;
        //inputMem + outputMem + asquareMem/phases + kselectmem/phases < memory
        if(remainingMem > 0){
            calculatedPhases = ceil( (gasquareMem + kselectMem) / remainingMem ); // If kselect is run
        }
        else calculatedPhases = -1; // Not enough memory even without the phased intermediate product
        int gCalculatedPhases;
        MPI_Allreduce(&calculatedPhases, &gCalculatedPhases, 1, MPI_INT, MPI_MAX, A.getcommgrid3D()->GetFiberWorld());
        if(gCalculatedPhases > phases) phases = gCalculatedPhases;
    }
    else{
        // Do nothing
    }
#ifdef TIMING
    MPI_Barrier(B.getcommgrid3D()->GetWorld());
    t1 = MPI_Wtime();
    mcl3d_symbolictime+=(t1-t0);
    //if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tSymbolic stage time: %lf\n", (t1-t0));
#endif
    /*
     * Calculate, accross fibers, which process should get how many columns after redistribution
     * */
    vector<LIB> divisions3d;
    // Calculate split boundaries as if all contents of the layer is being re-distributed along fiber
    // These boundaries will be used later on
    B.CalculateColSplitDistributionOfLayer(divisions3d);
    /*
     * Split B according to calculated number of phases
     * For better load balancing split B into nlayers*phases chunks
     * */
    vector<UDERB*> PiecesOfB;
    vector<UDERB*> tempPiecesOfB;
    UDERB CopyB = *(B.GetLayerMat()->seqptr());
    CopyB.ColSplit(divisions3d, tempPiecesOfB); // Split B into `nlayers` chunks at first
    for(int i = 0; i < tempPiecesOfB.size(); i++){
        vector<UDERB*> temp;
        tempPiecesOfB[i]->ColSplit(phases, temp); // Split each chunk of B into `phases` chunks
        for(int j = 0; j < temp.size(); j++){
            PiecesOfB.push_back(temp[j]);
        }
    }
    vector<UDERO> toconcatenate; // per-phase pruned results; column-concatenated at the end
    //if(myrank == 0){
        //fprintf(stderr, "[MemEfficientSpGEMM3D]\tRunning with phase: %d\n", phases);
    //}
    for(int p = 0; p < phases; p++){
        /*
         * At the start of each phase take appropriate pieces from previously created pieces of local B matrix
         * Appropriate means correct pieces so that 3D-merge can be properly load balanced.
         * */
        vector<LIB> lbDivisions3d; // load balance friendly division
        LIB totalLocalColumnInvolved = 0;
        vector<UDERB*> targetPiecesOfB; // Pieces of B involved in current phase
        for(int i = 0; i < PiecesOfB.size(); i++){
            if(i % phases == p){
                targetPiecesOfB.push_back(new UDERB(*(PiecesOfB[i])));
                lbDivisions3d.push_back(PiecesOfB[i]->getncol());
                totalLocalColumnInvolved += PiecesOfB[i]->getncol();
            }
        }
        /*
         * Create new local matrix by concatenating appropriately picked pieces
         * */
        UDERB * OnePieceOfB = new UDERB(0, (B.GetLayerMat())->seqptr()->getnrow(), totalLocalColumnInvolved, 0);
        OnePieceOfB->ColConcatenate(targetPiecesOfB);
        // NOTE(review): the copies pushed into targetPiecesOfB are never freed here;
        // presumably ColConcatenate takes ownership — confirm against its contract.
        vector<UDERB*>().swap(targetPiecesOfB);
        /*
         * Create a new layer-wise distributed matrix with the newly created local matrix for this phase
         * This matrix is used in SUMMA multiplication of respective layer
         * */
        SpParMat<IU, NU2, UDERB> OnePieceOfBLayer(OnePieceOfB, A.getcommgrid3D()->GetLayerWorld());
#ifdef TIMING
        t0 = MPI_Wtime();
#endif
        /*
         * SUMMA Starts
         * */
        int stages, dummy; // last two parameters of ProductGrid are ignored for this multiplication
        std::shared_ptr<CommGrid> GridC = ProductGrid((A.GetLayerMat()->getcommgrid()).get(),
                                                      (OnePieceOfBLayer.getcommgrid()).get(),
                                                      stages, dummy, dummy);
        LIA C_m = A.GetLayerMat()->seqptr()->getnrow();
        LIB C_n = OnePieceOfBLayer.seqptr()->getncol();
        LIA ** ARecvSizes = SpHelper::allocate2D<LIA>(UDERA::esscount, stages);
        LIB ** BRecvSizes = SpHelper::allocate2D<LIB>(UDERB::esscount, stages);
        SpParHelper::GetSetSizes( *(A.GetLayerMat()->seqptr()), ARecvSizes, (A.GetLayerMat()->getcommgrid())->GetRowWorld() );
        SpParHelper::GetSetSizes( *(OnePieceOfBLayer.seqptr()), BRecvSizes, (OnePieceOfBLayer.getcommgrid())->GetColWorld() );
        // Remotely fetched matrices are stored as pointers
        UDERA * ARecv;
        UDERB * BRecv;
        std::vector< SpTuples<LIC,NUO> *> tomerge; // per-stage partial products
        int Aself = (A.GetLayerMat()->getcommgrid())->GetRankInProcRow();
        int Bself = (OnePieceOfBLayer.getcommgrid())->GetRankInProcCol();
        double Abcast_time = 0;
        double Bbcast_time = 0;
        double Local_multiplication_time = 0;
        for(int i = 0; i < stages; ++i) {
            std::vector<LIA> ess;
            if(i == Aself){
                ARecv = A.GetLayerMat()->seqptr(); // shallow-copy
            }
            else{
                ess.resize(UDERA::esscount);
                for(int j=0; j<UDERA::esscount; ++j) {
                    ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row
                }
                ARecv = new UDERA(); // first, create the object
            }
#ifdef TIMING
            t2 = MPI_Wtime();
#endif
            if (Aself != i) {
                ARecv->Create(ess); // allocate the receive buffers from the essentials
            }
            Arr<LIA,NU1> Aarrinfo = ARecv->GetArrays();
            for(unsigned int idx = 0; idx < Aarrinfo.indarrs.size(); ++idx) {
                MPI_Bcast(Aarrinfo.indarrs[idx].addr, Aarrinfo.indarrs[idx].count, MPIType<IU>(), i, GridC->GetRowWorld());
            }
            for(unsigned int idx = 0; idx < Aarrinfo.numarrs.size(); ++idx) {
                MPI_Bcast(Aarrinfo.numarrs[idx].addr, Aarrinfo.numarrs[idx].count, MPIType<NU1>(), i, GridC->GetRowWorld());
            }
#ifdef TIMING
            t3 = MPI_Wtime();
            mcl3d_Abcasttime += (t3-t2);
            Abcast_time += (t3-t2);
#endif
            ess.clear();
            if(i == Bself){
                BRecv = OnePieceOfBLayer.seqptr(); // shallow-copy
            }
            else{
                ess.resize(UDERB::esscount);
                for(int j=0; j<UDERB::esscount; ++j) {
                    ess[j] = BRecvSizes[j][i];
                }
                BRecv = new UDERB();
            }
            MPI_Barrier(A.GetLayerMat()->getcommgrid()->GetWorld());
#ifdef TIMING
            t2 = MPI_Wtime();
#endif
            if (Bself != i) {
                BRecv->Create(ess);
            }
            Arr<LIB,NU2> Barrinfo = BRecv->GetArrays();
            for(unsigned int idx = 0; idx < Barrinfo.indarrs.size(); ++idx) {
                MPI_Bcast(Barrinfo.indarrs[idx].addr, Barrinfo.indarrs[idx].count, MPIType<IU>(), i, GridC->GetColWorld());
            }
            for(unsigned int idx = 0; idx < Barrinfo.numarrs.size(); ++idx) {
                MPI_Bcast(Barrinfo.numarrs[idx].addr, Barrinfo.numarrs[idx].count, MPIType<NU2>(), i, GridC->GetColWorld());
            }
#ifdef TIMING
            t3 = MPI_Wtime();
            mcl3d_Bbcasttime += (t3-t2);
            Bbcast_time += (t3-t2);
#endif
#ifdef TIMING
            t2 = MPI_Wtime();
#endif
            SpTuples<LIC,NUO> * C_cont;
            if(computationKernel == 2){
                C_cont = LocalSpGEMM<SR, NUO>
                    (*ARecv, *BRecv, // parameters themselves
                    i != Aself,      // 'delete A' condition
                    i != Bself);     // 'delete B' condition
            }
            else{
                // computationKernel == 1, and the fallback for any other value,
                // so C_cont can never be used uninitialized.
                C_cont = LocalSpGEMMHash<SR, NUO>
                    (*ARecv, *BRecv, // parameters themselves
                    i != Aself,      // 'delete A' condition
                    i != Bself,      // 'delete B' condition
                    false);          // not to sort each column
            }
#ifdef TIMING
            t3 = MPI_Wtime();
            mcl3d_localspgemmtime += (t3-t2);
            Local_multiplication_time += (t3-t2);
#endif
            if(!C_cont->isZero()) tomerge.push_back(C_cont);
        }
        SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
        SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
#ifdef TIMING
        t2 = MPI_Wtime();
#endif
        SpTuples<LIC,NUO> * C_tuples;
        if(computationKernel == 2) C_tuples = MultiwayMerge<SR>(tomerge, C_m, C_n, true); // Delete input arrays and sort
        else C_tuples = MultiwayMergeHash<SR>(tomerge, C_m, C_n, true, true); // Delete input arrays and sort; hash is the default kernel
#ifdef TIMING
        t3 = MPI_Wtime();
        mcl3d_SUMMAmergetime += (t3-t2);
#endif
#ifdef TIMING
        if(myrank == 0){
            fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAbcast_time: %lf\n", p, Abcast_time);
            fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tBbcast_time: %lf\n", p, Bbcast_time);
            fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tLocal_multiplication_time: %lf\n", p, Local_multiplication_time);
            fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tSUMMA Merge time: %lf\n", p, (t3-t2));
        }
#endif
        /*
         * SUMMA Ends
         * */
#ifdef TIMING
        t1 = MPI_Wtime();
        mcl3d_SUMMAtime += (t1-t0);
        if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tSUMMA time: %lf\n", p, (t1-t0));
#endif
        /*
         * 3d-reduction starts
         * */
#ifdef TIMING
        t0 = MPI_Wtime();
        t2 = MPI_Wtime();
#endif
        MPI_Datatype MPI_tuple;
        MPI_Type_contiguous(sizeof(std::tuple<LIC,LIC,NUO>), MPI_CHAR, &MPI_tuple);
        MPI_Type_commit(&MPI_tuple);
        /*
         * Create a profile with information regarding data to be sent and received between layers
         * These memory allocation needs to be `int` specifically because some of these arrays would be used in communication
         * This is requirement is for MPI as MPI_Alltoallv takes pointer to integer exclusively as count and displacement
         * */
        int * sendcnt = new int[A.getcommgrid3D()->GetGridLayers()];
        int * sendprfl = new int[A.getcommgrid3D()->GetGridLayers()*3];
        int * sdispls = new int[A.getcommgrid3D()->GetGridLayers()](); // value-initialized: sdispls[0] must be 0
        int * recvcnt = new int[A.getcommgrid3D()->GetGridLayers()];
        int * recvprfl = new int[A.getcommgrid3D()->GetGridLayers()*3];
        int * rdispls = new int[A.getcommgrid3D()->GetGridLayers()](); // value-initialized: rdispls[0] must be 0
        vector<LIC> lbDivisions3dPrefixSum(lbDivisions3d.size());
        lbDivisions3dPrefixSum[0] = 0;
        std::partial_sum(lbDivisions3d.begin(), lbDivisions3d.end()-1, lbDivisions3dPrefixSum.begin()+1);
        ColLexiCompare<LIC,NUO> comp;
        LIC totsend = C_tuples->getnnz();
#ifdef TIMING
        t3 = MPI_Wtime();
        if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAllocation of alltoall information: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
        t2 = MPI_Wtime();
#endif
        // C_tuples is column-sorted, so each layer's chunk is a contiguous range
        // that can be located with binary search.
#pragma omp parallel for
        for(int i=0; i < A.getcommgrid3D()->GetGridLayers(); ++i){
            LIC start_col = lbDivisions3dPrefixSum[i];
            LIC end_col = lbDivisions3dPrefixSum[i] + lbDivisions3d[i];
            std::tuple<LIC, LIC, NUO> search_tuple_start(0, start_col, NUO());
            std::tuple<LIC, LIC, NUO> search_tuple_end(0, end_col, NUO());
            std::tuple<LIC, LIC, NUO>* start_it = std::lower_bound(C_tuples->tuples, C_tuples->tuples + C_tuples->getnnz(), search_tuple_start, comp);
            std::tuple<LIC, LIC, NUO>* end_it = std::lower_bound(C_tuples->tuples, C_tuples->tuples + C_tuples->getnnz(), search_tuple_end, comp);
            // This type casting is important from semantic point of view
            sendcnt[i] = (int)(end_it - start_it);
            sendprfl[i*3+0] = (int)(sendcnt[i]); // Number of nonzeros in ith chunk
            sendprfl[i*3+1] = (int)(A.GetLayerMat()->seqptr()->getnrow()); // Number of rows in ith chunk
            sendprfl[i*3+2] = (int)(lbDivisions3d[i]); // Number of columns in ith chunk
        }
        std::partial_sum(sendcnt, sendcnt+A.getcommgrid3D()->GetGridLayers()-1, sdispls+1);
#ifdef TIMING
        t3 = MPI_Wtime();
        if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tGetting Alltoall data ready: %lf\n", p, (t3-t2));
#endif
        // Send profile ready. Now need to update the tuples to reflect correct column id after column split.
#ifdef TIMING
        t2 = MPI_Wtime();
#endif
        for(int i=0; i < A.getcommgrid3D()->GetGridLayers(); ++i){
#pragma omp parallel for schedule(static)
            for(int j = 0; j < sendcnt[i]; j++){
                std::get<1>(C_tuples->tuples[sdispls[i]+j]) = std::get<1>(C_tuples->tuples[sdispls[i]+j]) - lbDivisions3dPrefixSum[i];
            }
        }
#ifdef TIMING
        t3 = MPI_Wtime();
        if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tGetting Alltoallv data ready: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
        t2 = MPI_Wtime();
#endif
        MPI_Alltoall(sendprfl, 3, MPI_INT, recvprfl, 3, MPI_INT, A.getcommgrid3D()->GetFiberWorld());
#ifdef TIMING
        t3 = MPI_Wtime();
        if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAlltoall: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
        t2 = MPI_Wtime();
#endif
        for(int i = 0; i < A.getcommgrid3D()->GetGridLayers(); i++) recvcnt[i] = recvprfl[i*3];
        std::partial_sum(recvcnt, recvcnt+A.getcommgrid3D()->GetGridLayers()-1, rdispls+1);
        // Accumulate in the output index type LIC (previously seeded with IU,
        // which could narrow the sum if IU is smaller than LIC).
        LIC totrecv = std::accumulate(recvcnt,recvcnt+A.getcommgrid3D()->GetGridLayers(), static_cast<LIC>(0));
        std::tuple<LIC,LIC,NUO>* recvTuples = static_cast<std::tuple<LIC,LIC,NUO>*> (::operator new (sizeof(std::tuple<LIC,LIC,NUO>[totrecv])));
#ifdef TIMING
        t3 = MPI_Wtime();
        if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAllocation of receive data: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
        t2 = MPI_Wtime();
#endif
        MPI_Alltoallv(C_tuples->tuples, sendcnt, sdispls, MPI_tuple, recvTuples, recvcnt, rdispls, MPI_tuple, A.getcommgrid3D()->GetFiberWorld());
        delete C_tuples;
#ifdef TIMING
        t3 = MPI_Wtime();
        if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAlltoallv: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
        t2 = MPI_Wtime();
#endif
        // Wrap each layer's received range as an SpTuples view into recvTuples
        // (ownership of the underlying buffer stays with recvTuples).
        vector<SpTuples<LIC, NUO>*> recvChunks(A.getcommgrid3D()->GetGridLayers());
#pragma omp parallel for
        for (int i = 0; i < A.getcommgrid3D()->GetGridLayers(); i++){
            recvChunks[i] = new SpTuples<LIC, NUO>(recvcnt[i], recvprfl[i*3+1], recvprfl[i*3+2], recvTuples + rdispls[i], true, false);
        }
#ifdef TIMING
        t3 = MPI_Wtime();
        if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\trecvChunks creation: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
        t2 = MPI_Wtime();
#endif
        // Free all memory except tempTuples; Because that is holding data of newly created local matrices after receiving.
        DeleteAll(sendcnt, sendprfl, sdispls);
        DeleteAll(recvcnt, recvprfl, rdispls);
        MPI_Type_free(&MPI_tuple);
#ifdef TIMING
        t3 = MPI_Wtime();
        if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tMemory freeing: %lf\n", p, (t3-t2));
#endif
        /*
         * 3d-reduction ends
         * */
#ifdef TIMING
        t1 = MPI_Wtime();
        mcl3d_reductiontime += (t1-t0);
        if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tReduction time: %lf\n", p, (t1-t0));
#endif
#ifdef TIMING
        t0 = MPI_Wtime();
#endif
        /*
         * 3d-merge starts
         * */
        SpTuples<LIC, NUO> * merged_tuples;
        if(computationKernel == 2) merged_tuples = MultiwayMerge<SR, LIC, NUO>(recvChunks, recvChunks[0]->getnrow(), recvChunks[0]->getncol(), false); // Do not delete
        else merged_tuples = MultiwayMergeHash<SR, LIC, NUO>(recvChunks, recvChunks[0]->getnrow(), recvChunks[0]->getncol(), false, false); // Do not delete; hash is the default kernel
#ifdef TIMING
        t1 = MPI_Wtime();
        mcl3d_3dmergetime += (t1-t0);
        if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\t3D Merge time: %lf\n", p, (t1-t0));
#endif
        /*
         * 3d-merge ends
         * */
#ifdef TIMING
        t0 = MPI_Wtime();
#endif
        // Do not delete elements of recvChunks, because that would give segmentation fault due to double free
        ::operator delete(recvTuples);
        for(int i = 0; i < recvChunks.size(); i++){
            recvChunks[i]->tuples_deleted = true; // Temporary patch to avoid memory leak and segfault
            delete recvChunks[i]; // As the patch is used, now delete each element of recvChunks
        }
        vector<SpTuples<LIC,NUO>*>().swap(recvChunks); // As the patch is used, now delete recvChunks
        // This operation is not needed if result can be used and discareded right away
        // This operation is being done because it is needed by MCLPruneRecoverySelect
        UDERO * phaseResultant = new UDERO(*merged_tuples, false);
        delete merged_tuples;
        SpParMat<IU, NUO, UDERO> phaseResultantLayer(phaseResultant, A.getcommgrid3D()->GetLayerWorld());
        MCLPruneRecoverySelect(phaseResultantLayer, hardThreshold, selectNum, recoverNum, recoverPct, kselectVersion);
#ifdef TIMING
        t1 = MPI_Wtime();
        mcl3d_kselecttime += (t1-t0);
        if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tMCLPruneRecoverySelect time: %lf\n",p, (t1-t0));
#endif
        toconcatenate.push_back(phaseResultantLayer.seq());
#ifdef TIMING
        if(myrank == 0) fprintf(stderr, "***\n");
#endif
    }
    for(int i = 0; i < PiecesOfB.size(); i++) delete PiecesOfB[i];
    std::shared_ptr<CommGrid3D> grid3d;
    grid3d.reset(new CommGrid3D(A.getcommgrid3D()->GetWorld(), A.getcommgrid3D()->GetGridLayers(), A.getcommgrid3D()->GetGridRows(), A.getcommgrid3D()->GetGridCols(), A.isSpecial()));
    UDERO * localResultant = new UDERO(0, A.GetLayerMat()->seqptr()->getnrow(), divisions3d[A.getcommgrid3D()->GetRankInFiber()], 0);
    localResultant->ColConcatenate(toconcatenate);
    SpParMat3D<IU, NUO, UDERO> C3D(localResultant, grid3d, A.isColSplit(), A.isSpecial());
    return C3D;
}
}
#endif
|
convolution_pack8_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Naive int8 convolution for pack8 layout (SSE2 path despite the _neon name).
// For every output pixel, accumulates 8 output lanes as int32 across all input
// channels and kernel taps. Weights are stored as 64 int8 values per tap
// (8 input lanes x 8 output lanes).
static void convolution_pack8_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    const int w = bottom_blob.w;
    const int channels = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // Precompute the flattened input offset of every kernel tap, accounting
    // for dilation (standard im2col-style offset table).
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int idx = 0;
        int offset = 0;
        const int row_gap = w * dilation_h - kernel_w * dilation_w;
        for (int ky = 0; ky < kernel_h; ky++)
        {
            for (int kx = 0; kx < kernel_w; kx++)
            {
                space_ofs[idx++] = offset;
                offset += dilation_w;
            }
            offset += row_gap;
        }
    }

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        int* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Two int32x4 accumulators cover the 8 output lanes.
                __m128i _sum0 = _mm_setzero_si128();
                __m128i _sum1 = _mm_setzero_si128();

                const signed char* kptr = weight_data_int8.channel(p);

                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const signed char* sptr = m.row<signed char>(i * stride_h) + j * stride_w * 8;

                    for (int k = 0; k < maxk; k++)
                    {
                        const signed char* valptr = sptr + space_ofs[k] * 8;

                        // Same math as the fully unrolled original: one input
                        // lane at a time, in the same accumulation order.
                        for (int lane = 0; lane < 8; lane++)
                        {
                            __m128i _val = _mm_set1_epi16((short)valptr[lane]);

                            // Sign-extend 8 int8 weights to int16.
                            // TODO use _mm_cvtepi8_epi16 on sse4.1
                            __m128i _w = _mm_loadl_epi64((const __m128i*)kptr);
                            _w = _mm_unpacklo_epi8(_w, _mm_cmpgt_epi8(_mm_setzero_si128(), _w));

                            // Widen the 16x16-bit products to 32 bits via the
                            // lo/hi halves and accumulate.
                            __m128i _sl = _mm_mullo_epi16(_val, _w);
                            __m128i _sh = _mm_mulhi_epi16(_val, _w);
                            _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl, _sh));
                            _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl, _sh));

                            kptr += 8; // 8 weights consumed per input lane (64 per tap)
                        }
                    }
                }

                _mm_storeu_si128((__m128i*)(outptr + j * 8), _sum0);
                _mm_storeu_si128((__m128i*)(outptr + j * 8 + 4), _sum1);
            }

            outptr += outw * 8;
        }
    }
}
|
GB_binop__minus_fc32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_08__minus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_04__minus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_fc32)
// A*D function (colscale): GB (_AxD__minus_fc32)
// D*A function (rowscale): GB (_DxB__minus_fc32)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_fc32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_fc32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_fc32)
// C=scalar+B GB (_bind1st__minus_fc32)
// C=scalar+B' GB (_bind1st_tran__minus_fc32)
// C=A+scalar GB (_bind2nd__minus_fc32)
// C=A'+scalar GB (_bind2nd_tran__minus_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_FC32_minus (aij, bij)
// Type and operator plumbing consumed by the kernel templates included below.
// (Auto-generated; the Generator/ source is the place to change these.)
#define GB_ATYPE \
    GxB_FC32_t
#define GB_BTYPE \
    GxB_FC32_t
#define GB_CTYPE \
    GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
// (GBX handles the iso case — defined in GB.h, not in this file)
#define GB_GETA(aij,Ax,pA,A_iso) \
    GxB_FC32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    GxB_FC32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)
// access entry p of the C value array
#define GB_CX(p) Cx [p]
// binary operator: complex float subtraction
#define GB_BINOP(z,x,y,i,j) \
    z = GB_FC32_minus (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_FC32 || GxB_NO_MINUS_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the actual loop lives in the
// included template, specialized by the GB_* macros defined above.
void GB (_Cdense_ewise3_accum__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads      // # of threads to use
)
{
    // NOTE(review): unlike the other kernels in this file there is no
    // GB_DISABLE guard here; presumably the caller never dispatches to this
    // kernel when the operator is disabled — confirm against GB_dense_ewise3.
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense (no accumulation into C).
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE),
// which tells the caller to fall back to the generic kernel.
GrB_Info GB (_Cdense_ewise3_noaccum__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads      // # of threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; B_ek_slicing describes how B's
// entries are partitioned across the B_ntasks/B_nthreads workers.
GrB_Info GB (_Cdense_accumB__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar passed as an untyped pointer.
GrB_Info GB (_Cdense_accumb__minus_fc32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // points to a GxB_FC32_t scalar
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the block above always returns); harmless artifact of the
    // code generator — this file is auto-generated and must not be hand-edited
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
// A_ek_slicing partitions A's entries across A_ntasks/A_nthreads workers.
GrB_Info GB (_AxD__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,  // pattern: values of A are ignored
    const GrB_Matrix D, bool D_is_pattern,  // pattern: values of D are ignored
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes results directly into C's value array
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,  // pattern: values of D are ignored
    const GrB_Matrix B, bool B_is_pattern,  // pattern: values of B are ignored
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes results directly into C's value array
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, where the result pattern is the
// union of A's and B's patterns. The C_to_* maps and TaskList come from the
// symbolic analysis phase (GB_add_phase*).
GrB_Info GB (_AaddB__minus_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,             // optional mask (may be NULL)
    const bool Mask_struct,         // if true, use only the mask's structure
    const bool Mask_comp,           // if true, the mask is complemented
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,            // true if C's hyperlist equals M's
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces allocated on demand by the template, freed by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse/hypersparse; the result pattern is the intersection of A and B.
GrB_Info GB (_AemultB_08__minus_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,             // optional mask (may be NULL)
    const bool Mask_struct,         // if true, use only the mask's structure
    const bool Mask_comp,           // if true, the mask is complemented
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. flipxy asks for fmult(y,x) instead of fmult(x,y); since
// GB_BINOP_FLIP is 0 for this operator, the flip has already been resolved
// upstream and the unflipped template is always used.
GrB_Info GB (_AemultB_02__minus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,             // optional mask (may be NULL)
    const bool Mask_struct,         // if true, use only the mask's structure
    const bool Mask_comp,           // if true, the mask is complemented
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult kernel (method 04): C<M> = A.*B, with M sparse/hypersparse and
// both A and B bitmap/full.  Implementation in GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__minus_fc32)
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix M,                 // mask, sparse or hypersparse
    const bool Mask_struct,             // if true, use only the structure of M
    const GrB_Matrix A,                 // bitmap/full input
    const GrB_Matrix B,                 // bitmap/full input
    const int64_t *restrict Cp_kfirst,  // first entry of C in each slice
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult kernel: C = A.*B (optionally masked) where the result C is
// held in bitmap form.  Implementation in GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__minus_fc32)
(
    GrB_Matrix C,                       // output matrix, bitmap
    const int ewise_method,             // which emult strategy was selected
    const GrB_Matrix M,                 // optional mask (may be NULL)
    const bool Mask_struct,             // if true, use only the structure of M
    const bool Mask_comp,               // if true, use the complemented mask !M
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,               // threads for the unmasked phase
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x - Bx [p] for every entry p: apply MINUS with the scalar bound
// to the first argument (bind1st), single-precision complex.
GrB_Info GB (_bind1st__minus_fc32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // scalar x, bound as the first operand
    const GB_void *Bx_input,    // numeric values of B
    const int8_t *restrict Bb,  // B->b bitmap, or NULL if B is not bitmap
    int64_t bnz,                // number of entries to process
    int nthreads                // number of OpenMP threads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip positions not present in the bitmap (GBB is 1 when Bb is NULL)
        if (!GBB (Bb, p)) continue ;
        GxB_FC32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC32_minus (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] - y for every entry p: apply MINUS with the scalar bound
// to the second argument (bind2nd), single-precision complex.
GrB_Info GB (_bind2nd__minus_fc32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,    // numeric values of A
    const GB_void *y_input,     // scalar y, bound as the second operand
    const int8_t *restrict Ab,  // A->b bitmap, or NULL if A is not bitmap
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip positions not present in the bitmap (GBB is 1 when Ab is NULL)
        if (!GBB (Ab, p)) continue ;
        GxB_FC32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC32_minus (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_minus (x, aij) ; \
}
// C = op (x, A'): transpose A and apply MINUS with the scalar bound to the
// first argument; cij = x - aij via the GB_CAST_OP macro defined above.
GrB_Info GB (_bind1st_tran__minus_fc32)
(
    GrB_Matrix C,                   // output matrix, the transpose of op(x,A)
    const GB_void *x_input,         // scalar bound to the first operand
    const GrB_Matrix A,             // input matrix to transpose
    int64_t *restrict *Workspaces,  // workspaces for the bucket transpose
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // generated-code boilerplate: re-define GB_ATYPE for code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC32_minus (aij, y) ; \
}
// C = op (A', y): transpose A and apply MINUS with the scalar bound to the
// second argument; cij = aij - y via the GB_CAST_OP macro defined above.
GrB_Info GB (_bind2nd_tran__minus_fc32)
(
    GrB_Matrix C,                   // output matrix, the transpose of op(A,y)
    const GrB_Matrix A,             // input matrix to transpose
    const GB_void *y_input,         // scalar bound to the second operand
    int64_t *restrict *Workspaces,  // workspaces for the bucket transpose
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
dynamicChunk.c | #include <stdio.h>
#include <omp.h>
/*
 * Fill a[0..n-1] with i/2 in parallel and print a[878].
 *
 * Fixes over the original:
 *  - `#pragma omp for` was orphaned (no enclosing parallel region), so the
 *    loop ran on a single thread; use `parallel for` as intended.
 *  - scanf's result was ignored and n was unbounded: n > 1000 overflowed
 *    the stack array, and n <= 878 made the final printf read an
 *    uninitialized element.  Both are undefined behavior.
 *
 * Returns 0 on success (matching the original contract).
 */
int foo (void)
{
  enum { A_LEN = 1000 };        /* capacity of the local buffer */
  double a[A_LEN];
  int i;
  int n;

  /* treat unparsable or negative input as "no work" */
  if (scanf("%d", &n) != 1 || n < 0)
    n = 0;
  /* clamp to the buffer size to prevent an out-of-bounds write */
  if (n > A_LEN)
    n = A_LEN;

  /* dynamic scheduling in chunks of 50 iterations, as in the original */
#pragma omp parallel for schedule(dynamic,50)
  for (i = 0; i < n; i++)
  {
    a[i] = (double)i / 2.0;
  }

  if (n > 878)
    printf("a[878]=%f\n", a[878]);
  else
    printf("a[878]=undefined (n=%d)\n", n);  /* element was never computed */
  return 0;
}
|
omp-for-simple.c | #include <stdio.h>
/* Print "Hello World <i>" for i = 0..10, with the iterations distributed
 * over the OpenMP team (output order is therefore nondeterministic).
 * Fix: removed the unused local variable `j`. */
int main() {
    int i;
#pragma omp parallel for
    for(i = 0; i < 11; i++)
    {
        printf("Hello World %d\n", i);
    }
    return 0;
}
|
GB_unop__identity_uint64_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint64_int16)
// op(A') function: GB (_unop_tran__identity_uint64_int16)
// C type: uint64_t
// A type: int16_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = (uint64_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (uint64_t) Ax [p] for every entry p: apply the IDENTITY unary
// operator with an int16 -> uint64 typecast, elementwise over anz entries.
GrB_Info GB (_unop_apply__identity_uint64_int16)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const int16_t *Ax,          // input values
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // no bitmap: every one of the anz positions holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identical types: a bulk memcpy suffices (disabled here, see macro)
        GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip positions with no entry
            int16_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (uint64_t) A': transpose A while typecasting int16 values to uint64;
// the transpose loops live in GB_unop_transpose.c, driven by the GB_* macros
// defined earlier in this generated file.
GrB_Info GB (_unop_tran__identity_uint64_int16)
(
    GrB_Matrix C,                   // output matrix
    const GrB_Matrix A,             // input matrix to transpose
    int64_t *restrict *Workspaces,  // workspaces for the bucket transpose
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
FastFourierTransform.h | //Copyright (c) 2019 Mauricio Kugler, Nagoya Institute of Technology
#ifndef FASTFOURIERTRANSFORMH
#define FASTFOURIERTRANSFORMH
#include <complex>
#include <cmath>
using namespace std;
// Iterative radix-2 FFT engine for 1-, 2- and 3-dimensional real signals.
// Naming pattern (assumed from the inline implementations below -- confirm
// against the constructor in the .cpp): for each dimension d, Nd = length,
// Md = half length used by the butterflies, Kd = number of butterfly stages,
// Fd = inverse-transform scale factor, Dd = output reordering table, and
// wd / vd = forward / inverse twiddle-factor tables.
class FastFourierTransform
{
private:
    unsigned int N1,N2,N3;          // transform lengths per dimension
    unsigned int M1,M2,M3;          // half lengths (butterfly pair offset)
    unsigned int K1,K2,K3;          // butterfly stage counts
    float F1,F2,F3;                 // inverse-FFT normalization factors
    unsigned int *D1, *D2, *D3;     // output permutation tables
    complex<float> *w1, *w2, *w3;   // forward twiddle factors
    complex<float> *v1, *v2, *v3;   // inverse twiddle factors
    complex<float> **z1a;           // 1D stage workspace [stage][index]
    complex<float> *y1;             // 1D complex output buffer (reused per call)
    complex<float> ***z2a, ***u2a;  // 2D workspaces: row pass / column pass
    complex<float> **z2b, **u2b;    // 2D per-line outputs of each pass
    complex<float> **y2;            // 2D complex output buffer (reused per call)
    complex<float> ****z3a, ****u3a, ****g3a;   // 3D workspaces, one per axis pass
    complex<float> ***z3b, ***u3b, ***g3b;      // 3D per-line outputs of each pass
    complex<float> ***y3;           // 3D complex output buffer (reused per call)
    float *r1;                      // 1D real output of the inverse transform
    float **r2;                     // 2D real output of the inverse transform
    float ***r3;                    // 3D real output of the inverse transform
    void initFFT(unsigned int n1, unsigned int n2, unsigned int n3, unsigned int d);
    // core butterfly kernels shared by all dimensionalities
    void inline fft(complex<float> *x, complex<float> **z, unsigned int N, unsigned int M, unsigned int K, complex<float> *w, unsigned int *D);
    void inline ifft(complex<float> *x, complex<float> **z, unsigned int N, unsigned int M, unsigned int K, complex<float> *v, unsigned int *D, float F);
public:
    FastFourierTransform(unsigned int n1);
    FastFourierTransform(unsigned int n1, unsigned int n2);
    FastFourierTransform(unsigned int n1, unsigned int n2, unsigned int n3);
    ~FastFourierTransform();
    // NOTE: all returned buffers are internal and overwritten by the next call
    complex<float> inline *fft1(float *x);      //complex-conjugate results!
    complex<float> inline **fft2(float **x);    //complex-conjugate results!
    complex<float> inline ***fft3(float ***x);
    float inline *ifft1(complex<float> *x);
    float inline **ifft2(complex<float> **x);
    float inline ***ifft3(complex<float> ***x);
};
// Forward butterfly kernel: z[0] holds the N input samples; each of the K
// stages writes stage i+1 from stage i (sum to the even slot, twiddled
// difference to the odd slot).  The final stage-K values are copied to x in
// the order given by the permutation table D.
void inline FastFourierTransform::fft(complex<float> *x, complex<float> **z, unsigned int N, unsigned int M, unsigned int K, complex<float> *w, unsigned int *D)
{
    for(unsigned int i=0;i<K;i++) {
        // mask selects the twiddle index appropriate for stage i
        unsigned int mask = 0xffffffff<<i;
        for(unsigned int j=0;j<M;j++) {
            z[i+1][j<<1] = z[i][j] + z[i][j+M];
            z[i+1][(j<<1)+1] = w[j&mask]*(z[i][j] - z[i][j+M]);
        }
    }
    // reorder the last stage into natural output order
    for(unsigned int i=0;i<N;i++) {
        unsigned int j = D[i];
        x[i] = z[K][j];
    }
}
// Inverse butterfly kernel: identical structure to fft() but uses the
// inverse twiddle table v and scales each output sample by F (the 1/N
// normalization factor supplied by the caller).
void inline FastFourierTransform::ifft(complex<float> *x, complex<float> **z, unsigned int N, unsigned int M, unsigned int K, complex<float> *v, unsigned int *D, float F)
{
    for(unsigned int i=0;i<K;i++) {
        // mask selects the twiddle index appropriate for stage i
        unsigned int mask = 0xffffffff<<i;
        for(unsigned int j=0;j<M;j++) {
            z[i+1][j<<1] = z[i][j] + z[i][j+M];
            z[i+1][(j<<1)+1] = v[j&mask]*(z[i][j] - z[i][j+M]);
        }
    }
    // reorder and normalize the last stage into the output
    for(unsigned int i=0;i<N;i++) {
        unsigned int j = D[i];
        x[i] = z[K][j]*F;
    }
}
// Forward 1D FFT of a real signal of length N1.  Returns the internal
// buffer y1, which is overwritten by the next call.
complex<float> inline *FastFourierTransform::fft1(float *x)
{
    // load the real samples into stage 0 of the butterfly workspace
    for(unsigned int i=0;i<N1;i++) {
        z1a[0][i] = complex<float>(x[i],0);
    }
    fft(y1,z1a,N1,M1,K1,w1,D1);
    return(y1);
}
// Inverse 1D FFT: transforms the length-N1 spectrum x and returns the real
// parts in the internal buffer r1 (overwritten by the next call).
float inline *FastFourierTransform::ifft1(complex<float> *x)
{
    // load the spectrum into stage 0 of the butterfly workspace
    for(unsigned int i=0;i<N1;i++) {
        z1a[0][i] = x[i];
    }
    ifft(y1,z1a,N1,M1,K1,v1,D1,F1);
    // keep only the real part of the reconstructed signal
    for(unsigned int i=0;i<N1;i++) {
        r1[i] = y1[i].real();
    }
    return(r1);
}
// Forward 2D FFT of an N1 x N2 real array, as two passes of 1D FFTs with a
// transpose between them.  Returns the internal buffer y2 (overwritten by
// the next call).
complex<float> inline **FastFourierTransform::fft2(float **x)
{
    // pass 1: FFT each of the N1 rows (length N2)
    #pragma omp parallel for
    for(int i=0;i<(int)N1;i++) {
        for(unsigned int j=0;j<N2;j++) {
            z2a[i][0][j] = complex<float>(x[i][j],0);
        }
        fft(z2b[i],z2a[i],N2,M2,K2,w2,D2);
    }
    // pass 2: transpose, then FFT each of the N2 columns (length N1)
    #pragma omp parallel for
    for(int i=0;i<(int)N2;i++) {
        for(unsigned int j=0;j<N1;j++) {
            u2a[i][0][j] = z2b[j][i];
        }
        fft(u2b[i],u2a[i],N1,M1,K1,w1,D1);
    }
    // transpose back into row-major order in the output buffer
    #pragma omp parallel for
    for(int i=0;i<(int)N1;i++) {
        for(unsigned int j=0;j<N2;j++) {
            y2[i][j] = u2b[j][i];
        }
    }
    return(y2);
}
// Inverse 2D FFT: mirrors fft2() with the inverse kernels, then keeps the
// real parts.  Returns the internal buffer r2 (overwritten by the next call).
float inline **FastFourierTransform::ifft2(complex<float> **x)
{
    // pass 1: inverse FFT each of the N1 rows (length N2)
    #pragma omp parallel for
    for(int i=0;i<(int)N1;i++) {
        for(unsigned int j=0;j<N2;j++) {
            z2a[i][0][j] = x[i][j];
        }
        ifft(z2b[i],z2a[i],N2,M2,K2,v2,D2,F2);
    }
    // pass 2: transpose, then inverse FFT each of the N2 columns (length N1)
    #pragma omp parallel for
    for(int i=0;i<(int)N2;i++) {
        for(unsigned int j=0;j<N1;j++) {
            u2a[i][0][j] = z2b[j][i];
        }
        ifft(u2b[i],u2a[i],N1,M1,K1,v1,D1,F1);
    }
    // transpose back and keep only the real part
    #pragma omp parallel for
    for(int i=0;i<(int)N1;i++) {
        for(unsigned int j=0;j<N2;j++) {
            r2[i][j] = u2b[j][i].real();
        }
    }
    return(r2);
}
// Forward 3D FFT of an N1 x N2 x N3 real volume: three passes of 1D FFTs,
// one per axis, with an axis rotation between passes.  Returns the internal
// buffer y3 (overwritten by the next call).
complex<float> inline ***FastFourierTransform::fft3(float ***x)
{
    // pass 1: FFT along the third axis (length N3)
    #pragma omp parallel for
    for(int i=0;i<(int)N1;i++) {
        for(unsigned int j=0;j<N2;j++) {
            for(unsigned int k=0;k<N3;k++) {
                z3a[i][j][0][k] = complex<float>(x[i][j][k],0);
            }
            fft(z3b[i][j],z3a[i][j],N3,M3,K3,w3,D3);
        }
    }
    // pass 2: rotate axes and FFT along the first axis (length N1)
    #pragma omp parallel for
    for(int i=0;i<(int)N2;i++) {
        for(unsigned int j=0;j<N3;j++) {
            for(unsigned int k=0;k<N1;k++) {
                u3a[i][j][0][k] = z3b[k][i][j];
            }
            fft(u3b[i][j],u3a[i][j],N1,M1,K1,w1,D1);
        }
    }
    // pass 3: rotate axes and FFT along the second axis (length N2)
    #pragma omp parallel for
    for(int i=0;i<(int)N3;i++) {
        for(unsigned int j=0;j<N1;j++) {
            for(unsigned int k=0;k<N2;k++) {
                g3a[i][j][0][k] = u3b[k][i][j];
            }
            fft(g3b[i][j],g3a[i][j],N2,M2,K2,w2,D2);
        }
    }
    // rotate back into (N1, N2, N3) order in the output buffer
    #pragma omp parallel for
    for(int i=0;i<(int)N1;i++) {
        for(unsigned int j=0;j<N2;j++) {
            for(unsigned int k=0;k<N3;k++) {
                y3[i][j][k] = g3b[k][i][j];
            }
        }
    }
    return(y3);
}
// Inverse 3D FFT: mirrors fft3() with the inverse kernels, then keeps the
// real parts.  Returns the internal buffer r3 (overwritten by the next call).
float inline ***FastFourierTransform::ifft3(complex<float> ***x)
{
    // pass 1: inverse FFT along the third axis (length N3)
    #pragma omp parallel for
    for(int i=0;i<(int)N1;i++) {
        for(unsigned int j=0;j<N2;j++) {
            for(unsigned int k=0;k<N3;k++) {
                z3a[i][j][0][k] = x[i][j][k];
            }
            ifft(z3b[i][j],z3a[i][j],N3,M3,K3,v3,D3,F3);
        }
    }
    // pass 2: rotate axes and inverse FFT along the first axis (length N1)
    #pragma omp parallel for
    for(int i=0;i<(int)N2;i++) {
        for(unsigned int j=0;j<N3;j++) {
            for(unsigned int k=0;k<N1;k++) {
                u3a[i][j][0][k] = z3b[k][i][j];
            }
            ifft(u3b[i][j],u3a[i][j],N1,M1,K1,v1,D1,F1);
        }
    }
    // pass 3: rotate axes and inverse FFT along the second axis (length N2)
    #pragma omp parallel for
    for(int i=0;i<(int)N3;i++) {
        for(unsigned int j=0;j<N1;j++) {
            for(unsigned int k=0;k<N2;k++) {
                g3a[i][j][0][k] = u3b[k][i][j];
            }
            ifft(g3b[i][j],g3a[i][j],N2,M2,K2,v2,D2,F2);
        }
    }
    // rotate back and keep only the real part
    #pragma omp parallel for
    for(int i=0;i<(int)N1;i++) {
        for(unsigned int j=0;j<N2;j++) {
            for(unsigned int k=0;k<N3;k++) {
                r3[i][j][k] = g3b[k][i][j].real();
            }
        }
    }
    return(r3);
}
#endif
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values.
 *
 * Y is normalized in place (same as the classic glibc example) so that its
 * microsecond field lines up with X before the subtraction; callers should
 * not rely on Y being unchanged.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y->tv_usec when x has fewer microseconds. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Carry excess microseconds into y->tv_sec when the gap exceeds 1s. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* After normalization tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
/* Driver for the tiled order-1 3D 7-point stencil (PLUTO/CLooG output).
 *
 * Usage: prog Nx Ny Nz Nt
 *
 * Fixes over the original:
 *  - Nx/Ny/Nz/Nt were used uninitialized when fewer than 4 arguments were
 *    given (UB in the allocation loops and the stencil); arguments are now
 *    validated up front.
 *  - The stencil reads the boundary planes (index 0) and the second time
 *    plane A[1], which the original left uninitialized; both planes are now
 *    fully initialized.
 *  - Removed the unused local variable `t`.
 */
int main(int argc, char *argv[])
{
    int i, j, k, test;
    int Nx, Ny, Nz, Nt;

    if (argc <= 4) {
        fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
        return 1;
    }
    Nx = atoi(argv[1])+2;   /* +2 for the halo planes */
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
    Nt = atoi(argv[4]);

    /* two time planes of an Nz x Ny x Nx grid */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 16;
    tile_size[1] = 16;
    tile_size[2] = 16;
    tile_size[3] = 256;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;

    const int BASE = 1024;
    const double alpha = 0.0876;
    const double beta = 0.0765;

    /* initialize the full grid, including the boundary planes the stencil
     * reads, and seed both time planes with the same values */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = A[0][i][j][k];
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);

        // serial execution - Addition: 6 && Multiplication: 2
        int t1, t2, t3, t4, t5, t6, t7, t8;
        int lb, ub, lbp, ubp, lb2, ub2;
        register int lbv, ubv;
        /* Start of CLooG code */
        if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
            for (t1=-1;t1<=floord(Nt-2,8);t1++) {
                lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
                ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
                for (t2=lbp;t2<=ubp;t2++) {
                    for (t3=max(max(0,ceild(t1-1,2)),ceild(16*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(8*t1+Ny+13,16)),floord(16*t2+Ny+12,16)),floord(16*t1-16*t2+Nz+Ny+11,16));t3++) {
                        for (t4=max(max(max(0,ceild(t1-31,32)),ceild(16*t2-Nz-252,256)),ceild(16*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(8*t1+Nx+13,256)),floord(16*t2+Nx+12,256)),floord(16*t3+Nx+12,256)),floord(16*t1-16*t2+Nz+Nx+11,256));t4++) {
                            for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),16*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),16*t3+14),256*t4+254),16*t1-16*t2+Nz+13);t5++) {
                                for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
                                    for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
                                        lbv=max(256*t4,t5+1);
                                        ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
                                        for (t8=lbv;t8<=ubv;t8++) {
                                            A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        /* End of CLooG code */

        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = min(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (Causing performance degradation
    /* for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    */
    return 0;
}
|
DataTypeConversions.h | //
// Created by raver119 on 21.11.17.
//
#ifndef LIBND4J_DATATYPECONVERSIONS_H
#define LIBND4J_DATATYPECONVERSIONS_H
#include <pointercast.h>
#include <helpers/logger.h>
#include <op_boilerplate.h>
#include <array/DataType.h>
#include <types/float16.h>
#include <helpers/BitwiseUtils.h>
namespace nd4j {
// Static helper that widens/narrows a raw serialized buffer into the target
// element type T, swapping bytes when the stored byte order differs from the
// host's endianness.
template <typename T>
class DataTypeConversions {
public:
    // Convert `length` elements stored at `src` (declared type `dataType`,
    // byte order `order`) into `buffer` as type T.
    // Assumes `buffer` has room for `length` elements of T -- caller's duty.
    static FORCEINLINE void convertType(T* buffer, void* src, DataType dataType, ByteOrder order, Nd4jLong length) {
        bool isBe = BitwiseUtils::isBE();
        // bytes can be used as-is only when host and source endianness agree
        bool canKeep = (isBe && order == ByteOrder::BE) || (!isBe && order == ByteOrder::LE);

        switch (dataType) {
            case DataType_FLOAT: {
                if (std::is_same<T, float>::value && canKeep) {
                    // identical type and byte order: one bulk copy
                    memcpy(buffer, src, length * sizeof(T));
                } else {
                    auto tmp = reinterpret_cast<float *>(src);
#if __GNUC__ <= 4
                    // old GCC: hoist the canKeep test out of the loop manually
                    if (!canKeep)
                        for (Nd4jLong e = 0; e < length; e++)
                            buffer[e] = BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e]));
                    else
                        for (Nd4jLong e = 0; e < length; e++)
                            buffer[e] = static_cast<T>(tmp[e]);
#else
                    //#pragma omp parallel for simd schedule(guided)
                    for (Nd4jLong e = 0; e < length; e++)
                        buffer[e] = canKeep ? static_cast<T>(tmp[e]) : BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e]));
#endif
                }
            }
            break;
            case DataType_DOUBLE: {
                if (std::is_same<T, double>::value && canKeep) {
                    // identical type and byte order: one bulk copy
                    memcpy(buffer, src, length * sizeof(T));
                } else {
                    auto tmp = reinterpret_cast<double *>(src);
#if __GNUC__ <= 4
                    // old GCC: hoist the canKeep test out of the loop manually
                    if (!canKeep)
                        for (Nd4jLong e = 0; e < length; e++)
                            buffer[e] = BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e]));
                    else
                        for (Nd4jLong e = 0; e < length; e++)
                            buffer[e] = static_cast<T>(tmp[e]);
#else
                    //#pragma omp parallel for simd schedule(guided)
                    for (Nd4jLong e = 0; e < length; e++)
                        buffer[e] = canKeep ? static_cast<T>(tmp[e]) : BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e]));
#endif
                }
            }
            break;
            case DataType_HALF: {
                if (std::is_same<T, float16>::value && canKeep) {
                    // identical type and byte order: one bulk copy
                    memcpy(buffer, src, length * sizeof(T));
                } else {
                    auto tmp = reinterpret_cast<float16 *>(src);
#if __GNUC__ <= 4
                    // old GCC: hoist the canKeep test out of the loop manually
                    if (!canKeep)
                        for (Nd4jLong e = 0; e < length; e++)
                            buffer[e] = BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e]));
                    else
                        for (Nd4jLong e = 0; e < length; e++)
                            buffer[e] = static_cast<T>(tmp[e]);
#else
                    //#pragma omp parallel for simd schedule(guided)
                    for (Nd4jLong e = 0; e < length; e++)
                        buffer[e] = canKeep ? static_cast<T>(tmp[e]) : BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e]));
#endif
                }
            }
            break;
            default: {
                nd4j_printf("Unsupported DataType requested: [%i]\n", static_cast<int>(dataType));
                throw std::runtime_error("Unsupported DataType");
            }
        }
    }
};
}
#endif //LIBND4J_DATATYPECONVERSIONS_H
|
gamma_index_ivfpq.h | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This faiss source code is licensed under the MIT license.
* https://github.com/facebookresearch/faiss/blob/master/LICENSE
*
*
* The works below are modified based on faiss:
* 1. Replace the static batch indexing with real time indexing
* 2. Add the fine-grained sort after PQ coarse sort
* 3. Add the numeric field and bitmap filters in the process of searching
*
* Modified works copyright 2019 The Gamma Authors.
*
* The modified codes are licensed under the Apache License, Version 2.0 license
* found in the LICENSE file in the root directory of this source tree.
*
*/
#ifndef GAMMA_INDEX_IVFPQ_H_
#define GAMMA_INDEX_IVFPQ_H_
#include <unistd.h>
#include <atomic>
#include "faiss/IndexIVF.h"
#include "faiss/IndexIVFPQ.h"
#include "faiss/VectorTransform.h"
#include "faiss/IndexHNSW.h"
#include "faiss/InvertedLists.h"
#include "faiss/impl/FaissAssert.h"
#include "faiss/impl/io.h"
#include "faiss/index_io.h"
#include "faiss/utils/Heap.h"
#include "faiss/utils/distances.h"
#include "faiss/utils/hamming.h"
#include "faiss/utils/utils.h"
#include "field_range_index.h"
#include "gamma_common_data.h"
#include "gamma_index_flat.h"
#include "gamma_scanner.h"
#include "log.h"
#include "memory_raw_vector.h"
#include "raw_vector.h"
#include "realtime_invert_index.h"
#include "retrieval_model.h"
#include "utils.h"
namespace tig_gamma {
/// statistics are robust to internal threading, but not if
/// IndexIVFPQ::search_preassigned is called by multiple threads
// Search statistics for the IVFPQ index.
// Fix: reset() previously had an empty body, so the constructor (which calls
// reset()) left every counter uninitialized and the stats were garbage until
// explicitly assigned.  reset() now zeroes all counters, matching upstream
// faiss behavior.
struct IndexIVFPQStats {
  size_t nrefine;        // nb of refines (IVFPQR)

  size_t n_hamming_pass;
  // nb of passed Hamming distance tests (for polysemous)

  // timings measured with the CPU RTC
  // on all threads
  size_t search_cycles;
  size_t refine_cycles;  // only for IVFPQR

  IndexIVFPQStats() { reset(); }

  // zero all counters
  void reset() {
    nrefine = 0;
    n_hamming_pass = 0;
    search_cycles = 0;
    refine_cycles = 0;
  }
};
// global var that collects them all
extern IndexIVFPQStats indexIVFPQ_stats;
// namespace {
using idx_t = faiss::Index::idx_t;
// Read the CPU time-stamp counter via RDTSC on x86-64; returns 0 on other
// architectures, so the cycle statistics are only meaningful on x86-64.
static uint64_t get_cycles() {
#ifdef __x86_64__
  uint32_t high, low;
  asm volatile("rdtsc \n\t" : "=a"(low), "=d"(high));
  // combine the two 32-bit halves into the full 64-bit counter value
  return ((uint64_t)high << 32) | (low);
#else
  return 0;
#endif
}
#define TIC t0 = get_cycles()
#define TOC get_cycles() - t0
/** QueryTables manages the various ways of searching an
* IndexIVFPQ. The code contains a lot of branches, depending on:
* - metric_type: are we computing L2 or Inner product similarity?
* - by_residual: do we encode raw vectors or residuals?
* - use_precomputed_table: are x_R|x_C tables precomputed?
* - polysemous_ht: are we filtering with polysemous codes?
*/
struct QueryTables {
/*****************************************************
* General data from the IVFPQ
*****************************************************/
const faiss::IndexIVFPQ &ivfpq;
const faiss::IVFSearchParameters *params;
// copied from IndexIVFPQ for easier access
int d;
const faiss::ProductQuantizer &pq;
faiss::MetricType metric_type;
bool by_residual;
int use_precomputed_table;
int polysemous_ht;
// pre-allocated data buffers
float *sim_table, *sim_table_2;
float *residual_vec, *decoded_vec;
// single data buffer
std::vector<float> mem;
// for table pointers
std::vector<const float *> sim_table_ptrs;
explicit QueryTables(const faiss::IndexIVFPQ &ivfpq,
const faiss::IVFSearchParameters *params,
faiss::MetricType metric_type)
: ivfpq(ivfpq),
d(ivfpq.d),
pq(ivfpq.pq),
metric_type(metric_type),
by_residual(ivfpq.by_residual),
use_precomputed_table(ivfpq.use_precomputed_table) {
mem.resize(pq.ksub * pq.M * 2 + d * 2);
sim_table = mem.data();
sim_table_2 = sim_table + pq.ksub * pq.M;
residual_vec = sim_table_2 + pq.ksub * pq.M;
decoded_vec = residual_vec + d;
// for polysemous
polysemous_ht = ivfpq.polysemous_ht;
if (auto ivfpq_params =
dynamic_cast<const faiss::IVFPQSearchParameters *>(params)) {
polysemous_ht = ivfpq_params->polysemous_ht;
}
if (polysemous_ht != 0) {
q_code.resize(pq.code_size);
}
init_list_cycles = 0;
sim_table_ptrs.resize(pq.M);
}
/*****************************************************
* What we do when query is known
*****************************************************/
// field specific to query
const float *qi;
// query-specific intialization
void init_query(const float *qi) {
this->qi = qi;
if (metric_type == faiss::METRIC_INNER_PRODUCT)
init_query_IP();
else
init_query_L2();
if (!by_residual && polysemous_ht != 0) pq.compute_code(qi, q_code.data());
}
void init_query_IP() {
// precompute some tables specific to the query qi
pq.compute_inner_prod_table(qi, sim_table);
}
void init_query_L2() {
if (!by_residual) {
pq.compute_distance_table(qi, sim_table);
} else if (use_precomputed_table) {
pq.compute_inner_prod_table(qi, sim_table_2);
}
}
/*****************************************************
* When inverted list is known: prepare computations
*****************************************************/
// fields specific to list
long key;
float coarse_dis;
std::vector<uint8_t> q_code;
uint64_t init_list_cycles;
/// once we know the query and the centroid, we can prepare the
/// sim_table that will be used for accumulation
/// and dis0, the initial value
float precompute_list_tables() {
float dis0 = 0;
uint64_t t0;
TIC;
if (by_residual) {
if (metric_type == faiss::METRIC_INNER_PRODUCT)
dis0 = precompute_list_tables_IP();
else
dis0 = precompute_list_tables_L2();
}
init_list_cycles += TOC;
return dis0;
}
float precompute_list_table_pointers() {
float dis0 = 0;
uint64_t t0;
TIC;
if (by_residual) {
if (metric_type == faiss::METRIC_INNER_PRODUCT)
FAISS_THROW_MSG("not implemented");
else
dis0 = precompute_list_table_pointers_L2();
}
init_list_cycles += TOC;
return dis0;
}
/*****************************************************
* compute tables for inner prod
*****************************************************/
float precompute_list_tables_IP() {
// prepare the sim_table that will be used for accumulation
// and dis0, the initial value
ivfpq.quantizer->reconstruct(key, decoded_vec);
// decoded_vec = centroid
float dis0 = faiss::fvec_inner_product(qi, decoded_vec, d);
if (polysemous_ht) {
for (int i = 0; i < d; i++) {
residual_vec[i] = qi[i] - decoded_vec[i];
}
pq.compute_code(residual_vec, q_code.data());
}
return dis0;
}
/*****************************************************
* compute tables for L2 distance
*****************************************************/
// Build the per-list distance tables for L2 search.  Behavior depends on
// use_precomputed_table:
//   0 / -1 : compute the residual table from scratch for this list;
//   1      : combine the index-wide precomputed table with the
//            query-specific table (sim_table_2) via fvec_madd;
//   2      : same, but the coarse quantizer is a MultiIndexQuantizer, so the
//            list key decomposes into cpq.M sub-indices of cpq.nbits each.
// Returns dis0, the additive term shared by every code of the list.
float precompute_list_tables_L2() {
  float dis0 = 0;
  if (use_precomputed_table == 0 || use_precomputed_table == -1) {
    ivfpq.quantizer->compute_residual(qi, residual_vec, key);
    pq.compute_distance_table(residual_vec, sim_table);
    if (polysemous_ht != 0) {
      pq.compute_code(residual_vec, q_code.data());
    }
  } else if (use_precomputed_table == 1) {
    dis0 = coarse_dis;
    // sim_table = precomputed_table[key] + (-2.0) * sim_table_2
    faiss::fvec_madd(pq.M * pq.ksub,
                     &ivfpq.precomputed_table[key * pq.ksub * pq.M], -2.0,
                     sim_table_2, sim_table);
    if (polysemous_ht != 0) {
      // residual only needed to derive the query code for Hamming filtering
      ivfpq.quantizer->compute_residual(qi, residual_vec, key);
      pq.compute_code(residual_vec, q_code.data());
    }
  } else if (use_precomputed_table == 2) {
    dis0 = coarse_dis;
    const faiss::MultiIndexQuantizer *miq =
        dynamic_cast<const faiss::MultiIndexQuantizer *>(ivfpq.quantizer);
    FAISS_THROW_IF_NOT(miq);
    const faiss::ProductQuantizer &cpq = miq->pq;
    int Mf = pq.M / cpq.M;  // fine sub-quantizers per coarse sub-quantizer
    const float *qtab = sim_table_2;  // query-specific table
    float *ltab = sim_table;          // (output) list-specific table
    long k = key;
    for (size_t cm = 0; cm < cpq.M; cm++) {
      // compute PQ index: peel cpq.nbits bits of the list key per segment
      int ki = k & ((uint64_t(1) << cpq.nbits) - 1);
      k >>= cpq.nbits;
      // get corresponding table
      const float *pc =
          &ivfpq.precomputed_table[(ki * pq.M + cm * Mf) * pq.ksub];
      if (polysemous_ht == 0) {
        // sum up with query-specific table
        faiss::fvec_madd(Mf * pq.ksub, pc, -2.0, qtab, ltab);
        ltab += Mf * pq.ksub;
        qtab += Mf * pq.ksub;
      } else {
        // also record the argmin per sub-quantizer as the query code
        for (size_t m = cm * Mf; m < (cm + 1) * Mf; m++) {
          q_code[m] =
              faiss::fvec_madd_and_argmin(pq.ksub, pc, -2, qtab, ltab);
          pc += pq.ksub;
          ltab += pq.ksub;
          qtab += pq.ksub;
        }
      }
    }
  }
  return dis0;
}
// Like precompute_list_tables_L2 but only fills sim_table_ptrs with pointers
// into the index-wide precomputed table (no per-list copy).  Requires
// use_precomputed_table == 1 or 2; incompatible with polysemous filtering.
float precompute_list_table_pointers_L2() {
  float dis0 = 0;
  if (use_precomputed_table == 1) {
    dis0 = coarse_dis;
    // one pointer per sub-quantizer into the contiguous table for this key
    const float *s = &ivfpq.precomputed_table[key * pq.ksub * pq.M];
    for (size_t m = 0; m < pq.M; m++) {
      sim_table_ptrs[m] = s;
      s += pq.ksub;
    }
  } else if (use_precomputed_table == 2) {
    dis0 = coarse_dis;
    const faiss::MultiIndexQuantizer *miq =
        dynamic_cast<const faiss::MultiIndexQuantizer *>(ivfpq.quantizer);
    FAISS_THROW_IF_NOT(miq);
    const faiss::ProductQuantizer &cpq = miq->pq;
    int Mf = pq.M / cpq.M;
    long k = key;
    int m0 = 0;
    for (size_t cm = 0; cm < cpq.M; cm++) {
      // decompose the multi-index key, cpq.nbits bits per segment
      int ki = k & ((uint64_t(1) << cpq.nbits) - 1);
      k >>= cpq.nbits;
      const float *pc =
          &ivfpq.precomputed_table[(ki * pq.M + cm * Mf) * pq.ksub];
      for (int m = m0; m < m0 + Mf; m++) {
        sim_table_ptrs[m] = pc;
        pc += pq.ksub;
      }
      m0 += Mf;
    }
  } else {
    FAISS_THROW_MSG("need precomputed tables");
  }
  if (polysemous_ht) {
    FAISS_THROW_MSG("not implemented");
    // Not clear that it makes sense to implement this,
    // because it costs M * ksub, which is what we wanted to
    // avoid with the tables pointers.
  }
  return dis0;
}
};
// Collector for k-nearest-neighbor scanning: maintains a bounded heap of the
// k best (similarity, id) pairs seen so far.  C is the heap comparator.
template <class C>
struct KnnSearchResults {
  idx_t key;        // id of the inverted list being scanned
  const idx_t *ids; // entry ids of the list, or NULL in store_pairs mode
  // heap params
  size_t k;
  float *heap_sim;
  idx_t *heap_ids;
  size_t nup;       // number of heap updates performed
  // Offer candidate j with distance dis; replaces the heap top if better.
  // With NULL ids the stored id packs the list key in the high 32 bits and
  // the in-list offset j in the low 32 bits.
  inline void add(idx_t j, float dis) {
    if (C::cmp(heap_sim[0], dis)) {
      faiss::heap_pop<C>(k, heap_sim, heap_ids);
      idx_t id = ids ? ids[j] : (key << 32 | j);
      faiss::heap_push<C>(k, heap_sim, heap_ids, dis, id);
      nup++;
    }
  }
};
/*****************************************************
* Scanning the codes.
* The scanning functions call their favorite precompute_*
* function to precompute the tables they need.
*****************************************************/
// Scanner skeleton shared by the concrete IVFPQ scanners.  Inherits the
// per-query state (tables, codes, quantizer handles) from QueryTables;
// METRIC_TYPE selects inner-product vs. L2 behavior at compile time.
template <typename IDType, faiss::MetricType METRIC_TYPE>
struct IVFPQScannerT : QueryTables {
  const uint8_t *list_codes;  // codes of the current inverted list
  const IDType *list_ids;     // ids of the current inverted list
  size_t list_size;
  // Only 8-bit PQ sub-codes are supported by the scan loops below.
  explicit IVFPQScannerT(const faiss::IndexIVFPQ &ivfpq,
                         const faiss::IVFSearchParameters *params)
      : QueryTables(ivfpq, params, METRIC_TYPE) {
    FAISS_THROW_IF_NOT(pq.nbits == 8);
  }
  float dis0;  // additive distance term shared by every code of the list
  // Select the current list and precompute what the chosen mode needs:
  // mode 2 = full per-list tables, mode 1 = table pointers only.
  void init_list(idx_t list_no, float coarse_dis, int mode) {
    this->key = list_no;
    this->coarse_dis = coarse_dis;
    if (mode == 2) {
      dis0 = precompute_list_tables();
    } else if (mode == 1) {
      dis0 = precompute_list_table_pointers();
    }
  }
  /// tables are not precomputed, but pointers are provided to the
  /// relevant X_c|x_r tables
  template <class SearchResultType>
  void scan_list_with_pointer(size_t ncode, const uint8_t *codes,
                              SearchResultType &res) const {
    for (size_t j = 0; j < ncode; j++) {
      float dis = dis0;
      const float *tab = sim_table_2;
      for (size_t m = 0; m < pq.M; m++) {
        int ci = *codes++;
        // per-sub-quantizer term from the pointer table, corrected by the
        // query-specific table
        dis += sim_table_ptrs[m][ci] - 2 * tab[ci];
        tab += pq.ksub;
      }
      res.add(j, dis);
    }
  }
  /// nothing is precomputed: access residuals on-the-fly
  template <class SearchResultType>
  void scan_on_the_fly_dist(size_t ncode, const uint8_t *codes,
                            SearchResultType &res) const {
    const float *dvec;
    float dis0 = 0;  // NOTE: local dis0 shadows the member of the same name
    if (by_residual) {
      if (METRIC_TYPE == faiss::METRIC_INNER_PRODUCT) {
        // IP decomposes: query . (centroid + residual)
        ivfpq.quantizer->reconstruct(key, residual_vec);
        dis0 = faiss::fvec_inner_product(residual_vec, qi, d);
      } else {
        // L2: compare decoded residuals against the query residual
        ivfpq.quantizer->compute_residual(qi, residual_vec, key);
      }
      dvec = residual_vec;
    } else {
      dvec = qi;
      dis0 = 0;
    }
    for (size_t j = 0; j < ncode; j++) {
      pq.decode(codes, decoded_vec);
      codes += pq.code_size;
      float dis;
      if (METRIC_TYPE == faiss::METRIC_INNER_PRODUCT) {
        dis = dis0 + faiss::fvec_inner_product(decoded_vec, qi, d);
      } else {
        dis = faiss::fvec_L2sqr(decoded_vec, dvec, d);
      }
      res.add(j, dis);
    }
  }
  /*****************************************************
   * Scanning codes with polysemous filtering
   *****************************************************/
  // Only codes whose Hamming distance to the query code is below the
  // polysemous threshold get a full table-based distance evaluation.
  template <class HammingComputer, class SearchResultType>
  void scan_list_polysemous_hc(size_t ncode, const uint8_t *codes,
                               SearchResultType &res) const {
    int ht = ivfpq.polysemous_ht;
    size_t n_hamming_pass = 0;
    int code_size = pq.code_size;
    HammingComputer hc(q_code.data(), code_size);
    for (size_t j = 0; j < ncode; j++) {
      const uint8_t *b_code = codes;
      int hd = hc.hamming(b_code);
      if (hd < ht) {
        n_hamming_pass++;
        float dis = dis0;
        const float *tab = sim_table;
        for (size_t m = 0; m < pq.M; m++) {
          dis += tab[*b_code++];
          tab += pq.ksub;
        }
        res.add(j, dis);
      }
      codes += code_size;
    }
// stats counter is shared across threads, hence the critical section
#pragma omp critical
    { indexIVFPQ_stats.n_hamming_pass += n_hamming_pass; }
  }
  // Dispatch to a HammingComputer specialized for the code size in bytes.
  template <class SearchResultType>
  void scan_list_polysemous(size_t ncode, const uint8_t *codes,
                            SearchResultType &res) const {
    switch (pq.code_size) {
#define HANDLE_CODE_SIZE(cs) \
  case cs: \
    scan_list_polysemous_hc<faiss::HammingComputer##cs, SearchResultType>( \
        ncode, codes, res); \
    break
      HANDLE_CODE_SIZE(4);
      HANDLE_CODE_SIZE(8);
      HANDLE_CODE_SIZE(16);
      HANDLE_CODE_SIZE(20);
      HANDLE_CODE_SIZE(32);
      HANDLE_CODE_SIZE(64);
#undef HANDLE_CODE_SIZE
      default:
        // generic computers for sizes that are multiples of 8 or 4 bytes
        if (pq.code_size % 8 == 0)
          scan_list_polysemous_hc<faiss::HammingComputerM8, SearchResultType>(
              ncode, codes, res);
        else
          scan_list_polysemous_hc<faiss::HammingComputerM4, SearchResultType>(
              ncode, codes, res);
        break;
    }
  }
};
/* struct GammaInvertedListScanner : faiss::InvertedListScanner { */
/* GammaInvertedListScanner() { retrieval_context_ = nullptr; } */
/* virtual size_t scan_codes_pointer(size_t ncode, const uint8_t **codes, */
/* const idx_t *ids, float *heap_sim, */
/* idx_t *heap_ids, size_t k) = 0; */
/* void set_search_context(RetrievalContext *retrieval_context) { */
/* this->retrieval_context_ = retrieval_context; */
/* } */
/* RetrievalContext *retrieval_context_; */
/* }; */
// InvertedListScanner for the IVF-flat path: vectors are fetched as raw
// floats from RawVector storage and filtered through the retrieval context
// plus the realtime delete/recover id masks.
template <faiss::MetricType metric, class C>
struct GammaIVFFlatScanner : GammaInvertedListScanner {
  size_t d;  // vector dimensionality
  GammaIVFFlatScanner(size_t d) : d(d) {}
  const float *xi;  // current query vector
  void set_query(const float *query) override { this->xi = query; }
  idx_t list_no;
  void set_list(idx_t list_no, float /* coarse_dis */) override {
    this->list_no = list_no;
  }
  // Distance between the query and one raw float vector (code == floats).
  float distance_to_code(const uint8_t *code) const override {
    const float *yj = (float *)code;
    float dis = metric == faiss::METRIC_INNER_PRODUCT
                    ? faiss::fvec_inner_product(xi, yj, d)
                    : faiss::fvec_L2sqr(xi, yj, d);
    return dis;
  }
  // NOTE(review): `codes` is reinterpreted as a RawVector*, not a byte
  // array -- callers must pass the storage object through this parameter.
  inline size_t scan_codes(size_t list_size, const uint8_t *codes,
                           const idx_t *ids, float *simi, idx_t *idxi,
                           size_t k) const override {
    RawVector *raw_vec = (RawVector *)codes;
    size_t nup = 0;
    for (size_t j = 0; j < list_size; j++) {
      if (ids[j] & realtime::kDelIdxMask) continue;  // deleted entry
      idx_t vid = ids[j] & realtime::kRecoverIdxMask;
      if (vid < 0) continue;
      if (retrieval_context_->IsValid(vid) == false) continue;
      ScopeVector svec;
      raw_vec->GetVector(vid, svec);
      const float *yj = (const float *)svec.Get();
      float dis = metric == faiss::METRIC_INNER_PRODUCT
                      ? faiss::fvec_inner_product(xi, yj, d)
                      : faiss::fvec_L2sqr(xi, yj, d);
      // keep only context-approved scores that beat the current heap top
      if (retrieval_context_->IsSimilarScoreValid(dis) && C::cmp(simi[0], dis)) {
        faiss::heap_pop<C>(k, simi, idxi);
        faiss::heap_push<C>(k, simi, idxi, dis, vid);
        nup++;
      }
    }
    return nup;
  }
  // Not used by the flat path; present to satisfy the interface.
  size_t scan_codes_pointer(size_t ncode, const uint8_t **codes,
                            const idx_t *ids, float *heap_sim, idx_t *heap_ids,
                            size_t k) {
    return 0;
  }
};
// Search-time parameters for the IVFPQ retrieval model.
class IVFPQRetrievalParameters : public RetrievalParameters {
 public:
  // Defaults: parallelize across queries, recall 100 candidates, probe 80
  // inverted lists, use the PQ (non-flat) code path.
  IVFPQRetrievalParameters() : RetrievalParameters() {
    parallel_on_queries_ = true;
    recall_num_ = 100;
    nprobe_ = 80;
    ivf_flat_ = false;
  }

  // Default configuration with an explicit distance type.
  IVFPQRetrievalParameters(enum DistanceComputeType type)
      : IVFPQRetrievalParameters() {
    distance_compute_type_ = type;
  }

  // Fully explicit configuration.
  IVFPQRetrievalParameters(bool parallel_on_queries, int recall_num, int nprobe,
                           enum DistanceComputeType type, bool ivf_flat)
      : IVFPQRetrievalParameters() {
    parallel_on_queries_ = parallel_on_queries;
    recall_num_ = recall_num;
    nprobe_ = nprobe;
    ivf_flat_ = ivf_flat;
    distance_compute_type_ = type;
  }

  virtual ~IVFPQRetrievalParameters() {}

  int RecallNum() { return recall_num_; }
  void SetRecallNum(int recall_num) { recall_num_ = recall_num; }

  int Nprobe() { return nprobe_; }
  void SetNprobe(int nprobe) { nprobe_ = nprobe; }

  bool ParallelOnQueries() { return parallel_on_queries_; }
  void SetParallelOnQueries(bool parallel_on_queries) {
    parallel_on_queries_ = parallel_on_queries;
  }

  bool IvfFlat() { return ivf_flat_; }
  void SetIvfFlat(bool ivf_flat) { ivf_flat_ = ivf_flat; }

 protected:
  bool parallel_on_queries_;  // parallelize over queries (vs. over ivf lists)
  int recall_num_;            // number of candidates to recall
  int nprobe_;                // number of inverted lists to visit
  bool ivf_flat_;             // true: IVF-flat path; false: IVFPQ path
};
struct IVFPQModelParams;
struct GammaIVFPQIndex : GammaFLATIndex, faiss::IndexIVFPQ {
GammaIVFPQIndex();
virtual ~GammaIVFPQIndex();
faiss::InvertedListScanner *get_InvertedListScanner(
bool store_pairs, faiss::MetricType metric_type);
GammaInvertedListScanner *GetGammaIVFFlatScanner(
size_t d, faiss::MetricType metric_type);
GammaInvertedListScanner *GetGammaInvertedListScanner(
bool store_pairs, faiss::MetricType metric_type);
int Init(const std::string &model_parameters, int indexing_size) override;
RetrievalParameters *Parse(const std::string ¶meters) override;
int Indexing() override;
bool Add(int n, const uint8_t *vec);
int Update(const std::vector<int64_t> &ids,
const std::vector<const uint8_t *> &vecs);
// assign the vectors, then call search_preassign
int Search(RetrievalContext *retrieval_context, int n, const uint8_t *x,
int k, float *distances, idx_t *labels);
void search_preassigned(RetrievalContext *retrieval_context, int n,
const float *x, const float *applied_x, int k, const idx_t *keys,
const float *coarse_dis, float *distances,
idx_t *labels, int nprobe, bool store_pairs,
const faiss::IVFSearchParameters *params = nullptr);
void search_ivf_flat(RetrievalContext *retrieval_context, int n,
const float *x, int k, const idx_t *keys,
const float *coarse_dis, float *distances, idx_t *labels,
int nprobe, bool store_pairs,
const faiss::IVFSearchParameters *params = nullptr);
long GetTotalMemBytes() override {
if (!rt_invert_index_ptr_) {
return 0;
}
return rt_invert_index_ptr_->GetTotalMemBytes();
}
int Dump(const std::string &dir) override;
int Load(const std::string &index_dir) override;
virtual void copy_subset_to(faiss::IndexIVF &other, int subset_type, idx_t a1,
idx_t a2) const;
int Delete(const std::vector<int64_t> &ids);
int indexed_vec_count_;
realtime::RTInvertIndex *rt_invert_index_ptr_;
bool compaction_;
size_t compact_bucket_no_;
uint64_t compacted_num_;
uint64_t updated_num_;
int d_;
DistanceComputeType metric_type_;
faiss::VectorTransform *opq_;
// 0 is FlatL2, 1 is HNSWFlat
int quantizer_type_;
#ifdef PERFORMANCE_TESTING
std::atomic<uint64_t> search_count_;
int add_count_;
#endif
IVFPQModelParams *model_param_;
};
// Concrete IVFPQ scanner: combines the scan loops of IVFPQScannerT with
// Gamma-specific id masking and retrieval-context filtering.
// precompute_mode: 2 = full per-list tables, 1 = table pointers,
// 0 = on-the-fly residual distances.
template <faiss::MetricType METRIC_TYPE, class C, int precompute_mode>
struct GammaIVFPQScanner : IVFPQScannerT<idx_t, METRIC_TYPE>,
                           GammaInvertedListScanner {
  const GammaIVFPQIndex &gamma_ivfpq_;
  bool store_pairs_;  // true: report (list<<32|offset) instead of entry ids
  GammaIVFPQScanner(const GammaIVFPQIndex &gamma_ivfpq, bool store_pairs)
      : IVFPQScannerT<idx_t, METRIC_TYPE>(gamma_ivfpq, nullptr),
        gamma_ivfpq_(gamma_ivfpq) {
    store_pairs_ = store_pairs;
  }
  // Table-based scan that skips deleted / context-invalid entries.
  // Each code is pq.M bytes; skipped entries still advance the code pointer.
  template <class SearchResultType>
  void scan_list_with_table(size_t ncode, const uint8_t *codes,
                            SearchResultType &res) const {
    size_t j = 0;
    for (; j < ncode; j++) {
      if (res.ids[j] & realtime::kDelIdxMask) {
        codes += this->pq.M;
        continue;
      }
      if (!retrieval_context_->IsValid(res.ids[j] &
                                       realtime::kRecoverIdxMask)) {
        codes += this->pq.M;
        continue;
      }
      float dis = this->dis0;
      const float *tab = this->sim_table;
      for (size_t m = 0; m < this->pq.M; m++) {
        dis += tab[*codes++];
        tab += this->pq.ksub;
      }
      res.add(j, dis);
    }
    assert(j == ncode);
  }
  inline void set_query(const float *query) override {
    this->init_query(query);
  }
  inline void set_list(idx_t list_no, float coarse_dis) override {
    this->init_list(list_no, coarse_dis, precompute_mode);
  }
  // Only meaningful in mode 2 (full tables available).
  inline float distance_to_code(const uint8_t *code) const override {
    assert(precompute_mode == 2);
    float dis = this->dis0;
    const float *tab = this->sim_table;
    for (size_t m = 0; m < this->pq.M; m++) {
      dis += tab[*code++];
      tab += this->pq.ksub;
    }
    return dis;
  }
  // Scan one list into the caller's k-best heap.  Always returns 0; the
  // heap is updated in place via KnnSearchResults.
  inline size_t scan_codes(size_t ncode, const uint8_t *codes, const idx_t *ids,
                           float *heap_sim, idx_t *heap_ids,
                           size_t k) const override {
    KnnSearchResults<C> res = {/* key */ this->key,
                               /* ids */ this->store_pairs_ ? nullptr : ids,
                               /* k */ k,
                               /* heap_sim */ heap_sim,
                               /* heap_ids */ heap_ids,
                               /* nup */ 0};
    if (this->polysemous_ht > 0) {
      assert(precompute_mode == 2);
      this->scan_list_polysemous(ncode, codes, res);
    } else if (precompute_mode == 2) {
      this->scan_list_with_table(ncode, codes, res);
    } else if (precompute_mode == 1) {
      this->scan_list_with_pointer(ncode, codes, res);
    } else if (precompute_mode == 0) {
      this->scan_on_the_fly_dist(ncode, codes, res);
    } else {
      FAISS_THROW_MSG("bad precomp mode");
    }
    return 0;
  }
  // NOTE(review): `codes` is uint8_t** here but scan_list_with_table takes
  // uint8_t*; this member would not compile if instantiated -- verify
  // whether this overload is ever actually called.
  inline size_t scan_codes_pointer(size_t ncode, const uint8_t **codes,
                                   const idx_t *ids, float *heap_sim,
                                   idx_t *heap_ids, size_t k) {
    KnnSearchResults<C> res = {/* key */ this->key,
                               /* ids */ this->store_pairs_ ? nullptr : ids,
                               /* k */ k,
                               /* heap_sim */ heap_sim,
                               /* heap_ids */ heap_ids,
                               /* nup */ 0};
    if (precompute_mode == 2) {
      this->scan_list_with_table(ncode, codes, res);
    } else {
      FAISS_THROW_MSG("bad precomp mode");
    }
    return 0;
  }
};
} // namespace tig_gamma
#endif
|
barrier.c | /*
$ gcc -fopenmp -O2 src/barrier.c -o bin/barrier
$ export OMP_NUM_THREADS=4
$ ./bin/barrier
Hebra 0 Tiempo=0 seg.
Hebra 1 Tiempo=0 seg.
Hebra 2 Tiempo=3 seg.
Hebra 3 Tiempo=3 seg.
Si el identificador de la hebra es menor que la mitad esperan 3 segundos y la barrera espera a que todas acaben
Vamos a usar la directiva barrier cuando haga falta una barrera osea casi nunca.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#endif
/*
 * OpenMP barrier demo.  Threads with id below half the team sleep ~3 s
 * before reaching the barrier; the remaining threads arrive immediately and
 * block there, so the fast threads measure ~3 s of waiting while the
 * sleepers measure ~0 s.
 */
int main(void) {
  int tid;
  time_t t1, t2;
#pragma omp parallel private(tid, t1, t2)
  {
    tid = omp_get_thread_num();
    if (tid < omp_get_num_threads() / 2) system("sleep 3");
    t1 = time(NULL);
#pragma omp barrier
    t2 = time(NULL) - t1;
    /* time_t has no portable printf specifier; cast to long (the old %d
     * with a time_t argument was undefined behavior). */
    printf("Hebra %d Tiempo=%ld seg.\n", tid, (long)t2);
  }
  return 0;
}
my_fastmarching.h | #ifndef __MY_FASTMARCHING_H__
#define __MY_FASTMARCHING_H__
#include "fastmarching_dt.h"
#include "fastmarching_tree.h"
#ifdef USE_OPENMP
#include <omp.h>
#endif
#include <algorithm>
#ifndef SET_CLOCK
#define SET_CLOCK clock_gettime(CLOCK_MONOTONIC, &(ts[clock_id])); printf("[ timer %d - %d ]\n", area_id, clock_id++);
#endif
// Fast-marching distance transform over a 3-D volume using an in-slice
// (XY-plane only: the kk loop is fixed at 0) neighbourhood of up to
// cnn_type connectivity.  Voxels at or below bkg_thresh become seeds
// (ALIVE) with their own intensity as distance; phi receives the
// propagated distance for every voxel.  phi may be passed pre-allocated
// (phi != 0) or is allocated here; the caller owns it either way.
// Returns true on completion.
template<class T> bool my_fastmarching_dt_XY(T * inimg1d, float * &phi, int sz0, int sz1, int sz2, int cnn_type = 3, int bkg_thresh = 0)
{
#ifdef NEB_DEBUG
  struct timespec ts[4];
  int clock_id=0, area_id=2;
  printf("-----start my fastmarching(sz0=%d, sz1=%d, sz2=%d)-----\n", sz0, sz1, sz2);
  SET_CLOCK
#endif
#ifdef USE_OPENMP
  printf("OpenMP thread=%d / %d\n", omp_get_max_threads(), omp_get_num_procs());
#endif
  enum{ALIVE = -1, TRIAL = 0, FAR = 1};
  const long tol_sz = sz0 * sz1 * sz2;
  const long sz01 = sz0 * sz1;
  if(phi == 0) phi = new float[tol_sz];
  char * state = new char[tol_sz];
  int bkg_count = 0; // for process counting
  int bdr_count = 0; // for process counting
  // Seed pass: background voxels become ALIVE, everything else FAR at INF.
  for(long i = 0; i < tol_sz; i++)
  {
    if(inimg1d[i] <= bkg_thresh)
    {
      phi[i] = inimg1d[i];
      state[i] = ALIVE;
      bkg_count++;
    }
    else
    {
      phi[i] = INF;
      state[i] = FAR;
    }
  }
  cout<<endl;
#ifdef NEB_DEBUG
  SET_CLOCK
#endif
  BasicHeap<HeapElem> heap;
  map<long, HeapElem*> elems;
  // init heap: every FAR voxel adjacent (within its z-slice) to an ALIVE
  // voxel becomes TRIAL and is pushed onto the heap.
  {
    const long sz1sz0 =sz1*sz0;
#ifdef USE_OPENMP
    printf("start omp\n");
    // Parallelizing over k is safe: the neighbourhood never leaves the
    // slice (kk is 0..0), so threads write disjoint phi/state ranges; the
    // shared heap/elems/bdr_count updates are serialized in `critical`.
#pragma omp parallel for
#endif
    for(long k=0; k<sz2; k++){
      for(long j=0; j<sz1; j++){
        for(long i=0; i<sz0; i++){
          long ind = k*sz1sz0 + j*sz0 + i;
          if(state[ind] == ALIVE){
            for(int kk = 0; kk <= 0; kk++){
              long k2 = k+kk;
              if(k2 < 0 || k2 >= sz2) continue;
              for(int jj = -1; jj <= 1; jj++){
                long j2 = j+jj;
                if(j2 < 0 || j2 >= sz1) continue;
                for(int ii = -1; ii <=1; ii++){
                  long i2 = i+ii;
                  if(i2 < 0 || i2 >= sz0) continue;
                  int offset = ABS(ii) + ABS(jj) + ABS(kk);
                  if(offset == 0 || offset > cnn_type) continue;
                  long ind2 = k2 * sz01 + j2 * sz0 + i2;
                  if(state[ind2] == FAR){
                    long min_ind = ind;
                    // get minimum Alive point around ind2 (8 unrolled
                    // in-slice neighbours)
                    if(phi[min_ind] > 0.0){
                      {
                        long j3, i3, ind3, jjj, iii, offset2;
                        jjj=-1; iii=-1; offset2=2;
                        j3=j2+jjj; i3=i2+iii; ind3 = k2 * sz01 + j3 * sz0 + i3;
                        if(state[ind3] == ALIVE && phi[ind3] < phi[min_ind]){ if(!(i3 >= sz0 || i3 < 0 || j3 >= sz1 || j3 < 0 || offset2 > cnn_type)){ min_ind = ind3; } }
                        jjj=-1; iii=0; offset2=1;
                        j3=j2+jjj; i3=i2+iii; ind3 = k2 * sz01 + j3 * sz0 + i3;
                        if(state[ind3] == ALIVE && phi[ind3] < phi[min_ind]){ if(!(i3 >= sz0 || i3 < 0 || j3 >= sz1 || j3 < 0 || offset2 > cnn_type)){ min_ind = ind3; } }
                        jjj=-1; iii=1; offset2=2;
                        j3=j2+jjj; i3=i2+iii; ind3 = k2 * sz01 + j3 * sz0 + i3;
                        if(state[ind3] == ALIVE && phi[ind3] < phi[min_ind]){ if(!(i3 >= sz0 || i3 < 0 || j3 >= sz1 || j3 < 0 || offset2 > cnn_type)){ min_ind = ind3; } }
                        jjj=0; iii=-1; offset2=1;
                        j3=j2+jjj; i3=i2+iii; ind3 = k2 * sz01 + j3 * sz0 + i3;
                        if(state[ind3] == ALIVE && phi[ind3] < phi[min_ind]){ if(!(i3 >= sz0 || i3 < 0 || j3 >= sz1 || j3 < 0 || offset2 > cnn_type)){ min_ind = ind3; } }
                        jjj=0; iii=1; offset2=1;
                        j3=j2+jjj; i3=i2+iii; ind3 = k2 * sz01 + j3 * sz0 + i3;
                        if(state[ind3] == ALIVE && phi[ind3] < phi[min_ind]){ if(!(i3 >= sz0 || i3 < 0 || j3 >= sz1 || j3 < 0 || offset2 > cnn_type)){ min_ind = ind3; } }
                        jjj=1; iii=-1; offset2=2;
                        j3=j2+jjj; i3=i2+iii; ind3 = k2 * sz01 + j3 * sz0 + i3;
                        if(state[ind3] == ALIVE && phi[ind3] < phi[min_ind]){ if(!(i3 >= sz0 || i3 < 0 || j3 >= sz1 || j3 < 0 || offset2 > cnn_type)){ min_ind = ind3; } }
                        jjj=1; iii= 0; offset2=1;
                        j3=j2+jjj; i3=i2+iii; ind3 = k2 * sz01 + j3 * sz0 + i3;
                        if(state[ind3] == ALIVE && phi[ind3] < phi[min_ind]){ if(!(i3 >= sz0 || i3 < 0 || j3 >= sz1 || j3 < 0 || offset2 > cnn_type)){ min_ind = ind3; } }
                        jjj=1; iii=1; offset2=2;
                        j3=j2+jjj; i3=i2+iii; ind3 = k2 * sz01 + j3 * sz0 + i3;
                        if(state[ind3] == ALIVE && phi[ind3] < phi[min_ind]){ if(!(i3 >= sz0 || i3 < 0 || j3 >= sz1 || j3 < 0 || offset2 > cnn_type)){ min_ind = ind3; } }
                      }
                    }
                    // over
                    phi[ind2] = phi[min_ind] + inimg1d[ind2];
                    state[ind2] = TRIAL;
                    HeapElem * elem = new HeapElem(ind2, phi[ind2]);
#pragma omp critical
                    {
                      heap.insert(elem);
                      elems[ind2] = elem;
                      bdr_count++;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  cout<<"bkg_count = "<<bkg_count<<" ("<<bkg_count/(double)tol_sz<<")"<<endl;
  cout<<"bdr_count = "<<bdr_count<<" ("<<bdr_count/(double)tol_sz<<")"<<endl;
  cout<<"elems.size() = "<<elems.size()<<endl;
#ifdef NEB_DEBUG
  SET_CLOCK
#endif
  // main loop: repeatedly fix the smallest TRIAL voxel and relax its
  // in-slice neighbours.
  int time_counter = bkg_count;
  double process1 = 0;
  while(!heap.empty())
  {
    double process2 = (time_counter++)*100000.0/tol_sz;
    if(process2 - process1 >= 10) {cout<<"\r"<<((int)process2)/1000.0<<"%";cout.flush(); process1 = process2;
    }
    HeapElem* min_elem = heap.delete_min();
    elems.erase(min_elem->img_ind);
    long min_ind = min_elem->img_ind;
    delete min_elem;
    state[min_ind] = ALIVE;
    int i = min_ind % sz0;
    int j = (min_ind/sz0) % sz1;
    int k = (min_ind/sz01) % sz2;
    int w, h, d;
    for(int kk = 0; kk <= 0; kk++)
    {
      d = k+kk;
      if(d < 0 || d >= sz2) continue;
      for(int jj = -1; jj <= 1; jj++)
      {
        h = j+jj;
        if(h < 0 || h >= sz1) continue;
        for(int ii = -1; ii <= 1; ii++)
        {
          w = i+ii;
          if(w < 0 || w >= sz0) continue;
          int offset = ABS(ii) + ABS(jj) + ABS(kk);
          if(offset == 0 || offset > cnn_type) continue;
          long index = d*sz01 + h*sz0 + w;
          if(state[index] != ALIVE)
          {
            // diagonal steps are weighted by their Euclidean length
            float new_dist = phi[min_ind] + inimg1d[index] * sqrt(double(offset));
            if(state[index] == FAR)
            {
              phi[index] = new_dist;
              HeapElem * elem = new HeapElem(index, phi[index]);
              heap.insert(elem);
              elems[index] = elem;
              state[index] = TRIAL;
            }
            else if(state[index] == TRIAL)
            {
              if(phi[index] > new_dist)
              {
                phi[index] = new_dist;
                HeapElem * elem = elems[index];
                heap.adjust(elem->heap_id, phi[index]);
              }
            }
          }
        }
      }
    }
  }
  assert(elems.empty());
  if(state) {
    printf("delete state\n");
    delete [] state; state = 0;
  }
#ifdef NEB_DEBUG
  SET_CLOCK
  printf("*************************************\n");
  // bug fix: `i` was declared without an initializer here, which is
  // undefined behavior when read in the loop condition
  for(int i = 0; i<clock_id-1; i++){
    printf("* time(ts[%d - %d] - tm[%d - %d]) = %3.2f\n", area_id, i, area_id, i+1, (ts[i+1].tv_sec - ts[i].tv_sec) + 0.000000001*(ts[i+1].tv_nsec - ts[i].tv_nsec));
  }
  printf("*************************************\n");
#endif
  return true;
}
// One tree edge recorded during marching: voxel `id` was reached from voxel
// `parent` (both are linear indices into the volume).
typedef struct _parent{
  long id;
  long parent;
} ParentList;
/*********************************************************************
* Function : fastmarching_tree
*
* Features :
* 1. Create fast marcing tree from root marker only
* 2. Background (intensity 0) will be ignored.
* 3. Graph augumented distance is used
*
* Input : root root marker
* inimg1d original 8bit image
*
* Output : tree output swc
* phi the distance for each pixels
* *******************************************************************/
// See the banner above: grows a fast-marching tree from `root`, recording
// (voxel, parent) pairs in extraction order instead of a full tol_sz parent
// array.  NOTE: phi is freed before returning, matching the original
// contract -- the distances are not handed back to the caller.
template<class T> bool my_fastmarching_tree(MyMarker root
    , T * inimg1d
    , vector<MyMarker*> &outtree
    , float * &phi
    , long sz0
    , long sz1
    , long sz2
    , int cnn_type = 3
    , double bkg_thresh = 20
    , bool is_break_accept = false
    )
{
  enum{ALIVE = -1, TRIAL = 0, FAR = 1};
  long tol_sz = sz0 * sz1 * sz2;
  long sz01 = sz0 * sz1;
  long i;
  char * state = 0;
  // bug fix: sz0/sz1/sz2 are long; %d with long arguments is undefined
  // behavior -- use %ld
  printf("-----start my fastmarching tree(sz0=%ld, sz1=%ld, sz2=%ld)-----\n", sz0, sz1, sz2);
#ifdef NEB_DEBUG
  struct timespec ts[10];
  int clock_id=0, area_id=3;
  printf("-----start my fastmarching tree(sz0=%ld, sz1=%ld, sz2=%ld)-----\n", sz0, sz1, sz2);
  SET_CLOCK ;
#endif
  // (id, parent) pairs recorded in extraction order
  vector<ParentList> parentlist;
  parentlist.clear();
  try
  {
    if(phi==0){ phi = new float[tol_sz]; }
    state = new char[tol_sz];
  }
  catch (...)
  {
    cout << "********* Fail to allocate memory. quit fastmarching_tree()." << endl;
    if (phi) {delete []phi; phi=0;}
    if (state) {delete []state; state=0;}
    return false;
  }
#ifdef NEB_DEBUG
  SET_CLOCK ;
#endif
  fill(phi, &(phi[tol_sz]), INF);
  memset(state, FAR, tol_sz);
#ifdef NEB_DEBUG
  printf("INF=%f\n", (float)INF);
  // bug fix: %ld for the long loop index (was %d)
  for(long i=0; i<10; i++){ printf("%ld : %f %d\n", i, phi[i], state[i]); }
  for(long i=tol_sz-1; i>tol_sz-10; i--){ printf("%ld : %f %d\n", i, phi[i], state[i]); }
#endif
#ifdef NEB_DEBUG
  SET_CLOCK ;
#endif
  // GI parameter min_int, max_int, li
  double max_int = 0; // maximum intensity, used in GI
  double min_int = INF;
  for(i = 0; i < tol_sz; i++)
  {
    if (inimg1d[i] > max_int) max_int = inimg1d[i];
    else if (inimg1d[i] < min_int) min_int = inimg1d[i];
  }
  max_int -= min_int;
  double li = 10;
  // initialization: the root voxel is the only seed
  long rootx = root.x + 0.5;
  long rooty = root.y + 0.5;
  long rootz = root.z + 0.5;
  long root_ind = rootz*sz01 + rooty*sz0 + rootx;
  state[root_ind] = ALIVE;
  phi[root_ind] = 0.0;
  BasicHeap<HeapElemX> heap;
  map<long, HeapElemX*> elems;
  // init heap with the root (its own parent)
  {
    long index = root_ind;
    HeapElemX *elem = new HeapElemX(index, phi[index]);
    elem->prev_ind = index;
    heap.insert(elem);
    elems[index] = elem;
  }
#ifdef NEB_DEBUG
  SET_CLOCK ;
#endif
  // main loop: fix the cheapest TRIAL voxel, record its parent edge, relax
  // its full 3-D neighbourhood
  int time_counter = 1;
  double process1 = 0;
  while(!heap.empty())
  {
    double process2 = (time_counter++)*10000.0/tol_sz;
    if(process2 - process1 >= 1)
    {
      cout<<"\r"<<((int)process2)/100.0<<"%";cout.flush(); process1 = process2;
    }
    HeapElemX* min_elem = heap.delete_min();
    elems.erase(min_elem->img_ind);
    long min_ind = min_elem->img_ind;
    long prev_ind = min_elem->prev_ind;
    delete min_elem;
    ParentList tmp_plist;
    tmp_plist.id = min_ind;
    tmp_plist.parent = prev_ind;
    parentlist.push_back(tmp_plist);
    state[min_ind] = ALIVE;
    int i = min_ind % sz0;
    int j = (min_ind/sz0) % sz1;
    int k = (min_ind/sz01) % sz2;
    int w, h, d;
    for(int kk = -1; kk <= 1; kk++)
    {
      d = k+kk;
      if(d < 0 || d >= sz2) continue;
      for(int jj = -1; jj <= 1; jj++)
      {
        h = j+jj;
        if(h < 0 || h >= sz1) continue;
        for(int ii = -1; ii <= 1; ii++)
        {
          w = i+ii;
          if(w < 0 || w >= sz0) continue;
          int offset = ABS(ii) + ABS(jj) + ABS(kk);
          if(offset == 0 || offset > cnn_type) continue;
          // Euclidean step length: 1, sqrt(2) or sqrt(3)
          double factor = (offset == 1) ? 1.0 : ((offset == 2) ? 1.414214 : ((offset == 3) ? 1.732051 : 0.0));
          long index = d*sz01 + h*sz0 + w;
          if (is_break_accept)
          {
            // allow crossing background gaps unless both ends are background
            if(inimg1d[index] <= bkg_thresh &&
               inimg1d[min_ind] <= bkg_thresh)
              continue;
          }
          else
          {
            if(inimg1d[index] <= bkg_thresh)
              continue;
          }
          if(state[index] != ALIVE)
          {
            double new_dist = phi[min_ind] + (GI(index) + GI(min_ind))*factor*0.5;
            long prev_ind = min_ind;
            if(state[index] == FAR)
            {
              phi[index] = new_dist;
              HeapElemX * elem = new HeapElemX(index, phi[index]);
              elem->prev_ind = prev_ind;
              heap.insert(elem);
              elems[index] = elem;
              state[index] = TRIAL;
            }
            else if(state[index] == TRIAL)
            {
              if (phi[index] > new_dist)
              {
                phi[index] = new_dist;
                HeapElemX * elem = elems[index];
                heap.adjust(elem->heap_id, phi[index]);
                elem->prev_ind = prev_ind;
              }
            }
          }
        }
      }
    }
  }
  // bug fix: %zu for size_t (was %d, undefined behavior)
  printf("psize = %zu\n", parentlist.size());
#ifdef NEB_DEBUG
  SET_CLOCK ;
#endif
  // save current swc tree
  if (1)
  {
    map<long, MyMarker*> tmp_map;
    const long sz1sz0 = sz1 * sz0;
    for(long k=0; k<sz2; k++){
      for(long j=0; j<sz1; j++){
        for(long i=0; i<sz0; i++){
          long ind = (k*sz1sz0) + (j*sz0) + i;
          if(state[ind] != ALIVE) continue;
          MyMarker * marker = new MyMarker(i,j,k);
          tmp_map[ind] = marker;
          outtree.push_back(marker);
        }
      }
    }
    // bug fix: the old loop ran i over 0..tmp_map.size()-1, i.e. over map
    // *keys* that need not exist, so markers whose voxel index exceeded the
    // map size never had their parent reset.  Iterate the map itself.
    for(map<long, MyMarker*>::iterator it = tmp_map.begin(); it != tmp_map.end(); ++it){
      it->second->parent = 0;
    }
    for(size_t i=0; i<parentlist.size(); i++){
      if(state[parentlist[i].id] != ALIVE) continue;
      MyMarker * marker1 = tmp_map[parentlist[i].id];
      if(parentlist[i].parent>0 && parentlist[i].parent!=parentlist[i].id){
        MyMarker * marker2 = tmp_map[parentlist[i].parent];
        marker1->parent = marker2;
      }
    }
  }
#ifdef NEB_DEBUG
  SET_CLOCK ;
#endif
  // over: release any TRIAL elements left in the bookkeeping map
  map<long, HeapElemX*>::iterator mit = elems.begin();
  while (mit != elems.end())
  {
    HeapElemX * elem = mit->second; delete elem; mit++;
  }
  parentlist.clear();
  if(phi){delete [] phi; phi = 0;}
  if(state) {delete [] state; state = 0;}
#ifdef NEB_DEBUG
  SET_CLOCK
  printf("*************************************\n");
  // bug fix: `i` was uninitialized (undefined behavior)
  for(int i = 0; i<clock_id-1; i++){
    printf("* time(ts[%d - %d] - tm[%d - %d]) = %3.2f\n", area_id, i, area_id, i+1, (ts[i+1].tv_sec - ts[i].tv_sec) + 0.000000001*(ts[i+1].tv_nsec - ts[i].tv_nsec));
  }
  printf("*************************************\n");
#endif
  return true;
}
#endif /* __MY_FASTMARCHING_H__ */
|
symv_c_coo_n_hi_conj.c | #include "alphasparse/kernel.h"
#include "alphasparse/kernel_plain.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * y = beta*y + alpha*A*x for a symmetric/hermitian matrix stored as the
 * upper triangle of a COO matrix (entries with r > c are skipped;
 * off-diagonal entries contribute to both row r and row c; alpha_mul_3c
 * presumably applies the conjugation this kernel's name implies -- the
 * macro body is not visible here).
 */
static alphasparse_status_t
symv_coo_n_hi_omp(const ALPHA_Number alpha,
                  const ALPHA_SPMAT_COO *A,
                  const ALPHA_Number *x,
                  const ALPHA_Number beta,
                  ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT nnz = A->nnz;
    const ALPHA_INT thread_num = alpha_get_thread_num();

    /* One zero-initialized accumulator row per thread avoids write conflicts
     * on y while the nnz loop runs in parallel.  calloc replaces the old
     * malloc+memset pair; the allocation loop itself is tiny, so the old
     * parallel-for over it was dropped.
     * NOTE(review): allocations are unchecked, as in the original --
     * consider propagating an allocation-failure status if the library
     * defines one. */
    ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num);
    for (int i = 0; i < thread_num; ++i)
    {
        tmp[i] = calloc(m, sizeof(ALPHA_Number));
    }

#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < nnz; i++)
    {
        const ALPHA_INT threadId = alpha_get_thread_id();
        const ALPHA_INT r = A->row_indx[i];
        const ALPHA_INT c = A->col_indx[i];
        /* only the stored upper triangle participates */
        if (r > c)
        {
            continue;
        }
        ALPHA_Number v;
        alpha_mul_3c(v, alpha, A->values[i]);
        if (r == c)
        {
            alpha_madde(tmp[threadId][r], v, x[c]);
        }
        else
        {
            /* mirror contribution for the implicit lower triangle */
            alpha_madde(tmp[threadId][r], v, x[c]);
            alpha_madde(tmp[threadId][c], v, x[r]);
        }
    }

    /* reduce: y[i] = beta*y[i] + sum over threads of the partial rows */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], beta, y[i]);
        for (ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(y[i], y[i], tmp[j][i]);
        }
    }

    for (int i = 0; i < thread_num; ++i)
    {
        alpha_free(tmp[i]);
    }
    alpha_free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
/* Public entry point: dispatch straight to the OpenMP kernel.  (The
 * previous version computed an unused thread count here.) */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_COO *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    return symv_coo_n_hi_omp(alpha, A, x, beta, y);
}
|
scheduling_loops.c | #include <stdio.h>
#include <stdlib.h>
#include "stdbool.h"
/*
 * Dynamic-scheduling demo: each iteration trial-divides a random number, so
 * per-iteration cost varies wildly and schedule(dynamic) balances the load.
 */
int main(int argc, char **argv){
  enum { N_ITER = 1000 };          /* fixed bound instead of a VLA */
  bool is_prime[N_ITER];

  /* NOTE(review): rand() is not required to be thread-safe; consider
   * rand_r with a per-thread seed if the results are ever consumed. */
#pragma omp parallel for schedule(dynamic)
  for (int index = 0; index < N_ITER; index++) {
    long potential_prime = rand() % (4000000000L + 1);
    /* bug fix: the array was only ever written with `false`, so entries
     * for primes were left uninitialized; 0 and 1 are not prime */
    is_prime[index] = potential_prime >= 2;
    for (long multiple = 2; multiple < potential_prime; multiple++) {
      if ((potential_prime % multiple) == 0) {
        is_prime[index] = false;
        break;
      }
    }
  }
  return 0;
}
|
lastprivate0.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
int main(void)
{
  int i,is=0;
  /* private(is): each thread sums into its own uninitialized copy of is,
     which is then discarded; the outer is (0) is what gets printed.  The
     loop index i is implicitly private as well.
     NOTE(review): the outer i is never assigned, so printing it below reads
     an uninitialized variable (undefined behavior) -- presumably intentional
     for this demo, but worth confirming. */
#pragma omp parallel for private(is)
  for (i=0;i<100;i++)
    is = is+i;
  printf("%d=%d\n ",i,is);
  is=0;
  /* firstprivate(is): private copies start from the outer value (0), but
     their results are still discarded after the loop. */
#pragma omp parallel for firstprivate(is)
  for (i=0;i<100;i++)
    is = is+i;
  printf("%d=%d\n ",i,is);
  is=0;
  /* lastprivate(is): after the loop, is holds the private copy of the
     thread that ran the sequentially-last iteration (i == 99).  That copy
     starts uninitialized, so the printed value is indeterminate. */
#pragma omp parallel for lastprivate(is)
  for (i=0;i<100;i++)
    is = is+i;
  printf("%d=%d\n ",i,is);
  is=0;
  //#pragma omp parallel for lastprivate(is)
  /* firstprivate+lastprivate with schedule(static,30): the last chunk is
     i = 90..99, so with enough threads is ends up as sum(90..99) = 945. */
#pragma omp parallel for schedule(static,30) firstprivate(is) lastprivate(is)
  for (i=0;i<100;i++)
    is = is+i;
  /*The value of is depends on the num of threads and schedule method*/
  printf("%d, %d\n ",i,is);
  is=0;
  /* sequential reference: sum of the last static chunk (90..99) */
  for (i=90;i<100;i++)
    is = is+i;
  printf("%d, %d\n ",i,is);
  return 0;
}
|
GB_unop__one_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__one_fc32_fc32)
// op(A') function: GB (_unop_tran__one_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: ;
// unaryop: cij = GxB_CMPLXF(1,0)
// entry type of the input matrix A (single-precision complex)
#define GB_ATYPE \
GxB_FC32_t
// entry type of the output matrix C (single-precision complex)
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
// (a no-op for the ONE operator: the input value is never read)
#define GB_GETA(aij,Ax,pA) \
;
// the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GxB_CMPLXF(1,0) ;
// casting
// (a no-op here: A and C share the same type, and ONE ignores its input)
#define GB_CAST(z, aij) \
; ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
; ; \
/* Cx [pC] = op (cast (aij)) */ \
; ; \
Cx [pC] = GxB_CMPLXF(1,0) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the ONE unary operator to every entry of A.
// For ONE, op(aij) is the constant GxB_CMPLXF(1,0) and the input value is
// never read; the bare "; ;" statements below are the generator's no-op
// expansions of the GETA and CAST steps, kept for structural uniformity
// across all generated unary-op kernels.
GrB_Info GB (_unop_apply__one_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out; the caller falls back
// to the generic (typecasting) kernel instead
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// A is sparse, hypersparse, or full: all anz entries are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// (dead branch for ONE, which is not the identity op; see the macro
// GB_OP_IS_IDENTITY_WITH_NO_TYPECAST, defined as 0 for this kernel)
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
; ;
Cx [p] = GxB_CMPLXF(1,0) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions where the bitmap says no entry is present
if (!Ab [p]) continue ;
; ;
; ;
Cx [p] = GxB_CMPLXF(1,0) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast, and apply the ONE unary
// operator. All of the loop machinery comes from the shared template
// GB_unop_transpose.c, included directly into the function body and
// specialized through the GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__one_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// compiled out; caller uses the generic transpose-apply path
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_AxB_saxpy3_template.c | //------------------------------------------------------------------------------
// GB_AxB_saxpy3_template: C=A*B, C<M>=A*B, or C<!M>=A*B via saxpy3 method
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// GB_AxB_saxpy3_template.c computes C=A*B for any semiring and matrix types.
// It is #include'd in GB_AxB_saxpy3 to construct the generic method (for
// arbitrary user-defined operators and/or typecasting), and in the hard-coded
// GB_Asaxpy3B* workers in the Generated/ folder.
#include "GB_unused.h"
//------------------------------------------------------------------------------
// template code for C=A*B via the saxpy3 method
//------------------------------------------------------------------------------
{
//--------------------------------------------------------------------------
// get the chunk size
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// get M, A, B, and C
//--------------------------------------------------------------------------
int64_t *GB_RESTRICT Cp = C->p ;
// const int64_t *GB_RESTRICT Ch = C->h ;
const int64_t cvlen = C->vlen ;
const int64_t cnvec = C->nvec ;
const int64_t *GB_RESTRICT Bp = B->p ;
const int64_t *GB_RESTRICT Bh = B->h ;
const int64_t *GB_RESTRICT Bi = B->i ;
const GB_BTYPE *GB_RESTRICT Bx = B_is_pattern ? NULL : B->x ;
// const int64_t bvlen = B->vlen ;
// const int64_t bnvec = B->nvec ;
// const bool B_is_hyper = B->is_hyper ;
const int64_t *GB_RESTRICT Ap = A->p ;
const int64_t *GB_RESTRICT Ah = A->h ;
const int64_t *GB_RESTRICT Ai = A->i ;
const int64_t anvec = A->nvec ;
const bool A_is_hyper = GB_IS_HYPER (A) ;
const GB_ATYPE *GB_RESTRICT Ax = A_is_pattern ? NULL : A->x ;
const int64_t *GB_RESTRICT Mp = NULL ;
const int64_t *GB_RESTRICT Mh = NULL ;
const int64_t *GB_RESTRICT Mi = NULL ;
const GB_void *GB_RESTRICT Mx = NULL ;
size_t msize = 0 ;
int64_t mnvec = 0 ;
bool M_is_hyper = false ;
if (M != NULL)
{
Mp = M->p ;
Mh = M->h ;
Mi = M->i ;
Mx = (Mask_struct ? NULL : (M->x)) ;
msize = M->type->size ;
mnvec = M->nvec ;
M_is_hyper = M->is_hyper ;
}
// 3 cases:
// M not present and Mask_comp false: compute C=A*B
// M present and Mask_comp false: compute C<M>=A*B
// M present and Mask_comp true : compute C<!M>=A*B
// If M is NULL on input, then Mask_comp is also false on input.
bool mask_is_M = (M != NULL && !Mask_comp) ;
//==========================================================================
// phase2: numeric work for fine tasks
//==========================================================================
// Coarse tasks: nothing to do in phase2.
// Fine tasks: compute nnz (C(:,j)), and values in Hx via atomics.
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < nfine ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
int64_t kk = TaskList [taskid].vector ;
int64_t hash_size = TaskList [taskid].hsize ;
bool use_Gustavson = (hash_size == cvlen) ;
int64_t pB = TaskList [taskid].start ;
int64_t pB_end = TaskList [taskid].end + 1 ;
#if !GB_IS_ANY_PAIR_SEMIRING
GB_CTYPE *GB_RESTRICT Hx = (GB_CTYPE *) TaskList [taskid].Hx ;
#endif
int64_t pleft = 0, pright = anvec-1 ;
if (use_Gustavson)
{
//------------------------------------------------------------------
// phase2: fine Gustavson task
//------------------------------------------------------------------
// Hf [i] == 0: unlocked, i has not been seen in C(:,j).
// Hx [i] is not initialized.
// M(i,j) is 0, or M is not present.
// if M: Hf [i] stays equal to 0 (or 3 if locked)
// if !M, or no M: C(i,j) is a new entry seen for 1st time
// Hf [i] == 1: unlocked, i has not been seen in C(:,j).
// Hx [i] is not initialized. M is present.
// M(i,j) is 1. (either M or !M case)
// if M: C(i,j) is a new entry seen for the first time.
// if !M: Hf [i] stays equal to 1 (or 3 if locked)
// Hf [i] == 2: unlocked, i has been seen in C(:,j).
// Hx [i] is initialized. This case is independent of M.
// Hf [i] == 3: locked. Hx [i] cannot be accessed.
int8_t *GB_RESTRICT Hf = (int8_t *GB_RESTRICT) TaskList [taskid].Hf;
if (M == NULL)
{
//--------------------------------------------------------------
// phase2: fine Gustavson task, C=A*B
//--------------------------------------------------------------
// Hf [i] is initially 0.
// 0 -> 3 : to lock, if i seen for first time
// 2 -> 3 : to lock, if i seen already
// 3 -> 2 : to unlock; now i has been seen
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
if (aknz == 0) continue ;
GB_GET_B_kj ; // bkj = B(k,j)
// scan A(:,k)
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ; // get A(i,k)
GB_MULT_A_ik_B_kj ; // t = A(i,k) * B(k,j)
int8_t f ;
#if GB_IS_ANY_MONOID
GB_ATOMIC_READ
f = Hf [i] ; // grab the entry
if (f == 2) continue ; // check if already updated
GB_ATOMIC_WRITE
Hf [i] = 2 ; // flag the entry
GB_ATOMIC_WRITE_HX (i, t) ; // Hx [i] = t
#else
#if GB_HAS_ATOMIC
GB_ATOMIC_READ
f = Hf [i] ; // grab the entry
if (f == 2) // if true, update C(i,j)
{
GB_ATOMIC_UPDATE_HX (i, t) ; // Hx [i] += t
continue ; // C(i,j) has been updated
}
#endif
do // lock the entry
{
// do this atomically:
// { f = Hf [i] ; Hf [i] = 3 ; }
GB_ATOMIC_CAPTURE_INT8 (f, Hf [i], 3) ;
} while (f == 3) ; // lock owner gets f=0 or 2
if (f == 0)
{
// C(i,j) is a new entry
GB_ATOMIC_WRITE_HX (i, t) ; // Hx [i] = t
}
else // f == 2
{
// C(i,j) already appears in C(:,j)
GB_ATOMIC_UPDATE_HX (i, t) ; // Hx [i] += t
}
GB_ATOMIC_WRITE
Hf [i] = 2 ; // unlock the entry
#endif
}
}
}
else if (mask_is_M)
{
//--------------------------------------------------------------
// phase2: fine Gustavson task, C<M>=A*B
//--------------------------------------------------------------
// Hf [i] is 0 if M(i,j) not present or M(i,j)=0.
// 0 -> 1 : has already been done in phase0 if M(i,j)=1
// 0 -> 0 : to ignore, if M(i,j)=0
// 1 -> 3 : to lock, if i seen for first time
// 2 -> 3 : to lock, if i seen already
// 3 -> 2 : to unlock; now i has been seen
GB_GET_M_j ; // get M(:,j)
GB_GET_M_j_RANGE (16) ; // get first and last in M(:,j)
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
GB_SKIP_IF_A_k_DISJOINT_WITH_M_j ;
GB_GET_B_kj ; // bkj = B(k,j)
#if GB_IS_ANY_MONOID
#define GB_IKJ \
int8_t f ; \
GB_ATOMIC_READ \
f = Hf [i] ; /* grab the entry */ \
if (f == 0 || f == 2) continue ; \
GB_ATOMIC_WRITE \
Hf [i] = 2 ; /* unlock the entry */ \
GB_MULT_A_ik_B_kj ; /* t = A(i,k) * B(k,j) */ \
GB_ATOMIC_WRITE_HX (i, t) ; /* Hx [i] = t */
#else
#define GB_IKJ \
{ \
GB_MULT_A_ik_B_kj ; /* t = A(i,k) * B(k,j) */ \
int8_t f ; \
GB_ATOMIC_READ \
f = Hf [i] ; /* grab the entry */ \
if (GB_HAS_ATOMIC && (f == 2)) \
{ \
/* C(i,j) already seen; update it */ \
GB_ATOMIC_UPDATE_HX (i, t) ; /* Hx [i] += t */ \
continue ; /* C(i,j) has been updated */ \
} \
if (f == 0) continue ; /* M(i,j)=0; ignore C(i,j)*/ \
do /* lock the entry */ \
{ \
/* do this atomically: */ \
/* { f = Hf [i] ; Hf [i] = 3 ; } */ \
GB_ATOMIC_CAPTURE_INT8 (f, Hf [i], 3) ; \
} while (f == 3) ; /* lock owner gets f=1 or 2 */ \
if (f == 1) \
{ \
/* C(i,j) is a new entry */ \
GB_ATOMIC_WRITE_HX (i, t) ; /* Hx [i] = t */ \
} \
else /* f == 2 */ \
{ \
/* C(i,j) already appears in C(:,j) */ \
GB_ATOMIC_UPDATE_HX (i, t) ; /* Hx [i] += t */ \
} \
GB_ATOMIC_WRITE \
Hf [i] = 2 ; /* unlock the entry */ \
}
#endif
#define GB_IKJ_VECTORIZE
#define GB_IKJ_IVDEP
GB_SCAN_M_j_OR_A_k ;
#undef GB_IKJ_VECTORIZE
#undef GB_IKJ_IVDEP
#undef GB_IKJ
}
}
else
{
//--------------------------------------------------------------
// phase2: fine Gustavson task, C<!M>=A*B
//--------------------------------------------------------------
// Hf [i] is 0 if M(i,j) not present or M(i,j)=0.
// 0 -> 1 : has already been done in phase0 if M(i,j)=1
// 1 -> 1 : to ignore, if M(i,j)=1
// 0 -> 3 : to lock, if i seen for first time
// 2 -> 3 : to lock, if i seen already
// 3 -> 2 : to unlock; now i has been seen
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
if (aknz == 0) continue ;
GB_GET_B_kj ; // bkj = B(k,j)
// scan A(:,k)
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ; // get A(i,k)
GB_MULT_A_ik_B_kj ; // t = A(i,k) * B(k,j)
int8_t f ;
#if GB_IS_ANY_MONOID
GB_ATOMIC_READ
f = Hf [i] ; // grab the entry
if (f == 1 || f == 2) continue ;
GB_ATOMIC_WRITE
Hf [i] = 2 ; // unlock the entry
GB_ATOMIC_WRITE_HX (i, t) ; // Hx [i] = t
#else
GB_ATOMIC_READ
f = Hf [i] ; // grab the entry
#if GB_HAS_ATOMIC
if (f == 2) // if true, update C(i,j)
{
GB_ATOMIC_UPDATE_HX (i, t) ; // Hx [i] += t
continue ; // C(i,j) has been updated
}
#endif
if (f == 1) continue ; // M(i,j)=1; ignore C(i,j)
do // lock the entry
{
// do this atomically:
// { f = Hf [i] ; Hf [i] = 3 ; }
GB_ATOMIC_CAPTURE_INT8 (f, Hf [i], 3) ;
} while (f == 3) ; // lock owner gets f=0 or 2
if (f == 0)
{
// C(i,j) is a new entry
GB_ATOMIC_WRITE_HX (i, t) ; // Hx [i] = t
}
else // f == 2
{
// C(i,j) already seen
GB_ATOMIC_UPDATE_HX (i, t) ; // Hx [i] += t
}
GB_ATOMIC_WRITE
Hf [i] = 2 ; // unlock the entry
#endif
}
}
}
}
else
{
//------------------------------------------------------------------
// phase2: fine hash task
//------------------------------------------------------------------
// Each hash entry Hf [hash] splits into two parts, (h,f). f
// is in the 2 least significant bits. h is 62 bits, and is
// the 1-based index i of the C(i,j) entry stored at that
// location in the hash table.
// If M is present (M or !M), and M(i,j)=1, then (i+1,1)
// has been inserted into the hash table, in phase0.
// Given Hf [hash] split into (h,f)
// h == 0, f == 0: unlocked and unoccupied.
// note that if f=0, h must be zero too.
// h == i+1, f == 1: unlocked, occupied by M(i,j)=1.
// C(i,j) has not been seen, or is ignored.
// Hx is not initialized. M is present.
// if !M: this entry will be ignored in C.
// h == i+1, f == 2: unlocked, occupied by C(i,j).
// Hx is initialized. M is no longer
// relevant.
// h == (anything), f == 3: locked.
int64_t *GB_RESTRICT
Hf = (int64_t *GB_RESTRICT) TaskList [taskid].Hf ;
int64_t hash_bits = (hash_size-1) ;
if (M == NULL)
{
//--------------------------------------------------------------
// phase2: fine hash task, C=A*B
//--------------------------------------------------------------
// Given Hf [hash] split into (h,f)
// h == 0 , f == 0 : unlocked and unoccupied.
// h == i+1, f == 2 : unlocked, occupied by C(i,j).
// Hx is initialized.
// h == ..., f == 3 : locked.
// 0 -> 3 : to lock, if i seen for first time
// 2 -> 3 : to lock, if i seen already
// 3 -> 2 : to unlock; now i has been seen
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
if (aknz == 0) continue ;
GB_GET_B_kj ; // bkj = B(k,j)
// scan A(:,k)
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ; // get A(i,k)
GB_MULT_A_ik_B_kj ; // t = A(i,k) * B(k,j)
int64_t i1 = i + 1 ; // i1 = one-based index
int64_t i_unlocked = (i1 << 2) + 2 ; // (i+1,2)
for (GB_HASH (i)) // find i in hash table
{
int64_t hf ;
GB_ATOMIC_READ
hf = Hf [hash] ; // grab the entry
#if GB_HAS_ATOMIC
if (hf == i_unlocked) // if true, update C(i,j)
{
GB_ATOMIC_UPDATE_HX (hash, t) ;// Hx [.]+=t
break ; // C(i,j) has been updated
}
#endif
int64_t h = (hf >> 2) ;
if (h == 0 || h == i1)
{
// h=0: unoccupied, h=i1: occupied by i
do // lock the entry
{
// do this atomically:
// { hf = Hf [hash] ; Hf [hash] |= 3 ; }
GB_ATOMIC_CAPTURE_INT64_OR (hf,Hf[hash],3) ;
} while ((hf & 3) == 3) ; // owner: f=0 or 2
if (hf == 0) // f == 0
{
// C(i,j) is a new entry in C(:,j)
// Hx [hash] = t
GB_ATOMIC_WRITE_HX (hash, t) ;
GB_ATOMIC_WRITE
Hf [hash] = i_unlocked ; // unlock entry
break ;
}
if (hf == i_unlocked) // f == 2
{
// C(i,j) already appears in C(:,j)
// Hx [hash] += t
GB_ATOMIC_UPDATE_HX (hash, t) ;
GB_ATOMIC_WRITE
Hf [hash] = i_unlocked ; // unlock entry
break ;
}
// hash table occupied, but not with i
GB_ATOMIC_WRITE
Hf [hash] = hf ; // unlock with prior value
}
}
}
}
}
else if (mask_is_M)
{
//--------------------------------------------------------------
// phase2: fine hash task, C<M>=A*B
//--------------------------------------------------------------
// Given Hf [hash] split into (h,f)
// h == 0 , f == 0 : unlocked, unoccupied. C(i,j) ignored
// h == i+1, f == 1 : unlocked, occupied by M(i,j)=1.
// C(i,j) has not been seen.
// Hx is not initialized.
// h == i+1, f == 2 : unlocked, occupied by C(i,j), M(i,j)=1
// Hx is initialized.
// h == ..., f == 3 : locked.
// 0 -> 0 : to ignore, if M(i,j)=0
// 1 -> 3 : to lock, if i seen for first time
// 2 -> 3 : to lock, if i seen already
// 3 -> 2 : to unlock; now i has been seen
GB_GET_M_j ; // get M(:,j)
GB_GET_M_j_RANGE (16) ; // get first and last in M(:,j)
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
GB_SKIP_IF_A_k_DISJOINT_WITH_M_j ;
GB_GET_B_kj ; // bkj = B(k,j)
#define GB_IKJ_VECTORIZE
#define GB_IKJ_IVDEP
#define GB_IKJ \
{ \
GB_MULT_A_ik_B_kj ; /* t = A(i,k) * B(k,j) */ \
int64_t i1 = i + 1 ; /* i1 = one-based index */ \
int64_t i_unlocked = (i1 << 2) + 2 ; /* (i+1,2) */ \
for (GB_HASH (i)) /* find i in hash table */ \
{ \
int64_t hf ; \
GB_ATOMIC_READ \
hf = Hf [hash] ; /* grab the entry */ \
if (GB_HAS_ATOMIC && (hf == i_unlocked)) \
{ \
/* Hx [hash] += t */ \
GB_ATOMIC_UPDATE_HX (hash, t) ; \
break ; /* C(i,j) has been updated */ \
} \
if (hf == 0) break ; /* M(i,j)=0; ignore Cij */ \
if ((hf >> 2) == i1) /* if true, i found */ \
{ \
do /* lock the entry */ \
{ \
/* do this atomically: */ \
/* { hf = Hf [hash] ; Hf [hash] |= 3 ; }*/ \
GB_ATOMIC_CAPTURE_INT64_OR (hf,Hf[hash],3);\
} while ((hf & 3) == 3) ; /* own: f=1,2 */ \
if ((hf & 3) == 1) /* f == 1 */ \
{ \
/* C(i,j) is a new entry in C(:,j) */ \
/* Hx [hash] = t */ \
GB_ATOMIC_WRITE_HX (hash, t) ; \
} \
else /* f == 2 */ \
{ \
/* C(i,j) already appears in C(:,j) */ \
/* Hx [hash] += t */ \
GB_ATOMIC_UPDATE_HX (hash, t) ; \
} \
GB_ATOMIC_WRITE \
Hf [hash] = i_unlocked ; /* unlock entry */ \
break ; \
} \
} \
}
GB_SCAN_M_j_OR_A_k ;
#undef GB_IKJ_VECTORIZE
#undef GB_IKJ_IVDEP
#undef GB_IKJ
}
}
else
{
//--------------------------------------------------------------
// phase2: fine hash task, C<!M>=A*B
//--------------------------------------------------------------
// Given Hf [hash] split into (h,f)
// h == 0 , f == 0 : unlocked and unoccupied.
// h == i+1, f == 1 : unlocked, occupied by M(i,j)=1.
// C(i,j) is ignored.
// h == i+1, f == 2 : unlocked, occupied by C(i,j).
// Hx is initialized.
// h == (anything), f == 3: locked.
// 1 -> 1 : to ignore, if M(i,j)=1
// 0 -> 3 : to lock, if i seen for first time
// 2 -> 3 : to lock, if i seen already
// 3 -> 2 : to unlock; now i has been seen
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
if (aknz == 0) continue ;
GB_GET_B_kj ; // bkj = B(k,j)
// scan A(:,k)
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ; // get A(i,k)
GB_MULT_A_ik_B_kj ; // t = A(i,k) * B(k,j)
int64_t i1 = i + 1 ; // i1 = one-based index
int64_t i_unlocked = (i1 << 2) + 2 ; // (i+1,2)
int64_t i_masked = (i1 << 2) + 1 ; // (i+1,1)
for (GB_HASH (i)) // find i in hash table
{
int64_t hf ;
GB_ATOMIC_READ
hf = Hf [hash] ; // grab the entry
#if GB_HAS_ATOMIC
if (hf == i_unlocked) // if true, update C(i,j)
{
GB_ATOMIC_UPDATE_HX (hash, t) ;// Hx [.]+=t
break ; // C(i,j) has been updated
}
#endif
if (hf == i_masked) break ; // M(i,j)=1; ignore
int64_t h = (hf >> 2) ;
if (h == 0 || h == i1)
{
// h=0: unoccupied, h=i1: occupied by i
do // lock the entry
{
// do this atomically:
// { hf = Hf [hash] ; Hf [hash] |= 3 ; }
GB_ATOMIC_CAPTURE_INT64_OR (hf,Hf[hash],3) ;
} while ((hf & 3) == 3) ; // owner: f=0,1,2
if (hf == 0) // f == 0
{
// C(i,j) is a new entry in C(:,j)
// Hx [hash] = t
GB_ATOMIC_WRITE_HX (hash, t) ;
GB_ATOMIC_WRITE
Hf [hash] = i_unlocked ; // unlock entry
break ;
}
if (hf == i_unlocked) // f == 2
{
// C(i,j) already appears in C(:,j)
// Hx [hash] += t
GB_ATOMIC_UPDATE_HX (hash, t) ;
GB_ATOMIC_WRITE
Hf [hash] = i_unlocked ; // unlock entry
break ;
}
// hash table occupied, but not with i,
// or with i but M(i,j)=1 so C(i,j) ignored
GB_ATOMIC_WRITE
Hf [hash] = hf ; // unlock with prior value
}
}
}
}
}
}
}
//==========================================================================
// phase3/phase4: count nnz(C(:,j)) for fine tasks, cumsum of Cp
//==========================================================================
int64_t cjnz_max = GB_AxB_saxpy3_cumsum (C, TaskList,
nfine, chunk, nthreads) ;
//==========================================================================
// phase5: numeric phase for coarse tasks, gather for fine tasks
//==========================================================================
// allocate Ci and Cx
int64_t cnz = Cp [cnvec] ;
GrB_Info info = GB_ix_alloc (C, cnz, true, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
return (GrB_OUT_OF_MEMORY) ;
}
int64_t *GB_RESTRICT Ci = C->i ;
GB_CTYPE *GB_RESTRICT Cx = C->x ;
#if GB_IS_ANY_PAIR_SEMIRING
// ANY_PAIR semiring: result is purely symbolic
int64_t pC ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pC = 0 ; pC < cnz ; pC++)
{
Cx [pC] = 1 ;
}
// Just a precaution; these variables are not used below. Any attempt
// to access them will lead to a compile error.
#define Cx is not used
#define Hx is not used
// these have been renamed to ANY_PAIR:
// EQ_PAIR
// LAND_PAIR
// LOR_PAIR
// MAX_PAIR
// MIN_PAIR
// TIMES_PAIR
#endif
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
#if !GB_IS_ANY_PAIR_SEMIRING
GB_CTYPE *GB_RESTRICT Hx = (GB_CTYPE *) TaskList [taskid].Hx ;
#endif
int64_t hash_size = TaskList [taskid].hsize ;
bool use_Gustavson = (hash_size == cvlen) ;
if (taskid < nfine)
{
//------------------------------------------------------------------
// fine task: gather pattern and values
//------------------------------------------------------------------
int64_t kk = TaskList [taskid].vector ;
int team_size = TaskList [taskid].team_size ;
int master = TaskList [taskid].master ;
int my_teamid = taskid - master ;
int64_t pC = Cp [kk] ;
if (use_Gustavson)
{
//--------------------------------------------------------------
// phase5: fine Gustavson task, C=A*B, C<M>=A*B, or C<!M>=A*B
//--------------------------------------------------------------
// Hf [i] == 2 if C(i,j) is an entry in C(:,j)
int8_t *GB_RESTRICT
Hf = (int8_t *GB_RESTRICT) TaskList [taskid].Hf ;
int64_t cjnz = Cp [kk+1] - pC ;
int64_t istart, iend ;
GB_PARTITION (istart, iend, cvlen, my_teamid, team_size) ;
if (cjnz == cvlen)
{
// C(:,j) is dense
for (int64_t i = istart ; i < iend ; i++)
{
Ci [pC + i] = i ;
}
#if !GB_IS_ANY_PAIR_SEMIRING
// copy Hx [istart:iend-1] into Cx [pC+istart:pC+iend-1]
GB_CIJ_MEMCPY (pC + istart, istart, iend - istart) ;
#endif
}
else
{
// C(:,j) is sparse
pC += TaskList [taskid].my_cjnz ;
for (int64_t i = istart ; i < iend ; i++)
{
if (Hf [i] == 2)
{
#if !GB_IS_ANY_PAIR_SEMIRING
GB_CIJ_GATHER (pC, i) ; // Cx [pC] = Hx [i]
#endif
Ci [pC++] = i ;
}
}
}
}
else
{
//--------------------------------------------------------------
// phase5: fine hash task, C=A*B, C<M>=A*B, C<!M>=A*B
//--------------------------------------------------------------
// (Hf [hash] & 3) == 2 if C(i,j) is an entry in C(:,j),
// and the index i of the entry is (Hf [hash] >> 2) - 1.
int64_t *GB_RESTRICT
Hf = (int64_t *GB_RESTRICT) TaskList [taskid].Hf ;
int64_t mystart, myend ;
GB_PARTITION (mystart, myend, hash_size, my_teamid, team_size) ;
pC += TaskList [taskid].my_cjnz ;
for (int64_t hash = mystart ; hash < myend ; hash++)
{
int64_t hf = Hf [hash] ;
if ((hf & 3) == 2)
{
int64_t i = (hf >> 2) - 1 ; // found C(i,j) in hash
Ci [pC++] = i ;
}
}
}
}
else
{
//------------------------------------------------------------------
// numeric coarse task: compute C(:,kfirst:klast)
//------------------------------------------------------------------
int64_t *GB_RESTRICT
Hf = (int64_t *GB_RESTRICT) TaskList [taskid].Hf ;
int64_t kfirst = TaskList [taskid].start ;
int64_t klast = TaskList [taskid].end ;
int64_t nk = klast - kfirst + 1 ;
int64_t mark = 2*nk + 1 ;
if (use_Gustavson)
{
//--------------------------------------------------------------
// phase5: coarse Gustavson task
//--------------------------------------------------------------
if (M == NULL)
{
//----------------------------------------------------------
// phase5: coarse Gustavson task, C=A*B
//----------------------------------------------------------
for (int64_t kk = kfirst ; kk <= klast ; kk++)
{
int64_t pC = Cp [kk] ;
int64_t cjnz = Cp [kk+1] - pC ;
if (cjnz == 0) continue ; // nothing to do
GB_GET_B_j ; // get B(:,j)
mark++ ;
if (cjnz == cvlen) // C(:,j) is dense
{
GB_COMPUTE_DENSE_C_j ; // C(:,j) = A*B(:,j)
}
else if (bjnz == 1) // C(:,j) = A(:,k)*B(k,j)
{
GB_COMPUTE_C_j_WHEN_NNZ_B_j_IS_ONE ;
}
else if (16 * cjnz > cvlen) // C(:,j) is not very sparse
{
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
if (aknz == 0) continue ;
GB_GET_B_kj ; // bkj = B(k,j)
// scan A(:,k)
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ; // get A(i,k)
GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j)
if (Hf [i] != mark)
{
// C(i,j) = A(i,k) * B(k,j)
Hf [i] = mark ;
GB_HX_WRITE (i, t) ; // Hx [i] = t
}
else
{
// C(i,j) += A(i,k) * B(k,j)
GB_HX_UPDATE (i, t) ; // Hx [i] += t
}
}
}
GB_GATHER_ALL_C_j(mark) ; // gather into C(:,j)
}
else // C(:,j) is very sparse
{
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
if (aknz == 0) continue ;
GB_GET_B_kj ; // bkj = B(k,j)
// scan A(:,k)
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ; // get A(i,k)
GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j)
if (Hf [i] != mark)
{
// C(i,j) = A(i,k) * B(k,j)
Hf [i] = mark ;
GB_HX_WRITE (i, t) ; // Hx [i] = t
Ci [pC++] = i ;
}
else
{
// C(i,j) += A(i,k) * B(k,j)
GB_HX_UPDATE (i, t) ; // Hx [i] += t
}
}
}
GB_SORT_AND_GATHER_C_j ; // gather into C(:,j)
}
}
}
else if (mask_is_M)
{
//----------------------------------------------------------
// phase5: coarse Gustavson task, C<M>=A*B
//----------------------------------------------------------
// Initially, Hf [...] < mark for all of Hf.
// Hf [i] < mark : M(i,j)=0, C(i,j) is ignored.
// Hf [i] == mark : M(i,j)=1, and C(i,j) not yet seen.
// Hf [i] == mark+1 : M(i,j)=1, and C(i,j) has been seen.
for (int64_t kk = kfirst ; kk <= klast ; kk++)
{
int64_t pC = Cp [kk] ;
int64_t cjnz = Cp [kk+1] - pC ;
if (cjnz == 0) continue ; // nothing to do
GB_GET_B_j ; // get B(:,j)
if (cjnz == cvlen) // C(:,j) is dense
{
GB_COMPUTE_DENSE_C_j ; // C(:,j) = A*B(:,j)
continue ; // no need to examine M(:,j)
}
GB_GET_M_j ; // get M(:,j)
GB_GET_M_j_RANGE (64) ; // get first and last in M(:,j)
mark += 2 ;
int64_t mark1 = mark+1 ;
// scatter M(:,j)
GB_SCATTER_M_j (pM_start, pM_end, mark) ;
if (16 * cjnz > cvlen) // C(:,j) is not very sparse
{
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
GB_SKIP_IF_A_k_DISJOINT_WITH_M_j ;
GB_GET_B_kj ; // bkj = B(k,j)
#define GB_IKJ_VECTORIZE GB_PRAGMA_VECTORIZE
#define GB_IKJ_IVDEP GB_PRAGMA_IVDEP
#define GB_IKJ \
{ \
int64_t hf = Hf [i] ; \
if (hf == mark) \
{ \
/* C(i,j) = A(i,k) * B(k,j) */ \
Hf [i] = mark1 ; /* mark as seen */\
GB_MULT_A_ik_B_kj ; /* t = aik*bkj */ \
GB_HX_WRITE (i, t) ; /* Hx [i] = t */ \
} \
else if (hf == mark1) \
{ \
/* C(i,j) += A(i,k) * B(k,j) */ \
GB_MULT_A_ik_B_kj ; /* t = aik*bkj */ \
GB_HX_UPDATE (i, t) ;/* Hx [i] += t */ \
} \
}
GB_SCAN_M_j_OR_A_k ;
#undef GB_IKJ_VECTORIZE
#undef GB_IKJ_IVDEP
#undef GB_IKJ
}
GB_GATHER_ALL_C_j(mark1) ; // gather into C(:,j)
}
else // C(:,j) is very sparse
{
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
GB_SKIP_IF_A_k_DISJOINT_WITH_M_j ;
GB_GET_B_kj ; // bkj = B(k,j)
#define GB_IKJ_VECTORIZE GB_PRAGMA_VECTORIZE
#define GB_IKJ_IVDEP GB_PRAGMA_IVDEP
#define GB_IKJ \
{ \
int64_t hf = Hf [i] ; \
if (hf == mark) \
{ \
/* C(i,j) = A(i,k) * B(k,j) */ \
Hf [i] = mark1 ; /* mark as seen */\
GB_MULT_A_ik_B_kj ; /* t = aik*bkj */ \
GB_HX_WRITE (i, t) ; /* Hx [i] = t */ \
Ci [pC++] = i ; /* C(:,j) pattern */ \
} \
else if (hf == mark1) \
{ \
/* C(i,j) += A(i,k) * B(k,j) */ \
GB_MULT_A_ik_B_kj ; /* t = aik*bkj */ \
GB_HX_UPDATE (i, t) ;/* Hx [i] += t */ \
} \
}
GB_SCAN_M_j_OR_A_k ;
#undef GB_IKJ_VECTORIZE
#undef GB_IKJ_IVDEP
#undef GB_IKJ
}
GB_SORT_AND_GATHER_C_j ; // gather into C(:,j)
}
}
}
else
{
//----------------------------------------------------------
// phase5: coarse Gustavson task, C<!M>=A*B
//----------------------------------------------------------
// if !M:
// Hf [i] < mark : M(i,j)=0, C(i,j) is not yet seen.
// Hf [i] == mark : M(i,j)=1, so C(i,j) is ignored.
// Hf [i] == mark+1 : M(i,j)=0, and C(i,j) has been seen.
for (int64_t kk = kfirst ; kk <= klast ; kk++)
{
int64_t pC = Cp [kk] ;
int64_t cjnz = Cp [kk+1] - pC ;
if (cjnz == 0) continue ; // nothing to do
GB_GET_B_j ; // get B(:,j)
if (cjnz == cvlen) // C(:,j) is dense
{
GB_COMPUTE_DENSE_C_j ; // C(:,j) = A*B(:,j)
continue ; // no need to examine M(:,j)
}
GB_GET_M_j ; // get M(:,j)
mark += 2 ;
int64_t mark1 = mark+1 ;
// scatter M(:,j)
GB_SCATTER_M_j (pM_start, pM_end, mark) ;
if (16 * cjnz > cvlen) // C(:,j) is not very sparse
{
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
if (aknz == 0) continue ;
GB_GET_B_kj ; // bkj = B(k,j)
// scan A(:,k)
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ; // get A(i,k)
int64_t hf = Hf [i] ;
if (hf < mark)
{
// C(i,j) = A(i,k) * B(k,j)
Hf [i] = mark1 ; // mark as seen
GB_MULT_A_ik_B_kj ; // t =A(i,k)*B(k,j)
GB_HX_WRITE (i, t) ; // Hx [i] = t
}
else if (hf == mark1)
{
// C(i,j) += A(i,k) * B(k,j)
GB_MULT_A_ik_B_kj ; // t =A(i,k)*B(k,j)
GB_HX_UPDATE (i, t) ;// Hx [i] += t
}
}
}
GB_GATHER_ALL_C_j(mark1) ; // gather into C(:,j)
}
else // C(:,j) is very sparse
{
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
if (aknz == 0) continue ;
GB_GET_B_kj ; // bkj = B(k,j)
// scan A(:,k)
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ; // get A(i,k)
int64_t hf = Hf [i] ;
if (hf < mark)
{
// C(i,j) = A(i,k) * B(k,j)
Hf [i] = mark1 ; // mark as seen
GB_MULT_A_ik_B_kj ; // t =A(i,k)*B(k,j)
GB_HX_WRITE (i, t) ; // Hx [i] = t
Ci [pC++] = i ; // create C(:,j) pattern
}
else if (hf == mark1)
{
// C(i,j) += A(i,k) * B(k,j)
GB_MULT_A_ik_B_kj ; // t =A(i,k)*B(k,j)
GB_HX_UPDATE (i, t) ; // Hx [i] += t
}
}
}
GB_SORT_AND_GATHER_C_j ; // gather into C(:,j)
}
}
}
}
else
{
//--------------------------------------------------------------
// phase5: coarse hash task
//--------------------------------------------------------------
int64_t *GB_RESTRICT Hi = TaskList [taskid].Hi ;
int64_t hash_bits = (hash_size-1) ;
if (M == NULL)
{
//----------------------------------------------------------
// phase5: coarse hash task, C=A*B
//----------------------------------------------------------
// Initially, Hf [...] < mark for all of Hf.
// Let f = Hf [hash] and h = Hi [hash]
// f < mark : unoccupied.
// h == i, f == mark : occupied with C(i,j)
for (int64_t kk = kfirst ; kk <= klast ; kk++)
{
int64_t pC = Cp [kk] ;
int64_t cjnz = Cp [kk+1] - pC ;
if (cjnz == 0) continue ; // nothing to do
GB_GET_B_j ; // get B(:,j)
if (bjnz == 1) // C(:,j) = A(:,k)*B(k,j)
{
GB_COMPUTE_C_j_WHEN_NNZ_B_j_IS_ONE ;
continue ;
}
mark++ ;
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
if (aknz == 0) continue ;
GB_GET_B_kj ; // bkj = B(k,j)
// scan A(:,k)
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ; // get A(i,k)
GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j)
for (GB_HASH (i)) // find i in hash table
{
if (Hf [hash] == mark)
{
// hash entry is occupied
if (Hi [hash] == i)
{
// i already in the hash table
// Hx [hash] += t ;
GB_HX_UPDATE (hash, t) ;
break ;
}
}
else
{
// hash entry is not occupied
Hf [hash] = mark ;
Hi [hash] = i ;
GB_HX_WRITE (hash, t) ;// Hx[hash]=t
Ci [pC++] = i ;
break ;
}
}
}
}
// found i if: Hf [hash] == mark and Hi [hash] == i
GB_SORT_AND_GATHER_HASHED_C_j (mark, Hi [hash] == i)
}
}
else if (mask_is_M)
{
//----------------------------------------------------------
// phase5: coarse hash task, C<M>=A*B
//----------------------------------------------------------
// Initially, Hf [...] < mark for all of Hf.
// Let h = Hi [hash] and f = Hf [hash].
// f < mark : M(i,j)=0, C(i,j) is ignored.
// h == i, f == mark : M(i,j)=1, and C(i,j) not yet seen.
// h == i, f == mark+1 : M(i,j)=1, and C(i,j) has been seen.
for (int64_t kk = kfirst ; kk <= klast ; kk++)
{
int64_t pC = Cp [kk] ;
int64_t cjnz = Cp [kk+1] - pC ;
if (cjnz == 0) continue ; // nothing to do
GB_GET_M_j ; // get M(:,j)
GB_GET_M_j_RANGE (64) ; // get 1st & last in M(:,j)
mark += 2 ;
int64_t mark1 = mark+1 ;
GB_HASH_M_j ; // hash M(:,j)
GB_GET_B_j ; // get B(:,j)
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
GB_SKIP_IF_A_k_DISJOINT_WITH_M_j ;
GB_GET_B_kj ; // bkj = B(k,j)
#define GB_IKJ_VECTORIZE
#define GB_IKJ_IVDEP
#define GB_IKJ \
{ \
for (GB_HASH (i)) /* find i in hash */ \
{ \
int64_t f = Hf [hash] ; \
if (f < mark) break ; /* M(i,j)=0, ignore*/\
if (Hi [hash] == i) \
{ \
GB_MULT_A_ik_B_kj ; /* t = aik*bkj */ \
if (f == mark) /* if true, i is new */ \
{ \
/* C(i,j) is new */ \
Hf [hash] = mark1 ; /* mark seen */\
GB_HX_WRITE (hash, t) ;/*Hx[.]=t */\
Ci [pC++] = i ; \
} \
else \
{ \
/* C(i,j) has been seen; update */ \
GB_HX_UPDATE (hash, t) ; \
} \
break ; \
} \
} \
}
GB_SCAN_M_j_OR_A_k ;
#undef GB_IKJ_VECTORIZE
#undef GB_IKJ_IVDEP
#undef GB_IKJ
}
// found i if: Hf [hash] == mark1 and Hi [hash] == i
GB_SORT_AND_GATHER_HASHED_C_j (mark1, Hi [hash] == i) ;
}
}
else
{
//----------------------------------------------------------
// phase5: coarse hash task, C<!M>=A*B
//----------------------------------------------------------
// Initially, Hf [...] < mark for all of Hf.
// Let h = Hi [hash] and f = Hf [hash].
// f < mark: unoccupied, M(i,j)=0, and C(i,j) not yet seen.
// h == i, f == mark : M(i,j)=1. C(i,j) ignored.
// h == i, f == mark+1 : M(i,j)=0, and C(i,j) has been seen.
for (int64_t kk = kfirst ; kk <= klast ; kk++)
{
int64_t pC = Cp [kk] ;
int64_t cjnz = Cp [kk+1] - pC ;
if (cjnz == 0) continue ; // nothing to do
GB_GET_M_j ; // get M(:,j)
mark += 2 ;
int64_t mark1 = mark+1 ;
GB_HASH_M_j ; // hash M(:,j)
GB_GET_B_j ; // get B(:,j)
for ( ; pB < pB_end ; pB++) // scan B(:,j)
{
int64_t k = Bi [pB] ; // get B(k,j)
GB_GET_A_k ; // get A(:,k)
if (aknz == 0) continue ;
GB_GET_B_kj ; // bkj = B(k,j)
// scan A(:,k)
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ; // get A(i,k)
for (GB_HASH (i)) // find i in hash
{
int64_t f = Hf [hash] ;
if (f < mark) // if true, i is new
{
// C(i,j) is new
Hf [hash] = mark1 ; // mark C(i,j) seen
Hi [hash] = i ;
GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j)
GB_HX_WRITE (hash, t) ; // Hx [hash] = t
Ci [pC++] = i ;
break ;
}
if (Hi [hash] == i)
{
if (f == mark1)
{
// C(i,j) has been seen; update it.
GB_MULT_A_ik_B_kj ;//t=A(i,k)*B(k,j)
GB_HX_UPDATE (hash, t) ;//Hx[ ] += t
}
break ;
}
}
}
}
// found i if: Hf [hash] == mark1 and Hi [hash] == i
GB_SORT_AND_GATHER_HASHED_C_j (mark1, Hi [hash] == i) ;
}
}
}
}
}
//==========================================================================
// phase6: final gather phase for fine hash tasks
//==========================================================================
if (cjnz_max > 0)
{
int64_t *GB_RESTRICT W = NULL ;
int nthreads_msort = GB_MSORT_NTHREADS (nthreads) ;
if (cjnz_max <= GB_BASECASE) nthreads_msort = 1 ;
if (nthreads_msort > 1)
{
// allocate workspace for parallel mergesort
GB_MALLOC_MEMORY (W, cjnz_max, sizeof (int64_t)) ;
if (W == NULL)
{
// out of memory
return (GrB_OUT_OF_MEMORY) ;
}
}
for (taskid = 0 ; taskid < nfine ; taskid++)
{
int64_t hash_size = TaskList [taskid].hsize ;
bool use_Gustavson = (hash_size == cvlen) ;
if (!use_Gustavson && taskid == TaskList [taskid].master)
{
//--------------------------------------------------------------
// phase6: fine hash task, C=A*B, C<M>=A*B, C<!M>=A*B
//--------------------------------------------------------------
// (Hf [hash] & 3) == 2 if C(i,j) is an entry in C(:,j),
// and the index i of the entry is (Hf [hash] >> 2) - 1.
int64_t kk = TaskList [taskid].vector ;
int64_t hash_bits = (hash_size-1) ;
int64_t *GB_RESTRICT
Hf = (int64_t *GB_RESTRICT) TaskList [taskid].Hf ;
int64_t cjnz = Cp [kk+1] - Cp [kk] ;
// sort the pattern of C(:,j)
int nth = GB_nthreads (cjnz, chunk, nthreads_msort) ;
GB_msort_1 (Ci + Cp [kk], W, cjnz, nth) ;
#if !GB_IS_ANY_PAIR_SEMIRING
GB_CTYPE *GB_RESTRICT Hx =
(GB_CTYPE *) TaskList [taskid].Hx ;
// gather the values of C(:,j)
int64_t pC ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (pC = Cp [kk] ; pC < Cp [kk+1] ; pC++)
{
int64_t i = Ci [pC] ; // get C(i,j)
int64_t i1 = i + 1 ;
for (GB_HASH (i)) // find i in hash table
{
int64_t hf = Hf [hash] ;
if ((hf & 3) == 2 && (hf >> 2) == i1)
{
// found i in the hash table
GB_CIJ_GATHER (pC, hash) ; // Cx[pC] = Hx[hash]
break ;
}
}
}
#endif
}
}
// free workspace
GB_FREE_MEMORY (W, cjnz_max, sizeof (int64_t)) ;
}
}
#undef Cx
#undef Hx
|
idaFoodWeb_kry_omp.c | /*
* -----------------------------------------------------------------
* Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2022, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* Example program for IDA: Food web problem, OpenMP, GMRES,
* user-supplied preconditioner
*
* This example program uses SUNLinSol_SPGMR as the linear
* solver, and IDACalcIC for initial condition calculation.
*
* The mathematical problem solved in this example is a DAE system
* that arises from a system of partial differential equations after
* spatial discretization. The PDE system is a food web population
* model, with predator-prey interaction and diffusion on the unit
* square in two dimensions. The dependent variable vector is:
*
* 1 2 ns
* c = (c , c , ..., c ) , ns = 2 * np
*
* and the PDE's are as follows:
*
* i i i
* dc /dt = d(i)*(c + c ) + R (x,y,c) (i = 1,...,np)
* xx yy i
*
* i i
* 0 = d(i)*(c + c ) + R (x,y,c) (i = np+1,...,ns)
* xx yy i
*
* where the reaction terms R are:
*
* i ns j
* R (x,y,c) = c * (b(i) + sum a(i,j)*c )
* i j=1
*
* The number of species is ns = 2 * np, with the first np being
* prey and the last np being predators. The coefficients a(i,j),
* b(i), d(i) are:
*
* a(i,i) = -AA (all i)
* a(i,j) = -GG (i <= np , j > np)
* a(i,j) = EE (i > np, j <= np)
* all other a(i,j) = 0
* b(i) = BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np)
* b(i) =-BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i > np)
* d(i) = DPREY (i <= np)
* d(i) = DPRED (i > np)
*
* The various scalar parameters required are set using '#define'
* statements or directly in routine InitUserData. In this program,
* np = 1, ns = 2. The boundary conditions are homogeneous Neumann:
* normal derivative = 0.
*
* A polynomial in x and y is used to set the initial values of the
* first np variables (the prey variables) at each x,y location,
* while initial values for the remaining (predator) variables are
* set to a flat value, which is corrected by IDACalcIC.
*
* The PDEs are discretized by central differencing on a MX by MY
* mesh.
*
* The DAE system is solved by IDA using the SUNLinSol_SPGMR linear solver.
* Output is printed at t = 0, .001, .01, .1, .4, .7, 1.
*
* Optionally, we can set the number of threads from environment
* variable or command line. To check the current value for number
* of threads from environment:
* % echo $OMP_NUM_THREADS
*
* Execution:
*
* To use the default value for the number of threads from
* the OMP_NUM_THREADS environment value:
* % ./idaFoodWeb_kry_omp
* To specify the number of threads at the command line, use
* % ./idaFoodWeb_kry_omp num_threads
* where num_threads is the desired number of threads.
*
* -----------------------------------------------------------------
* References:
* [1] Peter N. Brown and Alan C. Hindmarsh,
* Reduced Storage Matrix Methods in Stiff ODE systems, Journal
* of Applied Mathematics and Computation, Vol. 31 (May 1989),
* pp. 40-91.
*
* [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
* Using Krylov Methods in the Solution of Large-Scale
* Differential-Algebraic Systems, SIAM J. Sci. Comput., 15
* (1994), pp. 1467-1488.
*
* [3] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
* Consistent Initial Condition Calculation for Differential-
* Algebraic Systems, SIAM J. Sci. Comput., 19 (1998),
* pp. 1495-1512.
* -----------------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ida/ida.h>
#include <sunlinsol/sunlinsol_spgmr.h>
#include <nvector/nvector_openmp.h>
#include <sundials/sundials_dense.h>
#include <sundials/sundials_types.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* helpful macros */
#ifndef MAX
#define MAX(A, B) ((A) > (B) ? (A) : (B))
#endif
/* Problem Constants. */
#define NPREY 1 /* No. of prey (= no. of predators). */
#define NUM_SPECIES 2*NPREY
#define PI RCONST(3.1415926535898)
#define FOURPI (RCONST(4.0)*PI)
#define MX 20 /* MX = number of x mesh points */
#define MY 20 /* MY = number of y mesh points */
#define NSMX (NUM_SPECIES * MX)
#define NEQ (NUM_SPECIES*MX*MY)
#define AA RCONST(1.0) /* Coefficient in above eqns. for a */
#define EE RCONST(10000.) /* Coefficient in above eqns. for a */
#define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */
#define BB RCONST(1.0) /* Coefficient in above eqns. for b */
#define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */
#define DPRED RCONST(0.05) /* Coefficient in above eqns. for d */
#define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */
#define BETA RCONST(1000.) /* Coefficient beta in above eqns. */
#define AX RCONST(1.0) /* Total range of x variable */
#define AY RCONST(1.0) /* Total range of y variable */
#define RTOL RCONST(1.e-5) /* Relative tolerance */
#define ATOL RCONST(1.e-5) /* Absolute tolerance */
#define NOUT 6 /* Number of output times */
#define TMULT RCONST(10.0) /* Multiplier for tout values */
#define TADD RCONST(0.3) /* Increment for tout values */
#define ZERO RCONST(0.)
#define ONE RCONST(1.0)
/*
* User-defined vector and accessor macro: IJ_Vptr.
* IJ_Vptr is defined in order to express the underlying 3-D structure of
* the dependent variable vector from its underlying 1-D storage (an N_Vector).
* IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to
* species index is = 0, x-index ix = i, and y-index jy = j.
*/
#define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX))
/* Type: UserData. Contains problem constants, etc.
   A pointer-to-struct typedef: all user routines receive this via the
   void *user_data argument of the IDA callbacks. */
typedef struct {
/* Problem dimensions: total equations, species, prey, x/y mesh sizes. */
sunindextype Neq, ns, np, mx, my;
/* Mesh spacings and the ns-by-ns interaction coefficient matrix a(i,j). */
realtype dx, dy, **acoef;
/* Diffusion coefficients d(i)/dx^2, d(i)/dy^2, and the b(i) coefficients. */
realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES];
/* Per-grid-point preconditioner blocks (dense ns-by-ns LU factors)... */
realtype **PP[MX][MY];
/* ...and their pivot arrays, filled in Precond, used in PSolve. */
sunindextype *pivot[MX][MY];
/* Scratch vector of reaction rates, reused by Fweb and Precond. */
N_Vector rates;
/* Error-weight vector, refreshed from IDA inside Precond. */
N_Vector ewt;
/* Back-pointer to the IDA memory block (needed for IDAGet* calls). */
void *ida_mem;
/* Number of OpenMP threads used by the parallel loops. */
int nthreads;
} *UserData;
/* Prototypes for functions called by the IDA Solver. */
static int resweb(realtype time, N_Vector cc, N_Vector cp, N_Vector resval,
void *user_data);
static int Precond(realtype tt, N_Vector cc, N_Vector cp,
N_Vector rr, realtype cj, void *user_data);
static int PSolve(realtype tt, N_Vector cc, N_Vector cp,
N_Vector rr, N_Vector rvec, N_Vector zvec,
realtype cj, realtype delta, void *user_data);
/* Prototypes for private Helper Functions. */
static void InitUserData(UserData webdata);
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
UserData webdata);
static void PrintHeader(int maxl, realtype rtol, realtype atol);
static void PrintOutput(void *ida_mem, N_Vector c, realtype t);
static void PrintFinalStats(void *ida_mem);
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata);
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
UserData webdata);
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2);
static int check_retval(void *returnvalue, char *funcname, int opt);
/*
*--------------------------------------------------------------------
* MAIN PROGRAM
*--------------------------------------------------------------------
*/
/*
 * main: set up the food-web DAE, solve it with IDA + SPGMR, and print
 * selected output and final statistics.
 *
 * Fixes relative to the original:
 *  - the results of malloc and the SUNDIALS allocators are now checked
 *    before use (the original dereferenced webdata and its members
 *    unconditionally);
 *  - "overwrithe" comment typo corrected.
 */
int main(int argc, char *argv[])
{
  void *ida_mem;
  SUNLinearSolver LS;
  UserData webdata;
  N_Vector cc, cp, id;
  int iout, jx, jy, retval;
  int maxl;
  realtype rtol, atol, t0, tout, tret;
  int num_threads;
  SUNContext ctx;

  ida_mem = NULL;
  LS = NULL;
  webdata = NULL;
  cc = cp = id = NULL;

  /* Set the number of threads to use */
  num_threads = 1;                       /* default value */
#ifdef _OPENMP
  num_threads = omp_get_max_threads();   /* overwrite with OMP_NUM_THREADS */
#endif
  if (argc > 1)          /* overwrite with command line value, if supplied */
    num_threads = (int) strtol(argv[1], NULL, 0);

  /* Create the SUNDIALS context object for this simulation */
  retval = SUNContext_Create(NULL, &ctx);
  if (check_retval(&retval, "SUNContext_Create", 1)) return 1;

  /* Allocate and initialize user data block webdata, checking every
     allocation before it is used. */
  webdata = (UserData) malloc(sizeof *webdata);
  if (check_retval((void *)webdata, "malloc", 2)) return(1);
  webdata->rates = N_VNew_OpenMP(NEQ, num_threads, ctx);
  if (check_retval((void *)webdata->rates, "N_VNew_OpenMP", 0)) return(1);
  webdata->acoef = SUNDlsMat_newDenseMat(NUM_SPECIES, NUM_SPECIES);
  if (check_retval((void *)webdata->acoef, "SUNDlsMat_newDenseMat", 2)) return(1);
  webdata->ewt = N_VNew_OpenMP(NEQ, num_threads, ctx);
  if (check_retval((void *)webdata->ewt, "N_VNew_OpenMP", 0)) return(1);
  for (jx = 0; jx < MX; jx++) {
    for (jy = 0; jy < MY; jy++) {
      (webdata->pivot)[jx][jy] = SUNDlsMat_newIndexArray(NUM_SPECIES);
      if (check_retval((void *)(webdata->pivot)[jx][jy],
                       "SUNDlsMat_newIndexArray", 2)) return(1);
      (webdata->PP)[jx][jy] = SUNDlsMat_newDenseMat(NUM_SPECIES, NUM_SPECIES);
      if (check_retval((void *)(webdata->PP)[jx][jy],
                       "SUNDlsMat_newDenseMat", 2)) return(1);
    }
  }
  webdata->nthreads = num_threads;
  InitUserData(webdata);

  /* Allocate N-vectors and initialize cc, cp, and id. */
  cc = N_VNew_OpenMP(NEQ, num_threads, ctx);
  if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1);
  cp = N_VNew_OpenMP(NEQ, num_threads, ctx);
  if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1);
  id = N_VNew_OpenMP(NEQ, num_threads, ctx);
  if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1);
  SetInitialProfiles(cc, cp, id, webdata);

  /* Set remaining inputs to IDAMalloc. */
  t0 = ZERO;
  rtol = RTOL;
  atol = ATOL;

  /* Call IDACreate and IDAMalloc to initialize IDA. */
  ida_mem = IDACreate(ctx);
  if(check_retval((void *)ida_mem, "IDACreate", 0)) return(1);
  retval = IDASetUserData(ida_mem, webdata);
  if(check_retval(&retval, "IDASetUserData", 1)) return(1);
  retval = IDASetId(ida_mem, id);
  if(check_retval(&retval, "IDASetId", 1)) return(1);
  retval = IDAInit(ida_mem, resweb, t0, cc, cp);
  if(check_retval(&retval, "IDAInit", 1)) return(1);
  retval = IDASStolerances(ida_mem, rtol, atol);
  if(check_retval(&retval, "IDASStolerances", 1)) return(1);
  webdata->ida_mem = ida_mem;

  /* Create SUNLinSol_SPGMR linear solver, attach to IDA, and set
     preconditioning routines. */
  maxl = 16;   /* max dimension of the Krylov subspace */
  LS = SUNLinSol_SPGMR(cc, SUN_PREC_LEFT, maxl, ctx); /* IDA only allows left preconditioning */
  if(check_retval((void *)LS, "SUNLinSol_SPGMR", 0)) return(1);
  retval = IDASetLinearSolver(ida_mem, LS, NULL);
  if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1);
  retval = IDASetPreconditioner(ida_mem, Precond, PSolve);
  if(check_retval(&retval, "IDASetPreconditioner", 1)) return(1);

  /* Call IDACalcIC (with default options) to correct the initial values. */
  tout = RCONST(0.001);
  retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout);
  if(check_retval(&retval, "IDACalcIC", 1)) return(1);

  /* Print heading, basic parameters, and initial values. */
  PrintHeader(maxl, rtol, atol);
  PrintOutput(ida_mem, cc, ZERO);

  /* Loop over iout, call IDASolve (normal mode), print selected output. */
  for (iout = 1; iout <= NOUT; iout++) {
    retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL);
    if(check_retval(&retval, "IDASolve", 1)) return(retval);
    PrintOutput(ida_mem, cc, tret);
    if (iout < 3) tout *= TMULT; else tout += TADD;
  }

  /* Print final statistics and free memory. */
  PrintFinalStats(ida_mem);
  printf("num_threads = %i\n\n", num_threads);

  /* Free memory */
  IDAFree(&ida_mem);
  SUNLinSolFree(LS);
  N_VDestroy(cc);
  N_VDestroy(cp);
  N_VDestroy(id);
  SUNDlsMat_destroyMat(webdata->acoef);
  N_VDestroy(webdata->rates);
  N_VDestroy(webdata->ewt);
  for (jx = 0; jx < MX; jx++) {
    for (jy = 0; jy < MY; jy ++) {
      SUNDlsMat_destroyArray((webdata->pivot)[jx][jy]);
      SUNDlsMat_destroyMat((webdata->PP)[jx][jy]);
    }
  }
  free(webdata);
  SUNContext_Free(&ctx);

  return(0);
}
/* Define lines for readability in later routines */
#define acoef (webdata->acoef)
#define bcoef (webdata->bcoef)
#define cox (webdata->cox)
#define coy (webdata->coy)
/*
*--------------------------------------------------------------------
* FUNCTIONS CALLED BY IDA
*--------------------------------------------------------------------
*/
/*
* resweb: System residual function for predator-prey system.
* This routine calls Fweb to get all the right-hand sides of the
* equations, then loads the residual vector accordingly,
* using cp in the case of prey species.
*/
/*
 * resweb: System residual function for the predator-prey DAE.
 * Calls Fweb to evaluate all right-hand sides into res, then converts
 * them into residual values: cp - rhs for the differential (prey)
 * components and -rhs for the algebraic (predator) components.
 */
static int resweb(realtype tt, N_Vector cc, N_Vector cp,
                  N_Vector res, void *user_data)
{
  sunindextype ix, iy, s, row_base, pt_base, nprey;
  realtype *res_data, *cp_data;
  UserData webdata;

  ix = iy = s = 0;
  webdata = (UserData)user_data;
  cp_data  = NV_DATA_OMP(cp);
  res_data = NV_DATA_OMP(res);
  nprey = webdata->np;

  /* res := right-hand sides (diffusion + reaction) at every grid point. */
  Fweb(tt, cc, res, webdata);

  /* Convert rhs values to residuals, in parallel over mesh rows. */
#pragma omp parallel for default(shared) private(iy, ix, s, row_base, pt_base) schedule(static) num_threads(webdata->nthreads)
  for (iy = 0; iy < MY; iy++) {
    row_base = NSMX * iy;
    for (ix = 0; ix < MX; ix++) {
      pt_base = row_base + NUM_SPECIES * ix;
      for (s = 0; s < NUM_SPECIES; s++) {
        res_data[pt_base + s] = (s < nprey)
          ? cp_data[pt_base + s] - res_data[pt_base + s]
          : -res_data[pt_base + s];
      }
    }
  }

  return(0);
}
/*
 * Precond: Preconditioner setup routine for IDA.
 * Builds a block-diagonal approximation to the Jacobian: one dense
 * NUM_SPECIES-by-NUM_SPECIES reaction block per grid point, formed by
 * finite differencing of WebRates, then LU-factored in place for use
 * by PSolve. Returns 0 on success, 1 on failure.
 */
static int Precond(realtype tt, N_Vector cc, N_Vector cp,
N_Vector rr, realtype cj, void *user_data)
{
int retval;
sunindextype ret;
realtype uround, xx, yy, del_x, del_y;
realtype **Pxy, *ratesxy, *Pxycol, *cxy, *cpxy, *ewtxy, cctmp;
realtype inc, fac, sqru, perturb_rates[NUM_SPECIES];
int is, js, jx, jy;
void *ida_mem;
N_Vector ewt;
realtype hh;
UserData webdata;
webdata = (UserData) user_data;
del_x = webdata->dx;
del_y = webdata->dy;
/* sqrt(unit roundoff) scales the finite-difference perturbation below. */
uround = UNIT_ROUNDOFF;
sqru = sqrt(uround);
/* Fetch current error weights and step size from IDA. */
ida_mem = webdata->ida_mem;
ewt = webdata->ewt;
retval = IDAGetErrWeights(ida_mem, ewt);
if(check_retval(&retval, "IDAGetErrWeights", 1)) return(1);
retval = IDAGetCurrentStep(ida_mem, &hh);
if(check_retval(&retval, "IDAGetCurrentStep", 1)) return(1);
for (jy = 0; jy < MY; jy++) {
yy = jy * del_y;
for (jx = 0; jx < MX; jx++) {
xx = jx * del_x;
Pxy = (webdata->PP)[jx][jy];
cxy = IJ_Vptr(cc, jx, jy);
cpxy = IJ_Vptr(cp, jx, jy);
ewtxy = IJ_Vptr(ewt, jx, jy);
/* ratesxy holds the unperturbed rates from the last Fweb call. */
ratesxy = IJ_Vptr((webdata->rates), jx, jy);
/* One column of the block per species: perturb c_js, re-evaluate the
   rates, and difference against the unperturbed rates. */
for (js = 0; js < NUM_SPECIES; js++) {
inc = sqru*(MAX(fabs(cxy[js]), MAX(hh*fabs(cpxy[js]), ONE/ewtxy[js])));
cctmp = cxy[js];
cxy[js] += inc;
fac = -ONE/inc;
WebRates(xx, yy, cxy, perturb_rates, webdata);
Pxycol = Pxy[js];
for (is = 0; is < NUM_SPECIES; is++)
Pxycol[is] = (perturb_rates[is] - ratesxy[is])*fac;
/* Add cj to the diagonal of the differential (prey) columns only.
   NOTE(review): hard-coded 1 equals np only because NPREY == 1 here;
   presumably this was meant as (js < np) — confirm before changing NPREY. */
if (js < 1) Pxycol[js] += cj;
/* Restore the perturbed component before the next column. */
cxy[js] = cctmp;
}
/* LU-factor the block; a nonzero return means a singular block. */
ret = SUNDlsMat_denseGETRF(Pxy, NUM_SPECIES, NUM_SPECIES, (webdata->pivot)[jx][jy]);
if (ret != 0) return(1);
}
}
return(0);
}
/*
 * PSolve: Preconditioner solve routine for IDA.
 * Applies the block-diagonal preconditioner: copies rvec into zvec,
 * then for each grid point back-solves with the LU factors computed
 * in Precond. Always returns 0.
 *
 * Fix: the penultimate parameter was misspelled "dalta"; renamed to
 * "delta" to match the prototype declared earlier in this file.
 */
static int PSolve(realtype tt, N_Vector cc, N_Vector cp,
                  N_Vector rr, N_Vector rvec, N_Vector zvec,
                  realtype cj, realtype delta, void *user_data)
{
  realtype **Pxy, *zxy;
  sunindextype *pivot;
  sunindextype jx, jy;
  UserData webdata;

  jx = jy = 0;
  webdata = (UserData) user_data;

  /* z := r, then overwrite z in place with P^{-1} r, block by block. */
  N_VScale(ONE, rvec, zvec);

#pragma omp parallel for collapse(2) default(shared) private(jx, jy, zxy, Pxy, pivot) schedule(static) num_threads(webdata->nthreads)
  for (jx = 0; jx < MX; jx++) {
    for (jy = 0; jy < MY; jy++) {
      zxy = IJ_Vptr(zvec, jx, jy);
      Pxy = (webdata->PP)[jx][jy];
      pivot = (webdata->pivot)[jx][jy];
      SUNDlsMat_denseGETRS(Pxy, NUM_SPECIES, pivot, zxy);
    }
  }

  return(0);
}
/*
*--------------------------------------------------------------------
* PRIVATE FUNCTIONS
*--------------------------------------------------------------------
*/
/*
* InitUserData: Load problem constants in webdata (of type UserData).
*/
/*
 * InitUserData: Load problem constants into webdata.
 * Fills the mesh parameters, the interaction matrix acoef (four
 * quadrants: prey-prey, prey-predator, predator-prey, predator-predator),
 * the b coefficients, and the diffusion coefficients cox/coy.
 */
static void InitUserData(UserData webdata)
{
  sunindextype i, j, nprey;
  realtype dx2, dy2;

  webdata->mx = MX;
  webdata->my = MY;
  webdata->ns = NUM_SPECIES;
  webdata->np = NPREY;
  webdata->dx = AX/(MX-1);
  webdata->dy = AY/(MY-1);
  webdata->Neq= NEQ;

  nprey = webdata->np;
  dx2 = (webdata->dx)*(webdata->dx);
  dy2 = (webdata->dy)*(webdata->dy);

  for (i = 0; i < nprey; i++) {
    /* Fill row i of each quadrant of acoef with indexed writes. */
    for (j = 0; j < nprey; j++) {
      acoef[i][j]                 = ZERO;  /* prey-prey off-diagonal      */
      acoef[i][nprey + j]         = -GG;   /* prey lost to predators      */
      acoef[i + nprey][j]         = EE;    /* predators gain from prey    */
      acoef[i + nprey][nprey + j] = ZERO;  /* predator-predator off-diag  */
    }
    /* Self-interaction terms on both diagonals. */
    acoef[i][i] = -AA;
    acoef[i + nprey][i + nprey] = -AA;
    /* Birth/death coefficients and diffusion coefficients. */
    bcoef[i] = BB;
    bcoef[i + nprey] = -BB;
    cox[i] = DPREY/dx2;
    cox[i + nprey] = DPRED/dx2;
    coy[i] = DPREY/dy2;
    coy[i + nprey] = DPRED/dy2;
  }
}
/*
* SetInitialProfiles: Set initial conditions in cc, cp, and id.
* A polynomial profile is used for the prey cc values, and a constant
* (1.0e5) is loaded as the initial guess for the predator cc values.
* The id values are set to 1 for the prey and 0 for the predators.
* The prey cp values are set according to the given system, and
* the predator cp values are set to zero.
*/
/*
 * SetInitialProfiles: Set initial conditions in cc, cp, and id.
 * Prey concentrations get a polynomial profile, predators a flat 1.0e5
 * guess (later corrected by IDACalcIC). id is 1 for differential (prey)
 * components and 0 for algebraic (predator) components. Prey cp values
 * come from Fweb; predator cp values are zeroed.
 */
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
                               UserData webdata)
{
  sunindextype base, row, s, ix, iy, nprey;
  realtype x, y, poly;
  realtype *cc_data, *cp_data, *id_data;

  cc_data = NV_DATA_OMP(cc);
  cp_data = NV_DATA_OMP(cp);
  id_data = NV_DATA_OMP(id);
  nprey = webdata->np;

  /* Load cc and id at every grid point. */
  for (iy = 0; iy < MY; iy++) {
    y = iy * webdata->dy;
    row = NSMX * iy;
    for (ix = 0; ix < MX; ix++) {
      x = ix * webdata->dx;
      /* Polynomial vanishing on the boundary, squared. */
      poly = RCONST(16.0)*x*(ONE-x)*y*(ONE-y);
      poly *= poly;
      base = row + NUM_SPECIES*ix;
      for (s = 0; s < NUM_SPECIES; s++) {
        if (s < nprey) {
          cc_data[base + s] = RCONST(10.0) + (realtype)(s+1) * poly;
          id_data[base + s] = ONE;
        } else {
          cc_data[base + s] = RCONST(1.0e5);
          id_data[base + s] = ZERO;
        }
      }
    }
  }

  /* Set c' for the prey by calling the function Fweb. */
  Fweb(ZERO, cc, cp, webdata);

  /* Set c' for predators to 0. */
  for (iy = 0; iy < MY; iy++) {
    row = NSMX * iy;
    for (ix = 0; ix < MX; ix++) {
      base = row + NUM_SPECIES * ix;
      for (s = nprey; s < NUM_SPECIES; s++)
        cp_data[base + s] = ZERO;
    }
  }
}
/*
* Print first lines of output (problem description)
*/
/*
 * PrintHeader: Print the problem description, tolerances, solver choice,
 * and the column headings for the subsequent output table.
 * The preprocessor branches select a format matching realtype's width.
 */
static void PrintHeader(int maxl, realtype rtol, realtype atol)
{
printf("\nidaFoodWeb_kry_omp: Predator-prey DAE OpenMP example problem using Krylov solver for IDA \n\n");
printf("Number of species ns: %d", NUM_SPECIES);
printf(" Mesh dimensions: %d x %d", MX, MY);
printf(" System size: %d\n", NEQ);
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("Tolerance parameters: rtol = %Lg atol = %Lg\n", rtol, atol);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol);
#else
/* single precision: realtype promotes to double in varargs, so %g works */
printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol);
#endif
printf("Linear solver: SUNLinSol_SPGMR, maxl = %d\n",maxl);
printf("CalcIC called to correct initial predator concentrations.\n\n");
printf("-----------------------------------------------------------\n");
printf(" t bottom-left top-right");
printf(" | nst k h\n");
printf("-----------------------------------------------------------\n\n");
}
/*
* PrintOutput: Print output values at output time t = tt.
* Selected run statistics are printed. Then values of the concentrations
* are printed for the bottom left and top right grid points only.
*/
/*
 * PrintOutput: Print output values at output time t = tt.
 * Queries IDA for the last order, step count, and step size, then prints
 * the concentrations at the bottom-left and top-right grid points only.
 * The preprocessor branches select formats matching realtype's width.
 */
static void PrintOutput(void *ida_mem, N_Vector c, realtype t)
{
int i, kused, retval;
long int nst;
realtype *c_bl, *c_tr, hused;
/* Fetch the solver statistics shown in the right-hand columns. */
retval = IDAGetLastOrder(ida_mem, &kused);
check_retval(&retval, "IDAGetLastOrder", 1);
retval = IDAGetNumSteps(ida_mem, &nst);
check_retval(&retval, "IDAGetNumSteps", 1);
retval = IDAGetLastStep(ida_mem, &hused);
check_retval(&retval, "IDAGetLastStep", 1);
/* Pointers to the species vectors at the two sampled grid points. */
c_bl = IJ_Vptr(c,0,0);
c_tr = IJ_Vptr(c,MX-1,MY-1);
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("%8.2Le %12.4Le %12.4Le | %3ld %1d %12.4Le\n",
t, c_bl[0], c_tr[0], nst, kused, hused);
for (i=1;i<NUM_SPECIES;i++)
printf(" %12.4Le %12.4Le |\n",c_bl[i],c_tr[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
t, c_bl[0], c_tr[0], nst, kused, hused);
for (i=1;i<NUM_SPECIES;i++)
printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#else
printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
t, c_bl[0], c_tr[0], nst, kused, hused);
for (i=1;i<NUM_SPECIES;i++)
printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#endif
printf("\n");
}
/*
* PrintFinalStats: Print final run data contained in iopt.
*/
/*
 * PrintFinalStats: Query IDA for the cumulative run counters and print
 * them in a fixed, human-readable table.
 */
static void PrintFinalStats(void *ida_mem)
{
  long int nsteps, nresevals, nliniters, nerrfails, npsolves, nprecevals,
           nresevalsLS;
  int retval;

  /* Collect all counters first, checking each query. */
  retval = IDAGetNumSteps(ida_mem, &nsteps);
  check_retval(&retval, "IDAGetNumSteps", 1);
  retval = IDAGetNumLinIters(ida_mem, &nliniters);
  check_retval(&retval, "IDAGetNumLinIters", 1);
  retval = IDAGetNumResEvals(ida_mem, &nresevals);
  check_retval(&retval, "IDAGetNumResEvals", 1);
  retval = IDAGetNumErrTestFails(ida_mem, &nerrfails);
  check_retval(&retval, "IDAGetNumErrTestFails", 1);
  retval = IDAGetNumPrecSolves(ida_mem, &npsolves);
  check_retval(&retval, "IDAGetNumPrecSolves", 1);
  retval = IDAGetNumPrecEvals(ida_mem, &nprecevals);
  check_retval(&retval, "IDAGetNumPrecEvals", 1);
  retval = IDAGetNumLinResEvals(ida_mem, &nresevalsLS);
  check_retval(&retval, "IDAGetNumLinResEvals", 1);

  /* Then print them in the original order. */
  printf("-----------------------------------------------------------\n");
  printf("Final run statistics: \n\n");
  printf("Number of steps = %ld\n", nsteps);
  printf("Number of residual evaluations = %ld\n", nresevals);
  printf("Number of Preconditioner evaluations = %ld\n", nprecevals);
  printf("Number of linear iterations = %ld\n", nliniters);
  printf("Number of error test failures = %ld\n", nerrfails);
  printf("Number of precond solve fun called = %ld\n", npsolves);
}
/*
* Fweb: Rate function for the food-web problem.
* This routine computes the right-hand sides of the system equations,
* consisting of the diffusion term and interaction term.
* The interaction term is computed by the function WebRates.
*/
/*
 * Fweb: Rate function for the food-web problem.
 * Loads crate with the right-hand sides at every grid point:
 * central-difference diffusion terms plus the interaction terms
 * computed by WebRates (which are cached in webdata->rates).
 */
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate,
UserData webdata)
{
sunindextype jx, jy, is, idyu, idyl, idxu, idxl;
realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui;
/* Loop over grid points, evaluate interaction vector (length ns),
form diffusion difference terms, and load crate. */
jx = jy = is = 0;
for (jy = 0; jy < MY; jy++) {
yy = (webdata->dy) * jy ;
/* Offsets to the neighboring grid points in the flat storage; the sign
   flips at the mesh edges so the out-of-range neighbor is replaced by
   the in-range one (homogeneous Neumann boundary by reflection). */
idyu = (jy!=MY-1) ? NSMX : -NSMX;
idyl = (jy!= 0 ) ? NSMX : -NSMX;
for (jx = 0; jx < MX; jx++) {
xx = (webdata->dx) * jx;
idxu = (jx!= MX-1) ? NUM_SPECIES : -NUM_SPECIES;
idxl = (jx!= 0 ) ? NUM_SPECIES : -NUM_SPECIES;
cxy = IJ_Vptr(cc,jx,jy);
ratesxy = IJ_Vptr(webdata->rates,jx,jy);
cratexy = IJ_Vptr(crate,jx,jy);
/* Get interaction vector at this grid point. */
WebRates(xx, yy, cxy, ratesxy, webdata);
/* Loop over species, do differencing, load crate segment. */
#pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads)
for (is = 0; is < NUM_SPECIES; is++) {
/* Differencing in y. */
dcyli = *(cxy+is) - *(cxy - idyl + is) ;
dcyui = *(cxy + idyu + is) - *(cxy+is);
/* Differencing in x. */
dcxli = *(cxy+is) - *(cxy - idxl + is);
dcxui = *(cxy + idxu +is) - *(cxy+is);
/* Compute the crate values at (xx,yy). */
cratexy[is] = coy[is] * (dcyui - dcyli) +
cox[is] * (dcxui - dcxli) + ratesxy[is];
} /* End is loop */
} /* End of jx loop */
} /* End of jy loop */
}
/*
* WebRates: Evaluate reaction rates at a given spatial point.
* At a given (x,y), evaluate the array of ns reaction terms R.
*/
/*
 * WebRates: Evaluate the ns reaction terms R at a spatial point (xx,yy):
 * ratesxy[i] = cxy[i] * ( b(i)*fac + sum_j a(i,j)*cxy[j] ),
 * where fac carries the spatial variation of the b coefficients.
 */
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
                     UserData webdata)
{
  int s;
  realtype bfac;

  /* Linear interaction part: one dot product per species. */
  for (s = 0; s < NUM_SPECIES; s++)
    ratesxy[s] = dotprod(NUM_SPECIES, cxy, acoef[s]);

  /* Spatially varying multiplier for the b coefficients. */
  bfac = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy);

  /* Combine with the species concentration. */
  for (s = 0; s < NUM_SPECIES; s++)
    ratesxy[s] = cxy[s]*( bcoef[s]*bfac + ratesxy[s] );
}
/*
* dotprod: dot product routine for realtype arrays, for use by WebRates.
*/
/*
 * dotprod: Dot product of two realtype arrays of the given length,
 * used by WebRates for the interaction terms.
 */
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2)
{
  sunindextype k;
  realtype sum = ZERO;

  for (k = 0; k < size; k++)
    sum += x1[k] * x2[k];

  return(sum);
}
/*
* Check function return value...
* opt == 0 means SUNDIALS function allocates memory so check if
* returned NULL pointer
* opt == 1 means SUNDIALS function returns an integer value so check if
* retval < 0
* opt == 2 means function allocates memory so check if returned
* NULL pointer
*/
/*
 * check_retval: Common error-checking helper.
 *   opt == 0 : returnvalue is from a SUNDIALS allocator; NULL is failure.
 *   opt == 1 : returnvalue points at an int retval; negative is failure.
 *   opt == 2 : returnvalue is from a plain allocator; NULL is failure.
 * Prints a diagnostic to stderr and returns 1 on failure, 0 otherwise.
 */
static int check_retval(void *returnvalue, char *funcname, int opt)
{
  int *errvalue;

  switch (opt) {
  case 0:
    /* SUNDIALS function returned NULL pointer - no memory allocated */
    if (returnvalue == NULL) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return(1);
    }
    break;
  case 1:
    /* SUNDIALS function returned an integer; negative means failure */
    errvalue = (int *) returnvalue;
    if (*errvalue < 0) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
              funcname, *errvalue);
      return(1);
    }
    break;
  case 2:
    /* plain allocator returned NULL pointer - no memory allocated */
    if (returnvalue == NULL) {
      fprintf(stderr,
              "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return(1);
    }
    break;
  default:
    break;
  }

  return(0);
}
|
androidfde_fmt_plug.c | /* androidfde.c
*
* hashkill - a hash cracking tool
* Copyright (C) 2010 Milen Rangelov <gat3way@gat3way.eu>
*
* Modified for JtR and made stuff more generic
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_fde;
#elif FMT_REGISTERS_H
john_register_one(&fmt_fde);
#else
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "os.h"
#include "stdint.h"
#include <stdlib.h>
#include <sys/types.h>
#include <openssl/aes.h>
#include <string.h>
#include "arch.h"
#include "johnswap.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "memory.h"
#include "pbkdf2_hmac_sha1.h"
// NOTE, this format FAILS for generic sha2. It could be due to interaction between openssl/aes and generic sha2 code.
#include "sha2.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE 1
#endif
#include "memdbg.h"
#define FORMAT_TAG "$fde$"
#define TAG_LENGTH 5
#define FORMAT_LABEL "fde"
#define FORMAT_NAME "Android FDE"
#ifdef MMX_COEF
#define ALGORITHM_NAME "PBKDF2-SHA1 SHA256/AES " SHA1_N_STR MMX_TYPE
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 SHA256/AES 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define PLAINTEXT_LENGTH 64
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define SALT_SIZE sizeof(struct custom_salt)
#ifdef MMX_COEF
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests fde_tests[] = {
{"$fde$16$04b36d4290b56e0fcca9778b74719ab8$16$b45f0f051f13f84872d1ef1abe0ada59$0f61d28f7466c0435040cc845a67e6734500de15df3ba6f48d2534ca2a7b8f910d7547357e8f1ec7364bab41383f5df9b5fb43fcd4a1e06189ce3c6ba77ec908b066e73a508e201c941fb409e9abdc051c3c052a735b01e56be61efa635e82cbceab18db1ba645b93f7befb83155852f0004a7c7d6800e9fa5f0d3c133dd2496f92110c3cdcfb16dcf57df8de830969e18514a34d4917de14597da19f9f7dc81eca2d7d461c91e0a8aeac06bafe89866d24f2b4991b4295b6277d0ff4ad97f1fa58e20f8a24e2062f84c318eb36cfbb4671117bc3522afcf7737353589cae0dce0d7c3341f457af654543758f3f005bd4d68fa2b35777cb2ea5f8f69c4debcfb1d8b2a601320e4f8621dc6e99434007388bdc0ceebc722f9ed44cbce3914bf144db332276e719f6b48108cde55916d861d19dc8c03ac76a2dad322457073111e441488228f13649073aa3aadfab51dadf89a0827acba284154a9e18d926facef43852a0733660a1fbcca8e81d2f41efd9f645a61f9395b75fc7ad446885d304808d511f2ba2e7c6138588c4292aee4ef6f2537bb00c7b015cee4a91d2defa87b67abc1315e71f0489e271673b36412377219e93aba6af3cfd504bf3f6bc24f2b6148536339d91ddd2f013314544650c1c11e7317028a7014909d0c850f78692e476c4f57da586fe26786504130aba22ba5261b989aeb47483d8cb9d5052120a4e5690b5b0cd009aadaadc351db7b6a230ebc1fa771651cb64d78daa56b7a6c6808db3b688afee9b7edaa617d8cb16ac7290465987bd443ea41ce38aa14e0c88874fb2707394b83679de82134efe351b4d021c63b2992a8314b2e93908906400628a7f753c9a4d85e917a207561b7840ce121800fab4026508d1b00fe8e7e756573743e11380f76f6bb7c0e528cb98875e6ad88bff51236601e6942964e37ffe0316b1a1f7bc0d84334fa024bf03c261bd06a07c01f099ad23fb9a1d8c98447463b8988cb33f3e1fb7d7a7c547f9a6d51cf7b75649d3c8cb5bf93be79eba1a961659b5fe928a1c7e80aca857825c6bc11493cb230e66126ef7b7284abe0823b5735bb1dfe844029f175c63442ca774784b775ecf02e48d029ac0f236813be91aca66905640666b89bd08118e3c18c75764bc49d00d1fe53ee92ccaa487852c613cba91f637b6de06dcaa1953a7cfb5333df573273a67f0157b63fbbf48c48f16c423caefaf29cdb5d34b19ac0f57b972b9e5ff1bc5cf25bdcdf8d29fb75865c4501458f19bfd64c844fd52a27feec97dc31ba922aea75706404d853071707d0c6001c59664676be6426ca5c7efbfc09ffa9acac91441f9175fd3148fb046c31a49
d7c7ad10bf3c4b413dd148666b72b5a533f600cb02d7623270e5d1ad33355dd318d06aa8b3d7517cb7d5be40d222a026380cfbf5b79014e7631d677b07bcd805d9ea7103cf1d057bf883b29fb99b064c4e3cb4271596a74895c1c3f7c7c49d2be54b1435af4440ecd019dde11cee14a320712c9275bef339a15d3a18d9f38918d7af0a50a35199980429d74d4cc2a16dea619619a7c19827f4f78d3ebaf13340abf6717cec6bff8399b067fb17f11cdb1f9909c51253f7466ee769546d1d96319bcc1b04a6b1f8d8068f96b959d507c9004d75717792733fadb7a94a2d5db514a61cbd90eef89d1ace5a3138120168d62f1ebef5efbbd4e7f7e987834db81fe8c4877f3edcc71c61e96b20ca26c5a91e28fa11e484c1dcbfd5a0461065fe52f042ee9a09687d800c90a0a792f3dbe257965247f8eecd122b9b234b734454fa1477212a0295a347ae44463de4de405bf4fd91cde400b63d7fced6d7ccd20d79a4899139a79085f8742c3dfe7fbadca56c4e8aa95ce7841ad9675659349f6671d047efa0951feb9c61381f5f9e39182c1ec0a3ebd2ef5e036312c6ed6a0e59777813229ffdac771788e609c7d9f96848f63b428789c55e85c509068df8d5a0a7fc066be8c76205860d86d6c5bb7c2bc85a922a2ad86e6a791fe238420eedd1cf7ac770dd8316ca30c9577441a34873cdf0c5dc2103457a93fa0dd42da5eb2d6f82e9ff47b4bb6cd1d3fcba5645caace577a89c7bd70ff432f8dae113a7877a41a41043dac4c0d21860ad8198a1b9640d979322a20d4b90caa77a5d2b31c5bd06e", "strongpassword"},
{NULL}
};
/* Per-candidate plaintext buffers, sized in init(). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* cracked[i] is set by crypt_all()/hash_plugin_check_hash() when candidate i succeeds. */
static int *cracked;
/* Number of entries in cracked[] (== max_keys_per_crypt after init()). */
static int max_cracked;
/* Parsed androidfde "hash": salt, encrypted master key and the first
   filesystem sectors used for plaintext recognition. */
static struct custom_salt {
int loaded;
unsigned char *cipherbuf;
int keysize;
int iterations; // NOTE, not used. Hard coded to 2000 for FDE from droid <= 4.3 (PBKDF2-sha1)
int saltlen;
unsigned char data[512 * 3];
unsigned char salt[16];
unsigned char mkey[64];
unsigned char iv[16];
} *cur_salt;
/* One-time format initialization: scale key counts for OpenMP and
   allocate the per-candidate key and result buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
/* min is scaled by the thread count, max additionally by OMP_SCALE
   (a per-format tuning factor) — standard JtR OpenMP boilerplate. */
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc_tiny(sizeof(*saved_key) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
cracked = mem_calloc_tiny(sizeof(*cracked) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
max_cracked = self->params.max_keys_per_crypt;
}
/*
 * Validate a "$fde$saltlen$salt$keysize$key$data" ciphertext line.
 * Returns 1 when the line is well-formed, 0 otherwise.
 *
 * Improvements over the previous version:
 *  - reject negative saltlen/keysize (atoi can return negatives);
 *  - verify that salt, key and data fields are pure hex, since
 *    get_salt() decodes them via atoi16 without further checks;
 *  - fix the stray "*$" delimiter typo in the keysize strtok call.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	static const char HEX[] = "0123456789abcdefABCDEF";
	char *ctcopy, *keeptr;
	int saltlen, keysize;
	char *p;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += TAG_LENGTH;
	if ((p = strtok(ctcopy, "$")) == NULL)	/* salt length */
		goto err;
	saltlen = atoi(p);
	if (saltlen < 0 || saltlen > 16)
		goto err;
	if ((p = strtok(NULL, "$")) == NULL)	/* salt */
		goto err;
	if (strlen(p) != (size_t)saltlen * 2)
		goto err;
	if (strspn(p, HEX) != strlen(p))
		goto err;
	if ((p = strtok(NULL, "$")) == NULL)	/* key size */
		goto err;
	keysize = atoi(p);
	if (keysize < 0 || keysize > 64)
		goto err;
	if ((p = strtok(NULL, "$")) == NULL)	/* encrypted master key */
		goto err;
	if (strlen(p) != (size_t)keysize * 2)
		goto err;
	if (strspn(p, HEX) != strlen(p))
		goto err;
	if ((p = strtok(NULL, "$")) == NULL)	/* filesystem data sectors */
		goto err;
	if (strlen(p) != 512 * 3 * 2)
		goto err;
	if (strspn(p, HEX) != strlen(p))
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/* Parse a ciphertext (already screened by valid()) into a custom_salt.
   Returns a pointer to a static buffer — JtR copies it via salt_size;
   this is the usual convention for formats without a split() salt. */
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
// int res;
int i;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += TAG_LENGTH;
/* Fields below are unchecked: valid() already guaranteed presence,
   lengths and (ideally) hex content of every "$"-separated token. */
p = strtok(ctcopy, "$");
cs.saltlen = atoi(p);
p = strtok(NULL, "$");
/* atoi16[] maps an ASCII hex digit to its value; two digits per byte. */
for (i = 0; i < cs.saltlen; i++) {
cs.salt[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
p += 2;
}
p = strtok(NULL, "$");
cs.keysize = atoi(p);
p = strtok(NULL, "$");
for (i = 0; i < cs.keysize; i++) {
cs.mkey[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
p += 2;
}
p = strtok(NULL, "$");
/* Three 512-byte sectors of the encrypted filesystem, used later to
   recognize a correctly decrypted FAT or ext superblock. */
for (i = 0; i < 512 * 3; i++) {
cs.data[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
p += 2;
}
MEM_FREE(keeptr);
return (void *)&cs;
}
/* Install the salt selected by the cracking loop; crypt_all() and the
   AES helpers read it through the cur_salt global. */
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
// Not reference implementation - this is modified for use by androidfde!
/* AES-CBC-ESSIV sector decryption:
   1. essiv_key = SHA-256(disk key)  (always a 256-bit ESSIV key);
   2. essiv     = AES-256-CBC-encrypt(sector number, essiv_key, IV=0);
   3. plaintext = AES-CBC-decrypt(src, disk key, IV=essiv).
   'size' is the byte count handed to AES_cbc_encrypt (a multiple of 16). */
static void AES_cbc_essiv(unsigned char *src, unsigned char *dst, unsigned char *key, int startsector,int size)
{
AES_KEY aeskey;
unsigned char essiv[16];
unsigned char essivhash[32];
SHA256_CTX ctx;
unsigned char sectorbuf[16];
unsigned char zeroiv[16];
SHA256_Init(&ctx);
SHA256_Update(&ctx, key, cur_salt->keysize);
SHA256_Final(essivhash, &ctx);
memset(sectorbuf,0,16);
memset(zeroiv,0,16);
memset(essiv,0,16);
/* NOTE(review): copies the raw int bytes — assumes little-endian host
   for the on-disk little-endian sector number; confirm for BE targets. */
memcpy(sectorbuf,&startsector,4);
AES_set_encrypt_key(essivhash, 256, &aeskey);
AES_cbc_encrypt(sectorbuf, essiv, 16, &aeskey, zeroiv, AES_ENCRYPT);
AES_set_decrypt_key(key, cur_salt->keysize*8, &aeskey);
AES_cbc_encrypt(src, dst, size, &aeskey, essiv, AES_DECRYPT);
}
// cracked[index] = hash_plugin_check_hash(saved_key[index]);
/* Test one candidate password (or one SIMD group of candidates):
   derive keysize+16 bytes with PBKDF2-SHA1 (2000 iterations, fixed for
   Android <= 4.3 FDE), decrypt the master key, then decrypt the first
   filesystem sectors and look for a plausible FAT or ext superblock. */
void hash_plugin_check_hash(int index)
{
unsigned char keycandidate2[255];
unsigned char decrypted1[512]; // FAT
unsigned char decrypted2[512]; // ext3/4
AES_KEY aeskey;
uint16_t v2,v3,v4;
uint32_t v1,v5;
int j = 0;
#ifdef MMX_COEF
/* SIMD path: derive SSE_GROUP_SZ_SHA1 keys at once. */
unsigned char *keycandidate, Keycandidate[SSE_GROUP_SZ_SHA1][255];
int lens[SSE_GROUP_SZ_SHA1], i;
unsigned char *pin[SSE_GROUP_SZ_SHA1];
union {
ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA1];
unsigned char *poutc;
} x;
for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
lens[i] = strlen(saved_key[index+i]);
pin[i] = (unsigned char*)saved_key[index+i];
x.pout[i] = (ARCH_WORD_32*)(Keycandidate[i]);
}
pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt, 16,
2000, &(x.poutc), cur_salt->keysize + 16, 0);
#else
/* Scalar path: one derived key of keysize bytes plus a 16-byte IV. */
unsigned char keycandidate[255];
char *password = saved_key[index];
pbkdf2_sha1((const uint8_t*)password, strlen(password), (const uint8_t*)(cur_salt->salt),
16, 2000, keycandidate, cur_salt->keysize + 16, 0);
#endif
#if !ARCH_LITTLE_ENDIAN
/* PBKDF2 output is produced as 32-bit words; fix byte order on BE hosts. */
{
int i;
for (i = 0; i < (cur_salt->keysize + 16)/sizeof(ARCH_WORD_32); ++i) {
((ARCH_WORD_32*)keycandidate)[i] = JOHNSWAP(((ARCH_WORD_32*)keycandidate)[i]);
}
}
#endif
j = 0;
#ifdef MMX_COEF
for (; j < SSE_GROUP_SZ_SHA1; ++j) {
keycandidate = Keycandidate[j];
#endif
/* Decrypt 16 bytes of the stored master key: key = derived[0..keysize),
   IV = derived[keysize..keysize+16). */
AES_set_decrypt_key(keycandidate, cur_salt->keysize*8, &aeskey);
AES_cbc_encrypt(cur_salt->mkey, keycandidate2, 16, &aeskey, keycandidate+16, AES_DECRYPT);
/* Decrypt the start of sector 0 (FAT boot sector) and of sector 2
   (ext superblock lives 1024 bytes in) with the candidate master key. */
AES_cbc_essiv(cur_salt->data, decrypted1, keycandidate2,0,32);
AES_cbc_essiv(cur_salt->data + 1024, decrypted2, keycandidate2,2,128);
// Check for FAT
if ((memcmp(decrypted1+3,"MSDOS5.0",8)==0))
cracked[index+j] = 1;
else {
// Check for extfs
/* Sanity-check small superblock fields; all must be tiny values on a
   real ext filesystem, which is unlikely for random garbage. */
memcpy(&v1,decrypted2+72,4);
memcpy(&v2,decrypted2+0x3a,2);
memcpy(&v3,decrypted2+0x3c,2);
memcpy(&v4,decrypted2+0x4c,2);
memcpy(&v5,decrypted2+0x48,4);
#if !ARCH_LITTLE_ENDIAN
v1 = JOHNSWAP(v1);
v2 = JOHNSWAP(v2);
v3 = JOHNSWAP(v3);
v4 = JOHNSWAP(v4);
v5 = JOHNSWAP(v5);
#endif
if ((v1<5)&&(v2<4)&&(v3<5)&&(v4<2)&&(v5<5))
cracked[index+j] = 1;
}
#ifdef MMX_COEF
}
#endif
}
/* Run the key-derivation/decryption check over the whole candidate batch. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

	/* Fresh batch: clear all previous results first. */
	memset(cracked, 0, max_cracked * sizeof(cracked[0]));

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
		hash_plugin_check_hash(index);

	return count;
}
/* Return non-zero if any candidate in the batch was flagged as cracked. */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (cracked[i])
			return 1;
	}
	return 0;
}
/* Per-candidate check: crypt_all() already recorded the verdict. */
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
/* No binary to compare against (BINARY_SIZE is 0); cmp_one is final. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Store one candidate password, truncated to PLAINTEXT_LENGTH and
   always NUL-terminated. */
static void fde_set_key(char *key, int index)
{
	size_t len = strlen(key);

	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
	saved_key[index][len] = 0;
}
/* Return the stored candidate exactly as fde_set_key() saved it. */
static char *get_key(int index)
{
return saved_key[index];
}
/* Format descriptor registered with John the Ripper. The first struct
   holds static parameters, the second the method table. No binary or
   hashes are used: results come from crypt_all() via cracked[]. */
struct fmt_main fmt_fde = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fde_tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
set_salt,
fde_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
jacobi_omp.c | /*
* Copyright (c) 2008, BSC (Barcelon Supercomputing Center)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BSC ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
#include <time.h>
#define NB 64
#define B 128
#define FALSE (0)
#define TRUE (1)
typedef double fp_type;
typedef fp_type *vin;
typedef fp_type *vout;
typedef fp_type *bin;
typedef fp_type *binout;
fp_type *A[NB][NB];
fp_type *A_new[NB][NB];
fp_type *tmp[NB][NB];
void alloc_and_genmat()
{
int init_val, i, j, ii, jj;
fp_type *p, *p_new;
init_val = 1325;
for (ii = 0; ii < NB; ii++)
{
for (jj = 0; jj < NB; jj++)
{
A[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
A_new[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
tmp[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
if (A[ii][jj] == NULL || A_new[ii][jj] == NULL || tmp[ii][jj] == NULL)
{
printf("Out of memory\n");
exit(1);
}
p = A[ii][jj];
p_new = A_new[ii][jj];
for (i = 0; i < B; i++)
{
for (j = 0; j < B; j++)
{
init_val = (3125 * init_val) % 65536;
(*p) = (fp_type)((init_val - 32768.0) / 16384.0);
(*p_new) = (*p);
p++;
p_new++;
}
}
}
}
}
/* Wall-clock time in microseconds since the Unix epoch. */
long usecs(void)
{
	struct timeval now;

	gettimeofday(&now, NULL);
	return now.tv_sec * 1000000 + now.tv_usec;
}
/* Zero-fill one halo vector of length B (used for grid boundaries).
   Fix: removed the unused locals j and k. */
void clear(vout v)
{
	int i;

	for (i = 0; i < B; i++)
		v[i] = (fp_type)0.0;
}
/* Copy the bottom row (row B-1) of the B x B tile A into v. */
void getlastrow(bin A, vout v)
{
	int col;

	for (col = 0; col < B; col++)
		v[col] = A[(B - 1) * B + col];
}
/* Copy the rightmost column (column B-1) of the B x B tile A into v. */
void getlastcol(bin A, vout v)
{
	int row;

	for (row = 0; row < B; row++)
		v[row] = A[row * B + B - 1];
}
/* Copy the top row (row 0) of the B x B tile A into v. */
void getfirstrow(bin A, vout v)
{
	int col;

	for (col = 0; col < B; col++)
		v[col] = A[col];
}
/* Copy the leftmost column (column 0) of the B x B tile A into v. */
void getfirstcol(bin A, vout v)
{
	int row;

	for (row = 0; row < B; row++)
		v[row] = A[row * B];
}
/*
 * One 5-point Jacobi sweep over a B x B tile, reading neighbor values
 * from A and the four halo vectors, writing results to A_new.
 *
 * Halo indexing fix: tophalo/bottomhalo are ROWS and must be indexed by
 * the column j; lefthalo/righthalo are COLUMNS and must be indexed by
 * the row i. The previous code used lefthalo[j], tophalo[i] and
 * bottomhalo[i] (only righthalo[i] was correct), so three of the four
 * boundaries read the wrong halo element. Also removed the unused
 * local 'tmp'.
 */
void jacobi(vin lefthalo, vin tophalo, vin righthalo, vin bottomhalo, bin A, binout A_new)
{
	int i, j;
	fp_type left, top, right, bottom;

	for (i = 0; i < B; i++)
	{
		for (j = 0; j < B; j++)
		{
			left = (j == 0 ? lefthalo[i] : A[i * B + j - 1]);
			top = (i == 0 ? tophalo[j] : A[(i - 1) * B + j]);
			right = (j == B - 1 ? righthalo[i] : A[i * B + j + 1]);
			bottom = (i == B - 1 ? bottomhalo[j] : A[(i + 1) * B + j]);
			A_new[i * B + j] = 0.2 * (A[i * B + j] + left + top + right + bottom);
		}
	}
}
/*
 * Maximum absolute difference between A_new and A over the whole grid.
 *
 * Fix: jj, i and j were declared outside the `omp parallel for` and so
 * were SHARED across threads — a data race that could corrupt the loop
 * bounds and the result. They are now declared inside the parallel
 * region (one private copy per thread). The outer loop variable ii is
 * privatized automatically by the `for` construct.
 */
double maxdelta()
{
	double dmax = -__DBL_MAX__;
	int ii;

#pragma omp parallel for schedule(static) reduction(max: dmax)
	for (ii = 0; ii < NB; ii++)
	{
		int jj, i, j;      /* private per thread */
		for (jj = 0; jj < NB; jj++)
		{
			for (i = 0; i < B; i++)
			{
				for (j = 0; j < B; j++)
				{
					double diff = fabs(A_new[ii][jj][i * B + j] - A[ii][jj][i * B + j]);
					if (diff > dmax)
						dmax = diff;
				}
			}
		}
	}
	return dmax;
}
/*
 * Run `niters` Jacobi iterations: build halos for every tile, sweep
 * each tile, report the max change, then copy A_new back into A.
 *
 * Fix: on the jj == NB-1 boundary the code called clear(lefthalo)
 * instead of clear(righthalo), leaving righthalo uninitialized (stack
 * garbage) for the rightmost column of tiles. Also removed the unused
 * local `epsilon`.
 */
void compute(int niters)
{
	int iters;
	int ii, jj;
	fp_type lefthalo[B], tophalo[B], righthalo[B], bottomhalo[B];
	double delta;

	iters = 0;
	while (iters < niters)
	{
		++iters;
#pragma omp parallel \
	private(ii, jj, lefthalo, tophalo, righthalo, bottomhalo) \
	shared(A, A_new)
		{
#pragma omp for schedule(static)
			for (ii = 0; ii < NB; ii++)
			{
				for (jj = 0; jj < NB; jj++)
				{
					/* Halo rows come from vertical neighbors,
					   halo columns from horizontal neighbors;
					   grid edges use zero halos. */
					if (ii > 0)
						getlastrow(A[ii - 1][jj], tophalo);
					else
						clear(tophalo);
					if (jj > 0)
						getlastcol(A[ii][jj - 1], lefthalo);
					else
						clear(lefthalo);
					if (ii < NB - 1)
						getfirstrow(A[ii + 1][jj], bottomhalo);
					else
						clear(bottomhalo);
					if (jj < NB - 1)
						getfirstcol(A[ii][jj + 1], righthalo);
					else
						clear(righthalo);	/* was: clear(lefthalo) */
					jacobi(lefthalo, tophalo, righthalo, bottomhalo, A[ii][jj], A_new[ii][jj]);
				} // jj
			} // ii
		} // end parallel
		delta = maxdelta();
		printf("iteration %d: delta = %e\n", iters, delta);
		// yes, this is an inefficient copy
		// however, the library version requires you to do a copy in this way
		// on all of the component parts to avoid segmentation fault
#pragma omp parallel for schedule(static) shared(A, A_new)
		for (int i = 0; i < NB; ++i)
		{
			for (int j = 0; j < NB; ++j)
			{
				for (int k = 0; k < B; ++k)
					for (int l = 0; l < B; ++l)
						A[i][j][k * B + l] = A_new[i][j][k * B + l];
			}
		}
	} // iter
}
/* Driver: read iteration count from argv[1] (default 1), initialize the
   grid, run the solver and print the elapsed wall-clock time. */
int main(int argc, char *argv[])
{
int niters;
// pp_time_t tm;
// memset( &tm, 0, sizeof(tm) );
struct timespec start, end;
if (argc > 1)
{
niters = atoi(argv[1]);
}
else
niters = 1;
alloc_and_genmat();
/* CLOCK_MONOTONIC: unaffected by wall-clock adjustments during the run. */
clock_gettime(CLOCK_MONOTONIC, &start);
compute(niters);
clock_gettime(CLOCK_MONOTONIC, &end);
double time_taken = (end.tv_sec - start.tv_sec) * 1e9;
time_taken = (time_taken + (end.tv_nsec - start.tv_nsec)) * 1e-9;
printf("Running time = %g %s\n", time_taken, "s");
/* FILE *outFile;
outFile = fopen("./jacobi_omp_values.txt", "w");
if (outFile == NULL)
{
fprintf(stderr, "Error writing to file\n");
}
else
{
int ii, jj, i, j;
for (ii = 0; ii < NB; ++ii)
for (jj = 0; jj < NB; ++jj)
for (i = 0; i < B; ++i)
for (j = 0; j < B; ++j)
fprintf(outFile, "%.15f\n", A[ii][jj][i * B + j]);
fclose(outFile);
} */
return 0;
} |
CameraUtil.h | #pragma once
namespace CameraUtil {
// Range (intensity) Gaussian kernel: exp(-d^2 / (2*sigma^2)).
inline float gaussR(float sigma, float dist)
{
	const float d2 = dist * dist;
	return std::exp(-d2 / (2.0f * sigma * sigma));
}
// Linear range falloff, clamped to [0, 1].
// Fix: the clamp bounds were swapped — std::max(1.0f, std::min(0.0f, x))
// always evaluates to 1.0f, making the kernel a constant.
inline float linearR(float sigma, float dist)
{
	return std::max(0.0f, std::min(1.0f, 1.0f - (dist*dist) / (2.0f*sigma*sigma)));
}
// Spatial (domain) Gaussian weight for a 2-D pixel offset (x, y).
inline float gaussD(float sigma, int x, int y)
{
	const float r2 = (float)(x*x + y*y);
	return std::exp(-(r2 / (2.0f * sigma * sigma)));
}
// Spatial (domain) Gaussian weight for a 1-D offset x.
inline float gaussD(float sigma, int x)
{
	const float r2 = (float)(x * x);
	return std::exp(-(r2 / (2.0f * sigma * sigma)));
}
/* Edge-preserving bilateral filter over a depth image.
   sigmaD controls the spatial kernel (window radius = ceil(2*sigmaD)),
   sigmaR the depth-range kernel. Pixels with value -inf are treated as
   invalid: they contribute nothing and remain -inf in the output. */
void bilateralFilter(const DepthImage32& input, float sigmaD, float sigmaR, DepthImage32& output)
{
if (output.getDimensions() != input.getDimensions())
output.allocateSameSize(input);
/* -inf is the sentinel for "no depth sample". */
output.setPixels(-std::numeric_limits<float>::infinity());
const int kernelRadius = (int)std::ceil(2.0*sigmaD);
#pragma omp parallel for
for (int y = 0; y < (int)input.getHeight(); y++) {
for (int x = 0; x < (int)input.getWidth(); x++) {
float sum = 0.0f;
float sumWeight = 0.0f;
const float depthCenter = input(x, y);
if (depthCenter != -std::numeric_limits<float>::infinity())
{
/* Weighted average over the window, clipped to image bounds,
   skipping invalid neighbors. */
for (int m = (int)x - kernelRadius; m <= (int)x + kernelRadius; m++)
{
for (int n = (int)y - kernelRadius; n <= (int)y + kernelRadius; n++)
{
if (m >= 0 && n >= 0 && m < (int)input.getWidth() && n < (int)input.getHeight())
{
const float currentDepth = input(m, n);
if (currentDepth != -std::numeric_limits<float>::infinity()) {
/* weight = spatial Gaussian * range Gaussian */
const float weight = gaussD(sigmaD, m - x, n - y)*gaussR(sigmaR, currentDepth - depthCenter);
sumWeight += weight;
sum += weight*currentDepth;
}
}
}
}
if (sumWeight > 0.0f) output(x, y) = sum / sumWeight;
else output(x, y) = -std::numeric_limits<float>::infinity();
}
else output(x, y) = -std::numeric_limits<float>::infinity();
} //x
} //y
}
// thresh: max percentage of valid neighbors to be an edge
/* Classify each pixel of a depth image:
     0 = invalid (depth is -inf),
     2 = depth edge (<= thresh fraction of window neighbors are within
         depthThresh of the center),
     1 = interior (everything else).
   The window is (2*radius+1)^2 pixels, clipped at image borders (the
   ratio still divides by the full window size, so border pixels are
   biased toward being labeled edges). */
void computeEdgeMask(const DepthImage32& input, float depthThresh, float thresh, int radius, BaseImage<unsigned char>& mask)
{
if (mask.getDimensions() != input.getDimensions())
mask.allocate(input.getWidth(), input.getHeight());
memset(mask.getData(), 0, sizeof(unsigned char)*mask.getNumPixels());
const int size = (2 * radius + 1) * (2 * radius + 1);
for (unsigned int y = 0; y < input.getHeight(); y++) {
for (unsigned int x = 0; x < input.getWidth(); x++) {
unsigned int count = 0;
const float depthCenter = input(x, y);
if (depthCenter != -std::numeric_limits<float>::infinity())
{
/* Count window neighbors whose depth is close to the center's. */
for (int m = (int)x - radius; m <= (int)x + radius; m++)
{
for (int n = (int)y - radius; n <= (int)y + radius; n++)
{
if (m >= 0 && n >= 0 && m < (int)input.getWidth() && n < (int)input.getHeight())
{
const float currentDepth = input(m, n);
if (currentDepth != -std::numeric_limits<float>::infinity() && std::fabs(depthCenter - currentDepth) <= depthThresh) {
count++;
}
}
}
}
if ((float)count/(float)size <= thresh) mask(x, y) = 2; // edge
else mask(x, y) = 1; //non-edge
}
else mask(x, y) = 0; //nothing
} //x
} //y
}
} // namespace CameraUtil |
GB_unop__abs_int16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__abs_int16_int16)
// op(A') function: GB (_unop_tran__abs_int16_int16)
// C type: int16_t
// A type: int16_t
// cast: int16_t cij = aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CAST(z, aij) \
int16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = aij ; \
Cx [pC] = GB_IABS (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Apply cij = |aij| element-wise over a full or bitmap int16 matrix.
   (Auto-generated kernel; the macros above define the type/op plumbing.) */
GrB_Info GB (_unop_apply__abs_int16_int16)
(
int16_t *Cx, // Cx and Ax may be aliased
const int16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
/* Dense case: every entry is present. */
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t aij = Ax [p] ;
int16_t z = aij ;
Cx [p] = GB_IABS (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int16_t aij = Ax [p] ;
int16_t z = aij ;
Cx [p] = GB_IABS (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = |A'|: transpose with the abs operator applied during the move.
   The shared transpose loop body lives in GB_unop_transpose.c and
   expands using the GB_* macros defined above. */
GrB_Info GB (_unop_tran__abs_int16_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
collapse-2.c | /* { dg-do run } */
#include <stdlib.h>
#include <omp.h>
/* libgomp conformance test for collapse(3): with 8 threads and
   schedule(static, 9), the 6*3*4 = 72 collapsed iterations must be
   handed out in 9-iteration chunks in linearized order; each thread
   checks its own chunk via the firstprivate counter f. Do not alter
   the loop bounds or schedule — they are the point of the test. */
int
main (void)
{
int i, j, k, l = 0, f = 0;
int m1 = 4, m2 = -5, m3 = 17;
#pragma omp parallel for num_threads (8) collapse(3) \
schedule(static, 9) reduction(+:l) \
firstprivate(f)
for (i = -2; i < m1; i++)
for (j = m2; j < -2; j++)
{
for (k = 13; k < m3; k++)
{
/* Linearized index of (i,j,k) must equal thread_num*9 + local count. */
if (omp_get_num_threads () == 8
&& ((i + 2) * 12 + (j + 5) * 4 + (k - 13)
!= (omp_get_thread_num () * 9
+ f++)))
l++;
}
}
if (l)
abort ();
return 0;
}
|
copyin-2.c | /* { dg-do run } */
/* { dg-require-effective-target tls_runtime } */
#include <omp.h>
#include <stdlib.h>
struct { int t; char buf[64]; } thr = { 32, "" };
#pragma omp threadprivate (thr)
/* libgomp conformance test for copyin on a threadprivate struct: the
   first parallel region must see the master's value (32) in every
   thread's copy, then each thread stores a distinct value that must
   persist into the second parallel region (dynamic threads disabled
   so the same 6 threads are reused). */
int
main (void)
{
int l = 0;
omp_set_dynamic (0);
omp_set_num_threads (6);
#pragma omp parallel copyin (thr) reduction (||:l)
{
l = thr.t != 32;
thr.t = omp_get_thread_num () + 11;
}
/* Master's copy was overwritten to 11 inside the region. */
if (l || thr.t != 11)
abort ();
#pragma omp parallel reduction (||:l)
l = thr.t != omp_get_thread_num () + 11;
if (l)
abort ();
return 0;
}
|
single.c | /*
*/
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include<assert.h>
/* Demonstrates `omp single`: exactly one thread executes each single
 * region, so i is incremented once and ends at 200.
 *
 * Fix: omp_get_num_threads() was called unconditionally, but <omp.h>
 * is only included when _OPENMP is defined — the file failed to
 * compile without OpenMP support. Both calls are now guarded, with a
 * serial fallback of 1 thread.
 */
int main(void)
{
	int i = 100;
	int num_threads = 0;

#pragma omp parallel
	{
#pragma omp single
		{
#ifdef _OPENMP
			num_threads = omp_get_num_threads();
#else
			num_threads = 1;
#endif
#pragma omp atomic
			i += 100;
		}
#pragma omp single nowait
		{
#ifdef _OPENMP
			num_threads = omp_get_num_threads();
#endif
		}
	}
	(void)num_threads;	/* kept for demonstration; silence unused warning */
	assert(i == 200);
	return 0;
}
|
mmgraph-exp.h | #ifndef MGRAPH_H
#define MGRAPH_H
#include "network.h"
#include "networkmp.h"
template<class TNode>
class TMNet;
// Node of a multi-typed network: edge ids are kept in one vector per
// edge type ("vector of vectors" layout), so per-type degree queries
// are O(1) at the cost of slower whole-node edge iteration.
class TSVNode {
private:
TInt TypeId;                    // node type id
TInt Id;                        // global node id
TVec<TIntV > InEIdVV, OutEIdVV; // edge ids, one inner vector per edge type
TInt InDeg, OutDeg;             // cached totals over all edge types
public:
TSVNode() : TypeId(-1), Id(-1), InEIdVV(), OutEIdVV(), InDeg(0), OutDeg(0) { }
TSVNode(const int& NTypeId, const int& NId) : TypeId(NTypeId), Id(NId), InEIdVV(), OutEIdVV(), InDeg(0), OutDeg(0) { }
TSVNode(const TSVNode& Node) : TypeId(Node.TypeId), Id(Node.Id), InEIdVV(Node.InEIdVV), OutEIdVV(Node.OutEIdVV), InDeg(Node.InDeg), OutDeg(Node.OutDeg) { }
// Filtered copy: keep only the edges whose type appears in the given
// in/out edge-type lists; degree counters are rebuilt from what is kept.
TSVNode(const TSVNode& Node, const TIntV& InETypeIdV, const TIntV& OutETypeIdV) :
TypeId(Node.TypeId), Id(Node.Id), InEIdVV(Node.InEIdVV.Len()), OutEIdVV(Node.OutEIdVV.Len()),
InDeg(0), OutDeg(0) {
for (int i = 0; i < InETypeIdV.Len(); i++) {
int ETypeId = InETypeIdV[i];
InEIdVV[ETypeId] = Node.InEIdVV[ETypeId];
InDeg += Node.InEIdVV[ETypeId].Len();
}
for (int i = 0; i < OutETypeIdV.Len(); i++) {
int ETypeId = OutETypeIdV[i];
OutEIdVV[ETypeId] = Node.OutEIdVV[ETypeId];
OutDeg += Node.OutEIdVV[ETypeId].Len();
}
}
// NOTE(review): deserialization leaves InDeg/OutDeg at 0 even though the
// edge vectors are loaded — looks inconsistent with Save(); confirm that
// callers rebuild the counters after loading.
TSVNode(TSIn& SIn) : TypeId(SIn), Id(SIn), InEIdVV(SIn), OutEIdVV(SIn), InDeg(0), OutDeg(0) { }
void Save(TSOut& SOut) const { TypeId.Save(SOut); Id.Save(SOut); InEIdVV.Save(SOut); OutEIdVV.Save(SOut); InDeg.Save(SOut); OutDeg.Save(SOut); }
int GetTypeId() const { return TypeId; }
int GetId() const { return Id; }
int GetDeg() const { return GetInDeg() + GetOutDeg(); }
int GetInDeg(const int& ETypeId) const {return InEIdVV[ETypeId].Len();}
int GetInDeg() const { return InDeg; }
int GetOutDeg(int ETypeId) const {return OutEIdVV[ETypeId].Len();}
int GetOutDeg() const { return OutDeg; }
// Pre-size the per-type in-edge vectors so every listed type id is a
// valid index (vector length = max type id + 1).
void AddInETypeIds(const TIntV& ETypeIds) {
int MxETypeId = -1;
for (int i = 0; i < ETypeIds.Len(); i++) {
if (MxETypeId < ETypeIds[i]) { MxETypeId = ETypeIds[i]; }
}
InEIdVV.Reserve(MxETypeId+1, MxETypeId+1);
for (int i = 0; i < ETypeIds.Len(); i++) {
InEIdVV[ETypeIds[i]] = TIntV();
}
}
// Same as AddInETypeIds, for out-edges.
void AddOutETypeIds(const TIntV& ETypeIds) {
int MxETypeId = -1;
for (int i = 0; i < ETypeIds.Len(); i++) {
if (MxETypeId < ETypeIds[i]) { MxETypeId = ETypeIds[i]; }
}
OutEIdVV.Reserve(MxETypeId+1, MxETypeId+1);
for (int i = 0; i < ETypeIds.Len(); i++) {
OutEIdVV[ETypeIds[i]] = TIntV();
}
}
void AddInNbr(const int& ETypeId, const int& EId) { InEIdVV[ETypeId].Add(EId); InDeg++; }
void AddOutNbr(const int& ETypeId, const int& EId) { OutEIdVV[ETypeId].Add(EId); OutDeg++; }
// NOTE(review): DelIfIn may be a no-op when EId is absent, yet the degree
// counter is decremented unconditionally — verify callers guarantee presence.
void DelInNbr(const int& ETypeId, const int& EId) { InEIdVV[ETypeId].DelIfIn(EId); InDeg--; }
void DelOutNbr(const int& ETypeId, const int& EId) { OutEIdVV[ETypeId].DelIfIn(EId); OutDeg--; }
// Return the EdgeN-th in-edge in type order: walk the per-type vectors
// until the cumulative length passes EdgeN, then index into that type.
int GetInEId(const int& EdgeN) const {
int CumSum = 0;
int ETypeId = 0;
for (; ETypeId < InEIdVV.Len(); ETypeId++) {
CumSum += InEIdVV[ETypeId].Len();
if (CumSum > EdgeN) { CumSum -= InEIdVV[ETypeId].Len(); break; }
}
return InEIdVV[ETypeId][EdgeN-CumSum];
}
// Out-edge counterpart of GetInEId.
int GetOutEId(const int& EdgeN) const {
int CumSum = 0;
int ETypeId = 0;
for (; ETypeId < OutEIdVV.Len(); ETypeId++) {
CumSum += OutEIdVV[ETypeId].Len();
if (CumSum > EdgeN) { CumSum -= OutEIdVV[ETypeId].Len(); break; }
}
return OutEIdVV[ETypeId][EdgeN-CumSum];
}
// Flatten all in-edge ids (every type) into EIdV.
void GetInEIdV(TIntV& EIdV) const {
EIdV.Gen(InDeg, 0);
for (int i = 0; i < InEIdVV.Len(); i++) {
EIdV.AddV(InEIdVV[i]);
}
}
// Flatten all out-edge ids (every type) into EIdV.
void GetOutEIdV(TIntV& EIdV) const {
EIdV.Gen(OutDeg, 0);
for (int i = 0; i < OutEIdVV.Len(); i++) {
EIdV.AddV(OutEIdVV[i]);
}
}
void GetInEIdV(const TInt ETypeId, TIntV& EIdV) const {
EIdV = InEIdVV[ETypeId.Val];
}
void GetOutEIdV(const TInt ETypeId, TIntV& EIdV) const {
EIdV = OutEIdVV[ETypeId.Val];
}
// Collect in-edge ids for a subset of edge types.
void GetInEIdV(const TIntV& ETypeIdV, TIntV& EIdV) const {
EIdV.Reserve(InDeg, 0);
for (int k = 0; k < ETypeIdV.Len(); k++) {
EIdV.AddV(InEIdVV[ETypeIdV[k].Val]);
}
}
// Collect out-edge ids for a subset of edge types.
void GetOutEIdV(const TIntV& ETypeIdV, TIntV& EIdV) const {
EIdV.Reserve(OutDeg, 0);
for (int k = 0; k < ETypeIdV.Len(); k++) {
EIdV.AddV(OutEIdVV[ETypeIdV[k].Val]);
}
}
friend class TMNet<TSVNode>;
};
// Node of a multi-typed network: all edge ids are kept in two flat
// vectors with a parallel vector of edge-type ids ("mixed vector"
// layout) — compact, but per-type queries are O(degree).
class TMVNode {
private:
TInt TypeId; // Node type ID
TInt Id; // Get global ID
TIntV InEIdV, OutEIdV; // Vectors of EIds
TIntV InETypeIdV, OutETypeIdV; // Vectors of ETypeIds
public:
TMVNode() : TypeId(-1), Id(-1), InEIdV(), OutEIdV(), InETypeIdV(), OutETypeIdV() { }
TMVNode(const int& NTypeId, const int& NId) : TypeId(NTypeId), Id(NId), InEIdV(), OutEIdV(), InETypeIdV(), OutETypeIdV() { }
TMVNode(const TMVNode& Node) : TypeId(Node.TypeId), Id(Node.Id), InEIdV(Node.InEIdV), OutEIdV(Node.OutEIdV),
InETypeIdV(Node.InETypeIdV), OutETypeIdV(Node.OutETypeIdV) { }
// Filtered copy: keep only edges whose type is listed. NOTE(review): the
// member vectors are constructed with the source LENGTH before elements
// are Add()ed, and the kept edges' type ids are never appended to the
// member InETypeIdV/OutETypeIdV — verify against TVec semantics; this
// looks like it may leave the copy inconsistent.
TMVNode(const TMVNode& Node, const TIntV& InETypeIdV, const TIntV& OutETypeIdV) :
TypeId(Node.TypeId), Id(Node.Id), InEIdV(Node.InEIdV.Len()), OutEIdV(Node.OutEIdV.Len()),
InETypeIdV(Node.InETypeIdV.Len()), OutETypeIdV(Node.OutETypeIdV.Len()) {
TIntSet InETypeIdSet(InETypeIdV);
for (int i = 0; i < Node.InEIdV.Len(); i++) {
if (InETypeIdSet.IsKey(Node.InETypeIdV[i])) {
InEIdV.Add(Node.InEIdV[i]);
}
}
TIntSet OutETypeIdSet(OutETypeIdV);
for (int i = 0; i < Node.OutEIdV.Len(); i++) {
if (OutETypeIdSet.IsKey(Node.OutETypeIdV[i])) {
OutEIdV.Add(Node.OutEIdV[i]);
}
}
}
TMVNode(TSIn& SIn) : TypeId(SIn), Id(SIn), InEIdV(SIn), OutEIdV(SIn), InETypeIdV(SIn), OutETypeIdV(SIn) { }
void Save(TSOut& SOut) const { TypeId.Save(SOut); Id.Save(SOut); InEIdV.Save(SOut); OutEIdV.Save(SOut);
InETypeIdV.Save(SOut); OutETypeIdV.Save(SOut); }
int GetTypeId() const { return TypeId; }
int GetId() const { return Id; }
int GetDeg() const { return GetInDeg() + GetOutDeg(); }
int GetInDeg() const { return InEIdV.Len(); }
int GetOutDeg() const { return OutEIdV.Len(); }
int GetInEId(const int& EdgeN) const { return InEIdV[EdgeN]; }
int GetOutEId(const int& EdgeN) const { return OutEIdV[EdgeN]; }
// Unified neighbor index: out-edges first, then in-edges.
int GetNbrEId(const int& EdgeN) const { return EdgeN<GetOutDeg()?GetOutEId(EdgeN):GetInEId(EdgeN-GetOutDeg()); }
void GetInEIdV(TIntV& EIdV) const { EIdV = InEIdV; }
void GetOutEIdV(TIntV& EIdV) const { EIdV = OutEIdV; }
bool IsInEId(const int& EId) const { return InEIdV.SearchForw(EId) != -1; }
bool IsOutEId(const int& EId) const { return OutEIdV.SearchForw(EId) != -1; }
// Flat layout needs no per-type pre-sizing (cf. TSVNode).
void AddInETypeIds(const TIntV& ETypeIds) { } // Do nothing.
void AddOutETypeIds(const TIntV& ETypeIds) { } // Do nothing.
void AddInNbr(const int& ETypeId, const int& EId) {
InETypeIdV.Add(ETypeId);
InEIdV.Add(EId);
}
void AddOutNbr(const int& ETypeId, const int& EId) {
OutETypeIdV.Add(ETypeId);
OutEIdV.Add(EId);
}
// Remove one occurrence of EId (searched from the back) and its
// parallel type entry; the two vectors stay index-aligned.
void DelInNbr(const int& ETypeId, const int& EId) {
int EIdN = InEIdV.SearchBack(EId);
InETypeIdV.Del(EIdN);
InEIdV.Del(EIdN);
}
void DelOutNbr(const int& ETypeId, const int& EId) {
int EIdN = OutEIdV.SearchBack(EId);
OutETypeIdV.Del(EIdN);
OutEIdV.Del(EIdN);
}
// Linear scan collecting in-edges of one type.
void GetInEIdV(const TInt& ETypeId, TIntV& EIdV) const {
EIdV.Reduce(0); // Clear
for (int i = 0; i < InEIdV.Len(); i++) {
if (InETypeIdV[i] == ETypeId) { EIdV.Add(InEIdV[i]); }
}
}
void GetOutEIdV(const TInt& ETypeId, TIntV& EIdV) const {
EIdV.Reduce(0); // Clear
for (int i = 0; i < OutEIdV.Len(); i++) {
if (OutETypeIdV[i] == ETypeId) { EIdV.Add(OutEIdV[i]); }
}
}
// Collect in-edges for several types (one scan per requested type).
void GetInEIdV(const TIntV& ETypeIdV, TIntV& EIdV) const {
EIdV.Reserve(InEIdV.Len(), 0);
for (int k = 0; k < ETypeIdV.Len(); k++) {
TInt ETypeId = ETypeIdV[k];
for (int i = 0; i < InETypeIdV.Len(); i++) {
if (InETypeIdV[i] == ETypeId) { EIdV.Add(InEIdV[i]); }
}
}
}
void GetOutEIdV(const TIntV& ETypeIdV, TIntV& EIdV) const {
EIdV.Reserve(OutEIdV.Len(), 0);
for (int k = 0; k < ETypeIdV.Len(); k++) {
TInt ETypeId = ETypeIdV[k];
for (int i = 0; i < OutETypeIdV.Len(); i++) {
if (OutETypeIdV[i] == ETypeId) { EIdV.Add(OutEIdV[i]); }
}
}
}
friend class TMNet<TMVNode>;
};
/// Node class for TMNet that stores each node's in/out edge IDs contiguously,
/// grouped by edge type ("chunked vector" layout). For every edge type,
/// InTypeIndexV/OutTypeIndexV hold the start offset of that type's segment in
/// InEIdV/OutEIdV, and InTypeDegV/OutTypeDegV hold the number of live entries
/// in the segment; the remainder of a segment is spare capacity.
///
/// BUG FIX vs. previous revision: GetInEIdV(const TIntV&, TIntV&) and
/// GetOutEIdV(const TIntV&, TIntV&) indexed the edge vector with a fixed
/// offset (EIdV.Add(InEIdV[Ind])), so they returned the *first* edge ID of
/// each segment Deg times instead of the whole segment. They now index
/// [Ind + i], matching the single-type overloads.
class TCVNode {
public:
  static const int DEF_WEIGHT;        // default capacity weight for a newly added edge type
  static const int DEF_WEIGHT_COEFF;  // multiplier converting weights into reserved slots
  static const int DEF_EXPAND_RATIO;  // growth factor applied when a segment is full
private:
  /// Re-lays-out EIdV so each edge type gets Weights[ETypeId] * DEF_WEIGHT_COEFF
  /// slots; existing edge IDs move to their new segments and TypeIndexV/TypeDegV
  /// are updated in place. Weights.Len() may exceed TypeIndexV.Len() to add types.
  static void RedistributeEIds(const TIntV& Weights, TIntV& EIdV, TIntV& TypeIndexV, TIntV& TypeDegV) {
    IAssertR(TypeIndexV.Len() == TypeDegV.Len(), TStr::Fmt("The node is in inconsistent state."));
    // Compute the new start offset of each type's segment.
    int NTypes = Weights.Len();
    TIntV NewTypeIndexV(NTypes); // number of types
    int CumSum = 0; // cumulative sum of weights
    for (int ETypeId = 0; ETypeId < NTypes; ETypeId++) {
      NewTypeIndexV[ETypeId] = CumSum;
      CumSum += Weights[ETypeId] * DEF_WEIGHT_COEFF;
    }
    TIntV NewEIdV(CumSum);
    // Copy live entries from old positions to new positions.
    for (int ETypeId = TypeIndexV.Len() - 1; ETypeId >= 0; ETypeId--) {
      IAssertR(CumSum >= NewTypeIndexV[ETypeId] + TypeDegV[ETypeId], TStr::Fmt("The node is in inconsistent state."));
      for (int i = 0; i < TypeDegV[ETypeId]; i++) {
        NewEIdV[NewTypeIndexV[ETypeId] + i] = EIdV[TypeIndexV[ETypeId] + i];
      }
    }
    // Grow the degree vector to cover any newly added types (new entries are 0).
    TypeDegV.Reserve(NTypes, NTypes);
    TypeIndexV = NewTypeIndexV;
    EIdV = NewEIdV;
  }
private:
  TInt TypeId; // Node type ID
  TInt Id; // Global node ID (encodes the type; see TMNet::GetGlobalNId)
  TIntV InEIdV, OutEIdV;          // segmented edge-ID storage
  TInt InDeg, OutDeg;             // total live in/out degree across all types
  TIntV InTypeIndexV, OutTypeIndexV; // per-type segment start offsets
  TIntV InTypeDegV, OutTypeDegV;     // per-type live entry counts
public:
  TCVNode() : TypeId(-1), Id(-1), InEIdV(), OutEIdV(), InDeg(0), OutDeg(0),
    InTypeIndexV(), OutTypeIndexV(), InTypeDegV(), OutTypeDegV() { }
  TCVNode(const int& NTypeId, const int& NId) : TypeId(NTypeId), Id(NId), InEIdV(), OutEIdV(), InDeg(0), OutDeg(0),
    InTypeIndexV(), OutTypeIndexV(), InTypeDegV(), OutTypeDegV() { }
  TCVNode(const TCVNode& Node) : TypeId(Node.TypeId), Id(Node.Id), InEIdV(Node.InEIdV), OutEIdV(Node.OutEIdV),
    InDeg(Node.InDeg), OutDeg(Node.OutDeg), InTypeIndexV(Node.InTypeIndexV), OutTypeIndexV(Node.OutTypeIndexV),
    InTypeDegV(Node.InTypeDegV), OutTypeDegV(Node.OutTypeDegV) { }
  /// Projection copy: keeps only the edges whose type appears in InETypeIdV /
  /// OutETypeIdV, compacting each kept segment so no spare capacity remains.
  TCVNode(const TCVNode& Node, const TIntV& InETypeIdV, const TIntV& OutETypeIdV) :
    TypeId(Node.TypeId), Id(Node.Id), InDeg(0), OutDeg(0),
    InTypeIndexV(Node.InTypeIndexV.Len()), OutTypeIndexV(Node.OutTypeIndexV.Len()),
    InTypeDegV(Node.InTypeDegV.Len()), OutTypeDegV(Node.OutTypeDegV.Len()) {
    // First pass: accumulate the kept in-degree and per-type degrees.
    for (TIntV::TIter iter = InETypeIdV.BegI(); iter < InETypeIdV.EndI(); iter++) {
      InDeg += Node.InTypeDegV[*iter];
      InTypeDegV[*iter] = Node.InTypeDegV[*iter];
    }
    int index = 0;
    InEIdV.Gen(InDeg);
    TIntSet InETypeIdSet(InETypeIdV);
    // Second pass: copy kept segments contiguously, recording new offsets.
    for (int ETypeId = 0; ETypeId < InTypeIndexV.Len(); ETypeId++) {
      InTypeIndexV[ETypeId] = index;
      if (InETypeIdSet.IsKey(ETypeId)) {
        for (int i = Node.InTypeIndexV[ETypeId]; i < Node.InTypeIndexV[ETypeId] + Node.InTypeDegV[ETypeId]; i++) {
          InEIdV[index++] = Node.InEIdV[i];
        }
      }
    }
    IAssert(index == InDeg);
    // Same two passes for the out-edges.
    for (TIntV::TIter iter = OutETypeIdV.BegI(); iter < OutETypeIdV.EndI(); iter++) {
      OutDeg += Node.OutTypeDegV[*iter];
      OutTypeDegV[*iter] = Node.OutTypeDegV[*iter];
    }
    index = 0;
    OutEIdV.Gen(OutDeg);
    TIntSet OutETypeIdSet(OutETypeIdV);
    for (int ETypeId = 0; ETypeId < OutTypeIndexV.Len(); ETypeId++) {
      OutTypeIndexV[ETypeId] = index;
      if (OutETypeIdSet.IsKey(ETypeId)) {
        for (int i = Node.OutTypeIndexV[ETypeId]; i < Node.OutTypeIndexV[ETypeId] + Node.OutTypeDegV[ETypeId]; i++) {
          OutEIdV[index++] = Node.OutEIdV[i];
        }
      }
    }
    IAssert(index == OutDeg);
  }
  TCVNode(TSIn& SIn) : TypeId(SIn), Id(SIn), InEIdV(SIn), OutEIdV(SIn), InDeg(SIn), OutDeg(SIn),
    InTypeIndexV(SIn), OutTypeIndexV(SIn), InTypeDegV(SIn), OutTypeDegV(SIn) { }
  void Save(TSOut& SOut) const { TypeId.Save(SOut); Id.Save(SOut); InEIdV.Save(SOut); OutEIdV.Save(SOut);
    InDeg.Save(SOut); OutDeg.Save(SOut); InTypeIndexV.Save(SOut); OutTypeIndexV.Save(SOut);
    InTypeDegV.Save(SOut); OutTypeDegV.Save(SOut); }
  int GetTypeId() const { return TypeId; }
  int GetId() const { return Id; }
  int GetDeg() const { return InDeg + OutDeg; }
  int GetInDeg() const { return InDeg; }
  int GetOutDeg() const { return OutDeg; }
  int GetInDeg(const int& ETypeId) const { return InTypeDegV[ETypeId]; }
  int GetOutDeg(const int& ETypeId) const { return OutTypeDegV[ETypeId]; }
  /// Returns the EdgeN-th in-edge ID, counting across type segments in type
  /// order. Assumes 0 <= EdgeN < InDeg.
  int GetInEId(const int& EdgeN) const {
    int CumSum = 0;
    int ETypeId = 0;
    // Find the segment containing EdgeN; on exit CumSum is the number of
    // in-edges in all preceding segments.
    for (; ETypeId < InTypeDegV.Len(); ETypeId++) {
      CumSum += InTypeDegV[ETypeId];
      if (CumSum > EdgeN) { CumSum -= InTypeDegV[ETypeId]; break; }
    }
    return InEIdV[InTypeIndexV[ETypeId] + EdgeN - CumSum];
  }
  /// Returns the EdgeN-th out-edge ID, counting across type segments in type
  /// order. Assumes 0 <= EdgeN < OutDeg.
  int GetOutEId(const int& EdgeN) const {
    int CumSum = 0;
    int ETypeId = 0;
    for (; ETypeId < OutTypeDegV.Len(); ETypeId++) {
      CumSum += OutTypeDegV[ETypeId];
      if (CumSum > EdgeN) { CumSum -= OutTypeDegV[ETypeId]; break; }
    }
    return OutEIdV[OutTypeIndexV[ETypeId] + EdgeN - CumSum];
  }
  /// Returns the EdgeN-th incident edge ID (out-edges first, then in-edges).
  int GetNbrEId(const int& EdgeN) const { return EdgeN<GetOutDeg()?GetOutEId(EdgeN):GetInEId(EdgeN-GetOutDeg()); }
  /// Collects all in-edge IDs (all types, compacted) into EIdV.
  void GetInEIdV(TIntV& EIdV) const {
    EIdV.Gen(InDeg, 0);
    for (int ETypeId = 0; ETypeId < InTypeDegV.Len(); ETypeId++) {
      for (int i = InTypeIndexV[ETypeId]; i < InTypeIndexV[ETypeId] + InTypeDegV[ETypeId]; i++) {
        EIdV.Add(InEIdV[i]);
      }
    }
  }
  /// Collects all out-edge IDs (all types, compacted) into EIdV.
  void GetOutEIdV(TIntV& EIdV) const {
    EIdV.Gen(OutDeg, 0);
    for (int ETypeId = 0; ETypeId < OutTypeDegV.Len(); ETypeId++) {
      for (int i = OutTypeIndexV[ETypeId]; i < OutTypeIndexV[ETypeId] + OutTypeDegV[ETypeId]; i++) {
        EIdV.Add(OutEIdV[i]);
      }
    }
  }
  //bool IsInEId(const int& EId) const { return InEIdV.SearchBin(EId) != -1; }
  //bool IsOutEId(const int& EId) const { return OutEIdV.SearchBin(EId) != -1; }
  /// Registers the given in-edge types on this node, reserving DEF_WEIGHT
  /// slots for each new type while preserving existing segments.
  void AddInETypeIds(const TIntV& ETypeIds) {
    if (ETypeIds.Len() == 0) { return; }
    int MxETypeId = InTypeIndexV.Len() - 1;
    for (TIntV::TIter iter = ETypeIds.BegI(); iter < ETypeIds.EndI(); iter++) {
      if (MxETypeId < *iter) { MxETypeId = *iter; }
    }
    TIntV InWeights(MxETypeId + 1);
    // Existing types keep a weight equal to their current degree ...
    for (int ETypeId = 0; ETypeId < InTypeDegV.Len(); ETypeId++) {
      InWeights[ETypeId] = InTypeDegV[ETypeId];
    }
    // ... while the newly registered types get the default weight.
    for (TIntV::TIter iter = ETypeIds.BegI(); iter < ETypeIds.EndI(); iter++) {
      InWeights[*iter] = DEF_WEIGHT;
    }
    RedistributeEIds(InWeights, InEIdV, InTypeIndexV, InTypeDegV);
  }
  /// Registers the given out-edge types on this node; see AddInETypeIds.
  void AddOutETypeIds(const TIntV& ETypeIds) {
    if (ETypeIds.Len() == 0) { return; }
    int MxETypeId = OutTypeIndexV.Len() - 1;
    for (TIntV::TIter iter = ETypeIds.BegI(); iter < ETypeIds.EndI(); iter++) {
      if (MxETypeId < *iter) { MxETypeId = *iter; }
    }
    TIntV OutWeights(MxETypeId + 1);
    for (int ETypeId = 0; ETypeId < OutTypeDegV.Len(); ETypeId++) {
      OutWeights[ETypeId] = OutTypeDegV[ETypeId];
    }
    for (TIntV::TIter iter = ETypeIds.BegI(); iter < ETypeIds.EndI(); iter++) {
      OutWeights[*iter] = DEF_WEIGHT;
    }
    RedistributeEIds(OutWeights, OutEIdV, OutTypeIndexV, OutTypeDegV);
  }
  /// Appends in-edge EId to type ETypeId's segment, expanding the layout when
  /// the segment is full. Assumes ETypeId was registered via AddInETypeIds.
  void AddInNbr(const int& ETypeId, const int& EId) {
    int Deg = InTypeDegV[ETypeId];
    // Segment capacity = distance to the next segment's start (or vector end).
    int Capacity = (ETypeId == (InTypeIndexV.Len()-1)) ? InEIdV.Len() : InTypeIndexV[ETypeId+1].Val;
    Capacity -= InTypeIndexV[ETypeId];
    if (Deg >= Capacity) {
      IAssertR(Deg == Capacity, TStr::Fmt("The node is in inconsistent state."));
      TIntV Weights(InTypeDegV);
      Weights[ETypeId] = (Weights[ETypeId] + 4) * DEF_EXPAND_RATIO; // + 4 to avoid 0
      RedistributeEIds(Weights, InEIdV, InTypeIndexV, InTypeDegV);
    }
    InEIdV[InTypeIndexV[ETypeId] + Deg] = EId;
    InTypeDegV[ETypeId]++;
    InDeg++;
  }
  /// Appends out-edge EId to type ETypeId's segment; see AddInNbr.
  void AddOutNbr(const int& ETypeId, const int& EId) {
    int Deg = OutTypeDegV[ETypeId];
    int Capacity = (ETypeId == (OutTypeIndexV.Len()-1)) ? OutEIdV.Len() : OutTypeIndexV[ETypeId+1].Val;
    Capacity -= OutTypeIndexV[ETypeId];
    if (Deg >= Capacity) {
      IAssertR(Deg == Capacity, TStr::Fmt("The node is in inconsistent state."));
      TIntV Weights(OutTypeDegV);
      Weights[ETypeId] = (Weights[ETypeId] + 4) * DEF_EXPAND_RATIO; // + 4 to avoid 0
      RedistributeEIds(Weights, OutEIdV, OutTypeIndexV, OutTypeDegV);
    }
    OutEIdV[OutTypeIndexV[ETypeId] + Deg] = EId;
    OutTypeDegV[ETypeId]++;
    OutDeg++;
  }
  /// Delete an edge with ID EId and type ETypeId.
  /// NOTE(review): SearchForw's result is used unchecked — assumes EId exists
  /// inside this type's segment; confirm callers guarantee this.
  void DelInNbr(const int& ETypeId, const int& EId) {
    int ValN = InEIdV.SearchForw(EId, InTypeIndexV[ETypeId]);
    // Shift the rest of the segment left by one to keep it compact.
    for (int MValN=ValN+1; MValN<InTypeIndexV[ETypeId]+InTypeDegV[ETypeId]; MValN++){
      InEIdV[MValN-1]=InEIdV[MValN];
    }
    InDeg--;
    InTypeDegV[ETypeId]--;
  }
  /// Delete an out-edge with ID EId and type ETypeId; see DelInNbr.
  void DelOutNbr(const int& ETypeId, const int& EId) {
    int ValN = OutEIdV.SearchForw(EId, OutTypeIndexV[ETypeId]);
    for (int MValN=ValN+1; MValN<OutTypeIndexV[ETypeId]+OutTypeDegV[ETypeId]; MValN++){
      OutEIdV[MValN-1]=OutEIdV[MValN];
    }
    OutDeg--;
    OutTypeDegV[ETypeId]--;
  }
  /// Copies type ETypeId's in-edge segment into EIdV (exact size).
  void GetInEIdV(const TInt& ETypeId, TIntV& EIdV) const {
    int Sz = InTypeDegV[ETypeId].Val;
    EIdV.Reserve(Sz, Sz);
    int Ind = InTypeIndexV[ETypeId].Val;
    for (int i = 0; i < Sz; i++) {
      EIdV[i] = InEIdV[Ind+i];
    }
  }
  /// Copies type ETypeId's out-edge segment into EIdV (exact size).
  void GetOutEIdV(const TInt& ETypeId, TIntV& EIdV) const {
    int Sz = OutTypeDegV[ETypeId].Val;
    EIdV.Reserve(Sz, Sz);
    int Ind = OutTypeIndexV[ETypeId].Val;
    for (int i = 0; i < Sz; i++) {
      EIdV[i] = OutEIdV[Ind+i];
    }
  }
  /// Collects in-edge IDs for every type listed in ETypeIdV into EIdV.
  void GetInEIdV(const TIntV& ETypeIdV, TIntV& EIdV) const {
    int Sz = 0;
    for (int k = 0; k < ETypeIdV.Len(); k++) {
      Sz += InTypeDegV[ETypeIdV[k]].Val;
    }
    EIdV.Reserve(Sz, 0);
    int Ind;
    for (int k = 0; k < ETypeIdV.Len(); k++) {
      Ind = InTypeIndexV[ETypeIdV[k]].Val;
      for (int i = 0; i < InTypeDegV[ETypeIdV[k]]; i++) {
        // BUG FIX: advance within the segment (was InEIdV[Ind], which
        // returned the first edge ID of the segment repeatedly).
        EIdV.Add(InEIdV[Ind + i]);
      }
    }
  }
  /// Collects out-edge IDs for every type listed in ETypeIdV into EIdV.
  void GetOutEIdV(const TIntV& ETypeIdV, TIntV& EIdV) const {
    int Sz = 0;
    for (int k = 0; k < ETypeIdV.Len(); k++) {
      Sz += OutTypeDegV[ETypeIdV[k]].Val;
    }
    EIdV.Reserve(Sz, 0);
    int Ind;
    for (int k = 0; k < ETypeIdV.Len(); k++) {
      Ind = OutTypeIndexV[ETypeIdV[k]].Val;
      for (int i = 0; i < OutTypeDegV[ETypeIdV[k]]; i++) {
        // BUG FIX: advance within the segment (was OutEIdV[Ind]).
        EIdV.Add(OutEIdV[Ind + i]);
      }
    }
  }
  friend class TMNet<TCVNode>;
};
//#//////////////////////////////////////////////
/// Directed multigraph with node edge attributes.
template<class TNode>
class TMNet {
public:
typedef TMNet TNet;
typedef TPt<TMNet> PNet;
public:
/// A single directed, typed edge of the network: stores its edge-type ID, its
/// own edge ID, and the global IDs of its source and destination nodes.
class TEdge {
private:
  TInt TypeId, Id, SrcNId, DstNId;
public:
  TEdge() : TypeId(-1), Id(-1), SrcNId(-1), DstNId(-1) { }
  TEdge(const int& ETypeId, const int& EId, const int& SourceNId, const int& DestNId)
    : TypeId(ETypeId), Id(EId), SrcNId(SourceNId), DstNId(DestNId) { }
  TEdge(const TEdge& Edge)
    : TypeId(Edge.TypeId), Id(Edge.Id), SrcNId(Edge.SrcNId), DstNId(Edge.DstNId) { }
  TEdge(TSIn& SIn) : TypeId(SIn), Id(SIn), SrcNId(SIn), DstNId(SIn) { }
  /// Serializes all four fields in declaration order (must mirror TEdge(TSIn&)).
  void Save(TSOut& SOut) const {
    TypeId.Save(SOut);
    Id.Save(SOut);
    SrcNId.Save(SOut);
    DstNId.Save(SOut);
  }
  int GetTypeId() const { return TypeId; }
  int GetId() const { return Id; }
  int GetSrcNId() const { return SrcNId; }
  int GetDstNId() const { return DstNId; }
  friend class TMNet;
};
/// A node type: holds the type's ID and name, the next free type-local node ID
/// (MxNId), and the hash of type-local node ID -> node object.
class TNodeType {
private:
TInt Id;
TStr Name;
TInt MxNId;
THash<TInt, TNode> NodeH;
public:
TNodeType() : Id(-1), Name(), MxNId(0), NodeH(){ }
TNodeType(const int& NTypeId, const TStr& NTypeName) : Id(NTypeId), Name(NTypeName), MxNId(0), NodeH(){ }
TNodeType(const TNodeType& NodeType) : Id(NodeType.Id), Name(NodeType.Name), MxNId(NodeType.MxNId), NodeH(NodeType.NodeH) { }
/// Projection copy: rebuilds every node keeping only the edges whose types
/// appear in InETypeIdV / OutETypeIdV (delegates to the node's own
/// projection constructor).
TNodeType(const TNodeType& NodeType, const TIntV& InETypeIdV, const TIntV& OutETypeIdV) :
Id(NodeType.Id), Name(NodeType.Name), MxNId(NodeType.MxNId), NodeH(NodeType.NodeH.Len()) {
for (typename THash<TInt,TNode>::TIter iter = NodeType.NodeH.BegI(); iter < NodeType.NodeH.EndI(); iter++) {
TNode NewNode(iter.GetDat(), InETypeIdV, OutETypeIdV);
NodeH.AddDat(iter.GetKey(), NewNode);
}
}
TNodeType(TSIn& SIn) : Id(SIn), Name(SIn), MxNId(SIn), NodeH(SIn) { }
void Save(TSOut& SOut) const { Id.Save(SOut); Name.Save(SOut); MxNId.Save(SOut); NodeH.Save(SOut); }
int GetId() const { return Id; }
TStr GetName() const { return Name; }
int GetMxNId() const { return MxNId; }
friend class TMNet;
};
/// Node iterator. Only forward iteration (operator++) is supported.
// NOTE(review): the template parameter TEdge shadows the outer TMNet::TEdge
// class of the same name; TMNodeI is only instantiated with it (see the
// typedef after the class). Iteration walks TypeNodeV (one hash of nodes per
// node type) and, within each type, that type's NodeH.
template<class TEdge>
class TMNodeI {
private:
typedef typename THash<TInt, TNode>::TIter THashIter;
typedef typename TVec<TNodeType>::TIter TTypeIter;
TTypeIter VecI;    // current node type
TTypeIter VecEndI; // past-the-end of the type vector
THashIter HashI;   // current node within VecI's NodeH
const TMNet *Graph;
private:
THashIter VecElemBegI() {
return (*VecI).NodeH.BegI();
}
/// Advances past empty node-type hashes so HashI lands on a real node (or
/// both iterators reach their ends).
void FindNextNonEmptyHashI() {
while (HashI.IsEnd() && VecI < VecEndI) {
VecI++;
HashI = VecElemBegI();
}
}
public:
TMNodeI() : VecI(), VecEndI(), HashI(), Graph(NULL) { }
// Positions the iterator at an explicit (type, node) pair; used for GetNI/EndNI,
// so no skip-ahead over empty hashes is performed here.
TMNodeI(const TTypeIter& TypeIter, const THashIter& NodeIter, const TMNet* GraphPt)
: VecI(TypeIter), VecEndI(GraphPt->TypeNodeV.EndI()), HashI(NodeIter), Graph(GraphPt) { }
// Positions the iterator at the first node at-or-after TypeIter; used for BegNI.
TMNodeI(const TTypeIter& TypeIter, const TMNet* GraphPt) : VecI(TypeIter), VecEndI(GraphPt->TypeNodeV.EndI()), Graph(GraphPt) {
if (VecI < VecEndI) {
HashI = VecElemBegI();
FindNextNonEmptyHashI();
} else {
HashI = THashIter();
}
}
TMNodeI(const TMNodeI& NodeI) : VecI(NodeI.VecI), VecEndI(NodeI.VecEndI), HashI(NodeI.HashI), Graph(NodeI.Graph) { }
TMNodeI& operator = (const TMNodeI& NodeI) { VecI=NodeI.VecI; VecEndI=NodeI.VecEndI; HashI=NodeI.HashI; Graph=NodeI.Graph; return *this; }
/// Increment iterator.
TMNodeI& operator++ (int) {
HashI++;
FindNextNonEmptyHashI();
return *this;
}
// NOTE(review): compares HashI pointers even when VecI differs — presumably
// safe for the begin/end loop idiom used in this file; verify before relying
// on ordering across different node types.
bool operator < (const TMNodeI& NodeI) const { return VecI < NodeI.VecI || HashI < NodeI.HashI; }
bool operator == (const TMNodeI& NodeI) const { return VecI == NodeI.VecI && HashI == NodeI.HashI; }
/// Returns ID of the current node.
int GetId() const { return HashI.GetDat().GetId(); }
/// Returns the type-wise ID of the current node.
int GetLocalId() const { return TMNet::GetLocalNId(GetId()); }
/// Returns type ID of the current node.
int GetTypeId() const { return HashI.GetDat().GetTypeId(); }
/// Returns degree of the current node, the sum of in-degree and out-degree.
int GetDeg() const { return HashI.GetDat().GetDeg(); }
/// Returns in-degree of the current node.
int GetInDeg() const { return HashI.GetDat().GetInDeg(); }
/// Returns out-degree of the current node.
int GetOutDeg() const { return HashI.GetDat().GetOutDeg(); }
/// Returns ID of EdgeN-th in-node (the node pointing to the current node).
int GetInNId(const int& EdgeN) const { return Graph->GetEdge(HashI.GetDat().GetInEId(EdgeN)).GetSrcNId(); }
/// Returns ID of EdgeN-th out-node (the node the current node points to).
int GetOutNId(const int& EdgeN) const { return Graph->GetEdge(HashI.GetDat().GetOutEId(EdgeN)).GetDstNId(); }
/// Returns ID of EdgeN-th neighboring node.
int GetNbrNId(const int& EdgeN) const { const TEdge& E = Graph->GetEdge(HashI.GetDat().GetNbrEId(EdgeN)); return GetId()==E.GetSrcNId() ? E.GetDstNId():E.GetSrcNId(); }
/// Tests whether node with ID NId points to the current node.
/// Linear scan over the in-edges (O(in-degree)).
bool IsInNId(const int& NId) const {
const TNode& Node = HashI.GetDat();
for (int edge = 0; edge < Node.GetInDeg(); edge++) {
if (NId == Graph->GetEdge(Node.GetInEId(edge)).GetSrcNId()) { return true; }
}
return false;
}
/// Tests whether the current node points to node with ID NId.
/// Linear scan over the out-edges (O(out-degree)).
bool IsOutNId(const int& NId) const {
const TNode& Node = HashI.GetDat();
for (int edge = 0; edge < Node.GetOutDeg(); edge++) {
if (NId == Graph->GetEdge(Node.GetOutEId(edge)).GetDstNId()) { return true; }
}
return false;
}
/// Tests whether node with ID NId is a neighbor of the current node.
bool IsNbrNId(const int& NId) const { return IsOutNId(NId) || IsInNId(NId); }
/// Returns ID of EdgeN-th in-edge.
int GetInEId(const int& EdgeN) const { return HashI.GetDat().GetInEId(EdgeN); }
/// Returns ID of EdgeN-th out-edge.
int GetOutEId(const int& EdgeN) const { return HashI.GetDat().GetOutEId(EdgeN); }
/// Returns ID of EdgeN-th in or out-edge.
int GetNbrEId(const int& EdgeN) const { return HashI.GetDat().GetNbrEId(EdgeN); }
/// Tests whether the edge with ID EId is an in-edge of current node.
bool IsInEId(const int& EId) const { return HashI.GetDat().IsInEId(EId); }
/// Tests whether the edge with ID EId is an out-edge of current node.
bool IsOutEId(const int& EId) const { return HashI.GetDat().IsOutEId(EId); }
/// Tests whether the edge with ID EId is an in or out-edge of current node.
bool IsNbrEId(const int& EId) const { return IsInEId(EId) || IsOutEId(EId); }
/*
/// Gets vector of attribute names.
void GetAttrNames(TStrV& Names) const { Graph->AttrNameNI(GetId(), Names); }
/// Gets vector of attribute values.
void GetAttrVal(TStrV& Val) const { Graph->AttrValueNI(GetId(), Val); }
/// Gets vector of int attribute names.
void GetIntAttrNames(TStrV& Names) const { Graph->IntAttrNameNI(GetId(), Names); }
/// Gets vector of int attribute values.
void GetIntAttrVal(TIntV& Val) const { Graph->IntAttrValueNI(GetId(), Val); }
/// Gets vector of str attribute names.
void GetStrAttrNames(TStrV& Names) const { Graph->StrAttrNameNI(GetId(), Names); }
/// Gets vector of str attribute values.
void GetStrAttrVal(TStrV& Val) const { Graph->StrAttrValueNI(GetId(), Val); }
/// Gets vector of flt attribute names.
void GetFltAttrNames(TStrV& Names) const { Graph->FltAttrNameNI(GetId(), Names); }
/// Gets vector of flt attribute values.
void GetFltAttrVal(TFltV& Val) const { Graph->FltAttrValueNI(GetId(), Val); }
*/
};
typedef TMNodeI<TEdge> TNodeI;
/// Edge iterator. Only forward iteration (operator++) is supported.
/// Thin wrapper around the EdgeH hash iterator.
class TEdgeI {
private:
typedef typename THash<TInt, TEdge>::TIter THashIter;
THashIter EdgeHI;
const TMNet *Graph;
public:
TEdgeI() : EdgeHI(), Graph(NULL) { }
TEdgeI(const THashIter& EdgeHIter, const TMNet *GraphPt) : EdgeHI(EdgeHIter), Graph(GraphPt) { }
TEdgeI(const TEdgeI& EdgeI) : EdgeHI(EdgeI.EdgeHI), Graph(EdgeI.Graph) { }
TEdgeI& operator = (const TEdgeI& EdgeI) { if (this!=&EdgeI) { EdgeHI=EdgeI.EdgeHI; Graph=EdgeI.Graph; } return *this; }
/// Increment iterator.
TEdgeI& operator++ (int) { EdgeHI++; return *this; }
bool operator < (const TEdgeI& EdgeI) const { return EdgeHI < EdgeI.EdgeHI; }
bool operator == (const TEdgeI& EdgeI) const { return EdgeHI == EdgeI.EdgeHI; }
/// Returns edge ID.
int GetId() const { return EdgeHI.GetDat().GetId(); }
/// Returns edge's type ID
int GetTypeId() const { return EdgeHI.GetDat().GetTypeId(); }
/// Returns the source of the edge.
int GetSrcNId() const { return EdgeHI.GetDat().GetSrcNId(); }
/// Returns the destination of the edge.
int GetDstNId() const { return EdgeHI.GetDat().GetDstNId(); }
/*
/// Gets vector of attribute names.
void GetAttrNames(TStrV& Names) const { Graph->AttrNameEI(GetId(), Names); }
/// Gets vector of attribute values.
void GetAttrVal(TStrV& Val) const { Graph->AttrValueEI(GetId(), Val); }
/// Gets vector of int attribute names.
void GetIntAttrNames(TStrV& Names) const { Graph->IntAttrNameEI(GetId(), Names); }
/// Gets vector of int attribute values.
void GetIntAttrVal(TIntV& Val) const { Graph->IntAttrValueEI(GetId(), Val); }
/// Gets vector of str attribute names.
void GetStrAttrNames(TStrV& Names) const { Graph->StrAttrNameEI(GetId(), Names); }
/// Gets vector of str attribute values.
void GetStrAttrVal(TStrV& Val) const { Graph->StrAttrValueEI(GetId(), Val); }
/// Gets vector of flt attribute names.
void GetFltAttrNames(TStrV& Names) const { Graph->FltAttrNameEI(GetId(), Names); }
/// Gets vector of flt attribute values.
void GetFltAttrVal(TFltV& Val) const { Graph->FltAttrValueEI(GetId(), Val); }
*/
friend class TMNet;
};
private:
// A global node ID packs (local NId << NTYPEID_NBITS) | NTypeId, so the low
// NTYPEID_NBITS bits always identify the node type.
static const int NTYPEID_NBITS = 3; // The number of types must be at most 2^NTYPEID_NBITS
static const int NTYPEID_FLAG = (1 << NTYPEID_NBITS) - 1;
static int GetGlobalNId(const int& NTypeId, const int& NId) { return (NId << NTYPEID_NBITS) + NTypeId;}
private:
TCRef CRef;                 // reference counter for TPt smart pointers
TInt MxNId;                 // one past the largest global node ID seen so far
TInt MxEId;                 // one past the largest edge ID seen so far
THash<TStr, int> NTypeH;    // node type name -> node type ID
THash<TStr, int> ETypeH;    // edge type name -> edge type ID
TVec<TNodeType> TypeNodeV;  // per node type: name and hash of its nodes
TIntV EdgeSzV; // maintain the number of edges of each type
THash<TInt, TEdge> EdgeH;   // edge ID -> edge
int Sz;                     // total node count across all types
TVec<TIntV> InETypes;       // per node type: edge type IDs that may point into it
TVec<TIntV> OutETypes;      // per node type: edge type IDs that may leave it
/// KeyToIndexType[N|E]: Key->(Type,Index).
TStrIntPrH KeyToIndexTypeN, KeyToIndexTypeE;
enum { IntType, StrType, FltType };
private:
/// Returns the node with global ID NId (splits it into type + local ID).
TNode& GetNode(const int&NId) {
int NTypeId = GetNTypeId(NId);
int LocalNId = GetLocalNId(NId);
return GetNode(NTypeId, LocalNId);
}
/// Const overload of GetNode(NId).
const TNode& GetNode(const int&NId) const {
int NTypeId = GetNTypeId(NId);
int LocalNId = GetLocalNId(NId);
return GetNode(NTypeId, LocalNId);
}
// (NTypeId, local NId) accessors; assume the node exists.
TNode& GetNode(const int& NTypeId, const int& NId) { return TypeNodeV[NTypeId].NodeH.GetDat(NId); }
const TNode& GetNode(const int& NTypeId, const int& NId) const { return TypeNodeV[NTypeId].NodeH.GetDat(NId); }
// Edge accessors by edge ID; assume the edge exists.
TEdge& GetEdge(const int& EId) { return EdgeH.GetDat(EId); }
const TEdge& GetEdge(const int& EId) const { return EdgeH.GetDat(EId); }
/// Asserts that NTypeId refers to a registered node type.
void AssertNTypeId(const int NTypeId) const {
IAssertR(IsNTypeId(NTypeId), TStr::Fmt("NodeTypeId %d does not exist", NTypeId));
}
public:
TMNet() : CRef(), MxEId(0), NTypeH(), ETypeH(), TypeNodeV(), EdgeH(), Sz(0), InETypes(), OutETypes(),
KeyToIndexTypeN(), KeyToIndexTypeE() { }
TMNet(const TMNet& Graph) : MxEId(Graph.MxEId),
NTypeH(Graph.NTypeH), ETypeH(Graph.ETypeH), TypeNodeV(Graph.TypeNodeV), EdgeH(Graph.EdgeH), Sz(Graph.Sz),
InETypes(Graph.InETypes), OutETypes(Graph.OutETypes), KeyToIndexTypeN(), KeyToIndexTypeE() { }
/// Static cons returns pointer to graph. Ex: PNEANet Graph=TNEANet::New().
static TPt<TMNet<TNode> > New() {
return TPt<TMNet<TNode> >(new TMNet());
}
TMNet& operator = (const TMNet& Graph) { if (this!=&Graph) {
MxEId=Graph.MxEId; NTypeH=Graph.NTypeH; ETypeH=Graph.ETypeH; TypeNodeV=Graph.TypeNodeV; EdgeH=Graph.EdgeH;
Sz=Graph.Sz; InETypes=Graph.InETypes; OutETypes=Graph.OutETypes;
KeyToIndexTypeN=Graph.KeyToIndexTypeN; KeyToIndexTypeE=Graph.KeyToIndexTypeE;}
return *this; }
/// Reports graph capabilities: this network is directed and a multigraph.
bool HasFlag(const TGraphFlag& Flag) const {
if (Flag == gfDirected) { return true; }
else if (Flag == gfMultiGraph) { return true; }
else return false;
}
/// Gets the NTypeId
static int GetNTypeId(const int& NId) { return NId & NTYPEID_FLAG; } // Assuming that GlobalNId is positive here
/// Extracts the type-local node ID from a global node ID.
static int GetLocalNId(const int& GlobalNId) { return GlobalNId >> NTYPEID_NBITS; }
/// Returns an ID that is larger than any node type ID in the network.
int GetMxNTypeId() const { return TypeNodeV.Len(); }
/// Adds a new type with the given string into the graph.
/// Returns the new type's ID, or the existing ID if the name is already
/// registered (idempotent).
int AddNType(const TStr& NTypeName) {
int KeyId = NTypeH.GetKeyId(NTypeName);
// Has the type been added?
if (KeyId == -1) {
// Not added.
int NTypeId = GetMxNTypeId();
NTypeH.AddDat(NTypeName, NTypeId);
TypeNodeV.Add(TNodeType(NTypeId, NTypeName));
// InETypes/OutETypes grow in lockstep with TypeNodeV, one slot per type.
IAssertR(NTypeId == InETypes.Len(), TStr::Fmt("InETypes has inconsistent length."));
IAssertR(NTypeId == OutETypes.Len(), TStr::Fmt("OutETypes has inconsistent length."));
InETypes.Add(TIntV());
OutETypes.Add(TIntV());
return NTypeId;
} else {
// Added. Return the stored id.
TStr TempKey;
int NTypeId;
NTypeH.GetKeyDat(KeyId, TempKey, NTypeId);
return NTypeId;
}
}
/// Gets the typeId of a type
// NOTE(review): THash::GetDat asserts if NTypeStr is unregistered — callers
// must register the type first.
int GetNTypeId(const TStr& NTypeStr) { return NTypeH.GetDat(NTypeStr); }
/// Gets the type name
TStr GetNTypeName(const int NTypeId) {
AssertNTypeId(NTypeId);
return TypeNodeV[NTypeId].Name;
}
/// Validates the TypeId
bool IsNTypeId(const int NTypeId) const { return NTypeId >= 0 && NTypeId < TypeNodeV.Len(); }
/// Returns the number of nodes in the graph.
int GetNodes() const { return Sz; }
/// Returns the number of nodes of a specific type in the graph.
int GetNodes(const int& NTypeId) const { return TypeNodeV[NTypeId].NodeH.Len(); }
/// Returns an ID that is larger than any node ID in the network.
int GetMxNId() const { return MxNId; }
/// Returns an ID that is larger than any node ID of the given type in the network.
/// Note: this is a type-local ID, unlike the global ID returned by GetMxNId().
int GetMxNId(const int& NTypeId) const {
AssertNTypeId(NTypeId);
return TypeNodeV[NTypeId].MxNId;
}
/// Adds a node of ID NId to the graph.
/// NId is a *type-local* ID (or -1 to auto-assign the next free one); the
/// return value is the node's *global* ID.
int AddNode(const int& NTypeId, int NId = -1) {
AssertNTypeId(NTypeId);
TNodeType* NodeType = &TypeNodeV[NTypeId];
if (NId == -1) {
NId = NodeType->MxNId; NodeType->MxNId++;
} else {
IAssertR(!IsNode(NTypeId, NId), TStr::Fmt("NodeId %d with type %d already exists", NId, NTypeId));
NodeType->MxNId = TMath::Mx(NId+1, NodeType->GetMxNId());
}
// The node stores its global ID; pre-register all edge types currently
// declared for this node type.
TNode NewNode(NTypeId, GetGlobalNId(NTypeId, NId));
NewNode.AddInETypeIds(InETypes[NTypeId]);
NewNode.AddOutETypeIds(OutETypes[NTypeId]);
NodeType->NodeH.AddDat(NId, NewNode);
int GlobalNId = GetGlobalNId(NTypeId, NId);
MxNId = TMath::Mx(GlobalNId+1, MxNId());
Sz++;
return GlobalNId;
}
/// Adds an auto-numbered node of the named type; returns its global ID.
int AddNode(const TStr& NTypeStr) { return AddNode(GetNTypeId(NTypeStr)); }
/// Adds a node of ID NodeI.GetId() to the graph.
int AddNode(const TNodeI& NodeId) { return AddNode(NodeId.GetTypeId(), NodeId.GetId()); }
/// Validates the global NId
bool IsNode(const int& NId) const { return IsNode(GetNTypeId(NId), GetLocalNId(NId)); }
/// Validates the NTypeId and NId
bool IsNode(const int& NTypeId, const int& NId) const {
if (!IsNTypeId(NTypeId)) { return false; }
return TypeNodeV[NTypeId].NodeH.IsKey(NId);
}
/// Deletes the node with type NTypeId and *type-local* ID NId, first removing
/// all of its incident edges.
void DelNode(const int& NTypeId, const int& NId) {
const TNode& Node = GetNode(NTypeId, NId);
// GetOutEIdV/GetInEIdV copy the edge IDs, so deleting edges while looping
// over the copy is safe.
TIntV EIdV;
Node.GetOutEIdV(EIdV);
for (int out = 0; out < EIdV.Len(); out++) {
DelEdge(EIdV[out]);
}
Node.GetInEIdV(EIdV);
for (int in = 0; in < EIdV.Len(); in++) {
DelEdge(EIdV[in]);
}
TypeNodeV[NTypeId].NodeH.DelKey(NId);
Sz--;
}
/// Deletes node of ID NId from the graph.
void DelNode(const int& NId) { DelNode(GetNTypeId(NId), GetLocalNId(NId)); }
/// Deletes node of ID NodeI.GetId() from the graph.
/// BUG FIX: NodeI.GetId() is a *global* ID, but DelNode(NTypeId, NId) expects
/// a *type-local* ID — route through the global-ID overload, which splits the
/// ID into (NTypeId, local NId).
void DelNode(const TNode& NodeI) { DelNode(NodeI.GetId()); }
/// Returns an iterator referring to the first node in the graph.
TNodeI BegNI() const {
return TNodeI(TypeNodeV.BegI(), this);
}
/// Returns an iterator referring to the past-the-end node in the graph.
TNodeI EndNI() const { return TNodeI(TypeNodeV.EndI(), this); }
/// Returns an iterator referring to the node of ID NId in the graph.
TNodeI GetNI(const int& NId) const {
int NTypeId = GetNTypeId(NId);
int LocalNId = GetLocalNId(NId);
return GetNI(NTypeId, LocalNId);
}
/// Returns an iterator at the first node of type NTypeId.
TNodeI BegNI(const int& NTypeId) const { return TNodeI(TypeNodeV.GetI(NTypeId), this); }
/// Returns the past-the-end iterator for nodes of type NTypeId.
TNodeI EndNI(const int& NTypeId) const { return TNodeI(TypeNodeV.GetI(NTypeId), TypeNodeV[NTypeId].NodeH.EndI(), this); }
/// Returns an iterator at the node with type NTypeId and type-local ID NId.
TNodeI GetNI(const int& NTypeId, const int& NId) const {
return TNodeI(TypeNodeV.GetI(NTypeId), TypeNodeV[NTypeId].NodeH.GetI(NId), this);
}
/// Collects the global IDs of all nodes into NIdV.
void GetNIdV(TIntV& NIdV) const {
NIdV.Gen(GetNodes(), 0);
for (TNodeI NI = BegNI(); NI < EndNI(); NI++) {
NIdV.Add(NI.GetId());
}
}
/// Registers an edge type named ETypeName that connects nodes of type
/// SrcNTypeName to nodes of type DstNTypeName. Returns the new type's ID, or
/// the existing ID if the name is already registered (idempotent). Both node
/// types must already exist.
int AddEType(const TStr& ETypeName, const TStr& SrcNTypeName, const TStr& DstNTypeName) {
int KeyId = ETypeH.GetKeyId(ETypeName);
// Has the type been added?
if (KeyId == -1) {
// Not added.
int ETypeId = ETypeH.Len();
ETypeH.AddDat(ETypeName, ETypeId);
InETypes[GetNTypeId(DstNTypeName)].Add(ETypeId);
OutETypes[GetNTypeId(SrcNTypeName)].Add(ETypeId);
// Grow the per-type edge counter; Reserve keeps existing counts intact.
EdgeSzV.Reserve(ETypeId+1, ETypeId+1);
EdgeSzV[ETypeId] = 0;
return ETypeId;
} else {
// Added. Return the stored id.
TStr TempKey;
int ETypeId;
ETypeH.GetKeyDat(KeyId, TempKey, ETypeId);
return ETypeId;
}
}
/// Gets the typeId of an edge type
int GetETypeId(const TStr& ETypeStr) { return ETypeH.GetDat(ETypeStr); }
/// Returns an ID that is larger than any edge ID in the network.
int GetMxEId() const { return MxEId; }
/// Returns the number of edges in the graph.
int GetEdges() const { return EdgeH.Len(); }
/// Returns the number of edges of a specific type in the graph.
int GetEdges(const int& ETypeId) const { return EdgeSzV[ETypeId].Val; }
/// Adds an edge with ID EId between node IDs SrcNId and DstNId to the graph.
/// SrcNId/DstNId are *global* node IDs; EId == -1 auto-assigns the next free
/// edge ID. Returns the edge's ID.
// NOTE(review): ETypeId is not validated against EdgeSzV's length here —
// assumes the type was registered via AddEType; confirm callers guarantee this.
int AddEdge(const int& SrcNId, const int& DstNId, const int& ETypeId, int EId = -1) {
if (EId == -1) { EId = MxEId; MxEId++; }
else { MxEId = TMath::Mx(EId+1, MxEId()); }
IAssertR(!IsEdge(EId), TStr::Fmt("EdgeId %d already exists", EId));
IAssertR(IsNode(SrcNId) && IsNode(DstNId), TStr::Fmt("%d or %d not a node.", SrcNId, DstNId).CStr());
EdgeH.AddDat(EId, TEdge(ETypeId, EId, SrcNId, DstNId));
// Register the edge on both endpoints' adjacency structures.
GetNode(SrcNId).AddOutNbr(ETypeId, EId);
GetNode(DstNId).AddInNbr(ETypeId, EId);
EdgeSzV[ETypeId] += 1;
return EId;
}
/// Adds an auto-numbered edge of the named type between SrcNId and DstNId.
int AddEdge(const int& SrcNId, const int& DstNId, const TStr& ETypeStr) { return AddEdge(SrcNId, DstNId, GetETypeId(ETypeStr)); }
/// Adds an edge between EdgeI.GetSrcNId() and EdgeI.GetDstNId() to the graph.
int AddEdge(const TEdgeI& EdgeI) { return AddEdge(EdgeI.GetSrcNId(), EdgeI.GetDstNId(), EdgeI.GetTypeId(), EdgeI.GetId()); }
/// Deletes an edge with edge ID EId from the graph.
void DelEdge(const int& EId) {
IAssert(IsEdge(EId));
// Copy the edge record first: DelKey below invalidates references into EdgeH.
TEdge Edge = GetEdge(EId);
int ETypeId = Edge.GetTypeId();
const int SrcNId = Edge.GetSrcNId();
const int DstNId = Edge.GetDstNId();
GetNode(SrcNId).DelOutNbr(ETypeId, EId);
GetNode(DstNId).DelInNbr(ETypeId, EId);
EdgeH.DelKey(EId);
EdgeSzV[ETypeId] -= 1;
}
/// Deletes all edges between node IDs SrcNId and DstNId from the graph.
/// When IsDir is false, edges in either direction are removed. Asserts that
/// at least one such edge exists.
void DelEdge(const int& SrcNId, const int& DstNId, const bool& IsDir = true) {
int EId;
IAssert(IsEdge(SrcNId, DstNId, EId, IsDir)); // there is at least one edge
while (IsEdge(SrcNId, DstNId, EId, IsDir)) {
DelEdge(EId);
}
}
/// Tests whether an edge with edge ID EId exists in the graph.
bool IsEdge(const int& EId) const { return EdgeH.IsKey(EId); }
/// Tests whether an edge between node IDs SrcNId and DstNId exists in the graph.
bool IsEdge(const int& SrcNId, const int& DstNId, const bool& IsDir = true) const { int EId; return IsEdge(SrcNId, DstNId, EId, IsDir); }
/// Tests whether an edge between node IDs SrcNId and DstNId exists in the graph. if an edge exists, return its edge ID in EId
/// Linear scan over SrcNId's incident edges (O(degree)); EId is left
/// unmodified when no edge is found.
bool IsEdge(const int& SrcNId, const int& DstNId, int& EId, const bool& IsDir = true) const {
const TNode& SrcNode = GetNode(SrcNId);
for (int edge = 0; edge < SrcNode.GetOutDeg(); edge++) {
const TEdge& Edge = GetEdge(SrcNode.GetOutEId(edge));
if (DstNId == Edge.GetDstNId()) {
EId = Edge.GetId();
return true;
}
}
// Undirected query: also accept an edge pointing the other way.
if (! IsDir) {
for (int edge = 0; edge < SrcNode.GetInDeg(); edge++) {
const TEdge& Edge = GetEdge(SrcNode.GetInEId(edge));
if (DstNId == Edge.GetSrcNId()) {
EId = Edge.GetId();
return true;
}
}
}
return false;
}
/// Returns an edge ID between node IDs SrcNId and DstNId, if such an edge exists. Otherwise, return -1.
int GetEId(const int& SrcNId, const int& DstNId) const { int EId; return IsEdge(SrcNId, DstNId, EId)?EId:-1; }
/// Returns an iterator referring to the first edge in the graph.
TEdgeI BegEI() const { return TEdgeI(EdgeH.BegI(), this); }
/// Returns an iterator referring to the past-the-end edge in the graph.
TEdgeI EndEI() const { return TEdgeI(EdgeH.EndI(), this); }
/// Returns an iterator referring to edge with edge ID EId.
TEdgeI GetEI(const int& EId) const { return TEdgeI(EdgeH.GetI(EId), this); }
/// Returns an iterator referring to edge (SrcNId, DstNId) in the graph.
// NOTE(review): if no such edge exists, GetEId returns -1 and this forwards
// -1 to EdgeH.GetI — callers should check IsEdge first.
TEdgeI GetEI(const int& SrcNId, const int& DstNId) const { return GetEI(GetEId(SrcNId, DstNId)); }
/// Returns an ID of a random node in the graph.
/// Picks a node type with probability proportional to its node count, then a
/// uniform-random node of that type.
int GetRndNId(TRnd& Rnd=TInt::Rnd) {
int RandN = Rnd.GetUniDevInt(Sz);
int Ct = 0;
int NTypeId = 0;
for (; NTypeId < TypeNodeV.Len(); NTypeId++) {
Ct += TypeNodeV[NTypeId].NodeH.Len();
if (Ct > RandN) { break; }
}
return GetRndNId(NTypeId, Rnd);
}
/// Returns an iterator referring to a random node in the graph.
TNodeI GetRndNI(TRnd& Rnd=TInt::Rnd) { return GetNI(GetRndNId(Rnd)); }
/// Returns an ID of a random node with the given type in the graph.
int GetRndNId(const int& NTypeId, TRnd& Rnd=TInt::Rnd) {
return TypeNodeV[NTypeId].NodeH.GetKey(TypeNodeV[NTypeId].NodeH.GetRndKeyId(Rnd, 0.8));
}
/// Returns an iterator referring to a random node with the given type in the graph.
TNodeI GetRndNI(const int& NTypeId, TRnd& Rnd=TInt::Rnd) { return GetNI(GetRndNId(NTypeId, Rnd)); }
/// Returns an ID of a random edge in the graph.
int GetRndEId(TRnd& Rnd=TInt::Rnd) { return EdgeH.GetKey(EdgeH.GetRndKeyId(Rnd, 0.8)); }
/// Returns an iterator referring to a random edge in the graph.
TEdgeI GetRndEI(TRnd& Rnd=TInt::Rnd) { return GetEI(GetRndEId(Rnd)); }
/*
/// Tests whether the graph is empty (has zero nodes).
bool Empty() const { return GetNodes()==0; }
/// Deletes all nodes and edges from the graph.
void Clr() { MxNId=0; MxEId=0; NodeH.Clr(); EdgeH.Clr(),
KeyToIndexTypeN.Clr(), KeyToIndexTypeE.Clr(), IntDefaultsN.Clr(), IntDefaultsE.Clr(),
StrDefaultsN.Clr(), StrDefaultsE.Clr(), FltDefaultsN.Clr(), FltDefaultsE.Clr(),
VecOfIntVecsN.Clr(), VecOfIntVecsE.Clr(), VecOfStrVecsN.Clr(), VecOfStrVecsE.Clr(),
VecOfFltVecsN.Clr(), VecOfFltVecsE.Clr();}
/// Reserves memory for a graph of Nodes nodes and Edges edges.
void Reserve(const int& Nodes, const int& Edges) {
if (Nodes>0) { NodeH.Gen(Nodes/2); } if (Edges>0) { EdgeH.Gen(Edges/2); } }
/// Defragments the graph.
void Defrag(const bool& OnlyNodeLinks=false);
/// Checks the graph data structure for internal consistency.
bool IsOk(const bool& ThrowExcept=true) const;
/// Print the graph in a human readable form to an output stream OutF.
void Dump(FILE *OutF=stdout) const;
// Get the sum of the weights of all the outgoing edges of the node.
TFlt GetWeightOutEdges(const TNodeI& NI, const TStr& attr);
// Check if there is an edge attribute with name attr.
bool IsFltAttrE(const TStr& attr);
// Check if there is an edge attribute with name attr.
bool IsIntAttrE(const TStr& attr);
// Check if there is an edge attribute with name attr.
bool IsStrAttrE(const TStr& attr);
// Get Vector for the Flt Attribute attr.
TVec<TFlt>& GetFltAttrVecE(const TStr& attr);
// Get keyid for edge with id EId.
int GetFltKeyIdE(const int& EId);
//Fills OutWeights with the outgoing weight from each node.
void GetWeightOutEdgesV(TFltV& OutWeights, const TFltV& AttrVal) ;
*/
/// Returns the subgraph induced by the node types in NTypeIdV as a new TMNet.
/// An edge type is kept only when BOTH of its endpoint node types are selected;
/// this is detected with the two-pass counter below (1 = seen as incoming type,
/// 2 = also seen as outgoing type).
TPt<TMNet<TNode> > GetSubGraph(const TIntV& NTypeIdV) {
TPt<TMNet<TNode> > PNewGraph = New();
TMNet<TNode>& NewGraph = *PNewGraph;
TIntSet NTypeIdSet(NTypeIdV);
// Initialize node types
// Copy only the name->id entries of the selected node types.
for (typename THash<TStr,int>::TIter iter = NTypeH.BegI(); iter < NTypeH.EndI(); iter++) {
if (NTypeIdSet.IsKey(TInt(iter.GetDat()))) { NewGraph.NTypeH.AddDat(iter.GetKey(), iter.GetDat()); }
}
// Find relevant edges
// Pass 1: mark with 1 every edge type that arrives at a selected node type.
TIntIntH EdgeCounter;
for (int i = 0; i < InETypes.Len(); i++) {
if (!NTypeIdSet.IsKey(TInt(i))) { continue; }
for (int j = 0; j < InETypes[i].Len(); j++) {
EdgeCounter.AddDat(InETypes[i][j], TInt(1));
}
}
// Pass 2: promote to 2 those edge types that also leave a selected node type.
for (int i = 0; i < OutETypes.Len(); i++) {
if (!NTypeIdSet.IsKey(TInt(i))) { continue; }
for (int j = 0; j < OutETypes[i].Len(); j++) {
if (EdgeCounter.IsKey(OutETypes[i][j])) { EdgeCounter.AddDat(OutETypes[i][j], TInt(2)); }
}
}
// Edge types that reached 2 have both endpoints inside the subgraph.
TIntSet ETypeIdSet;
for (typename TIntIntH::TIter iter = EdgeCounter.BegI(); iter < EdgeCounter.EndI(); iter++) {
if (iter.GetDat().Val == 2) { ETypeIdSet.AddKey(iter.GetKey()); }
}
// Copy the name->id entries of the surviving edge types.
for (typename THash<TStr,int>::TIter iter = ETypeH.BegI(); iter < ETypeH.EndI(); iter++) {
if (ETypeIdSet.IsKey(TInt(iter.GetDat()))) { NewGraph.ETypeH.AddDat(iter.GetKey(), iter.GetDat()); }
}
// Rebuild the per-node-type in/out edge-type lists, keeping only survivors.
// Both vectors keep the original length so node-type ids stay valid indices.
NewGraph.InETypes.Gen(InETypes.Len());
for (int i = 0; i < InETypes.Len(); i++) {
for (int j = 0; j < InETypes[i].Len(); j++) {
int ETypeId = InETypes[i][j];
if (ETypeIdSet.IsKey(ETypeId)) { NewGraph.InETypes[i].Add(ETypeId); }
}
}
NewGraph.OutETypes.Gen(OutETypes.Len());
for (int i = 0; i < OutETypes.Len(); i++) {
for (int j = 0; j < OutETypes[i].Len(); j++) {
int ETypeId = OutETypes[i][j];
if (ETypeIdSet.IsKey(ETypeId)) { NewGraph.OutETypes[i].Add(ETypeId); }
}
}
// Copy node storage: selected types keep their nodes (and count toward Sz);
// unselected types get an empty placeholder so indexing by type id still works.
NewGraph.Sz = 0;
NewGraph.TypeNodeV.Gen(TypeNodeV.Len());
for (int NTypeId = 0; NTypeId < TypeNodeV.Len(); NTypeId++) {
if (NTypeIdSet.IsKey(NTypeId)) {
NewGraph.TypeNodeV[NTypeId] = TNodeType(TypeNodeV[NTypeId], NewGraph.InETypes[NTypeId], NewGraph.OutETypes[NTypeId]);
NewGraph.Sz += NewGraph.TypeNodeV[NTypeId].NodeH.Len();
} else {
NewGraph.TypeNodeV[NTypeId] = TNodeType(TypeNodeV[NTypeId].GetId(), TypeNodeV[NTypeId].GetName());
}
}
// Node ids are preserved, so the max node id carries over unchanged.
NewGraph.MxNId = MxNId;
// Copy only edges whose type survived; track the max edge id as we go.
int MaxEId = 0;
for (TEdgeI iter = BegEI(); iter < EndEI(); iter++) {
if (!ETypeIdSet.IsKey(iter.GetTypeId())) { continue; }
int EId = iter.GetId();
NewGraph.EdgeH.AddDat(EId, TEdge(iter.GetTypeId(), EId, iter.GetSrcNId(), iter.GetDstNId()));
if (MaxEId < EId) { MaxEId = EId; }
}
NewGraph.MxEId = MaxEId + 1;
return PNewGraph;
}
/// Convenience overload: extracts the subgraph induced by the node types
/// named in NTypeNameV. Each name is mapped to its type id via NTypeH and
/// the work is delegated to the id-based GetSubGraph.
TPt<TMNet<TNode> > GetSubGraph(const TStrV& NTypeNameV) {
  TIntV TypeIdV;
  const int NumNames = NTypeNameV.Len();
  for (int NameN = 0; NameN < NumNames; NameN++) {
    TypeIdV.Add(NTypeH.GetDat(NTypeNameV[NameN]));
  }
  return GetSubGraph(TypeIdV);
}
/// Returns the subgraph induced by the node types in NTypeIdV as a TNEANet.
/// Computes the set of edge types with both endpoint types selected (same
/// two-pass counter as GetSubGraph), then delegates to the node-based
/// extractor GetSubGraphTNEANet2.
PNEANet GetSubGraphTNEANet(const TIntV& NTypeIdV) {
// Find relevant edge types
TIntSet NTypeIdSet(NTypeIdV);
// Pass 1: mark edge types that arrive at a selected node type.
TIntIntH EdgeCounter;
for (int i = 0; i < InETypes.Len(); i++) {
if (!NTypeIdSet.IsKey(TInt(i))) { continue; }
for (int j = 0; j < InETypes[i].Len(); j++) {
EdgeCounter.AddDat(InETypes[i][j], TInt(1));
}
}
// Pass 2: promote to 2 those that also leave a selected node type.
for (int i = 0; i < OutETypes.Len(); i++) {
if (!NTypeIdSet.IsKey(TInt(i))) { continue; }
for (int j = 0; j < OutETypes[i].Len(); j++) {
if (EdgeCounter.IsKey(OutETypes[i][j])) { EdgeCounter.AddDat(OutETypes[i][j], TInt(2)); }
}
}
// Keep the edge types whose counter reached 2 (both endpoints selected).
TIntV ETypeIdV;
for (typename TIntIntH::TIter iter = EdgeCounter.BegI(); iter < EdgeCounter.EndI(); iter++) {
if (iter.GetDat().Val == 2) {
ETypeIdV.Add(iter.GetKey());
}
}
return GetSubGraphTNEANet2(NTypeIdV, ETypeIdV);
}
/// Extracts the subgraph by scanning and filtering all edges (edge-based)
/// Node ids in the result are the global ids produced by GetGlobalNId, so
/// edges can be added by their original endpoint ids.
PNEANet GetSubGraphTNEANet(const TIntV& NTypeIdV, const TIntV& ETypeIdV) {
PNEANet PNewGraph = PNEANet::New();
// Add every node of every selected type, under its global node id.
for (int i = 0; i < NTypeIdV.Len(); i++) {
TInt NTypeId = NTypeIdV[i];
for (typename THash<TInt,TNode>::TIter iter = TypeNodeV[NTypeId].NodeH.BegI(); iter < TypeNodeV[NTypeId].NodeH.EndI(); iter++) {
PNewGraph->AddNode(GetGlobalNId(NTypeId, iter.GetKey().Val));
}
}
TIntSet ETypeIdSet(ETypeIdV);
// Add edges
// Full scan over all edges; keep those whose type is selected. Edge ids
// are preserved from the source graph.
for (TEdgeI iter = BegEI(); iter < EndEI(); iter++) {
if (ETypeIdSet.IsKey(iter.GetTypeId())) {
PNewGraph->AddEdge(iter.GetSrcNId(), iter.GetDstNId(), iter.GetId());
}
}
return PNewGraph;
}
/// Extracts the subgraph by finding the nodes and then finding all neighbors (node-based)
/// Instead of scanning all edges, walks only the out-edges of the selected
/// nodes, restricted to the selected out-edge types. Each edge is emitted
/// once, from its source node.
PNEANet GetSubGraphTNEANet2(const TIntV& NTypeIdV, const TIntV& ETypeIdV) {
PNEANet PNewGraph = PNEANet::New();
// Add nodes
for (int i = 0; i < NTypeIdV.Len(); i++) {
TInt NTypeId = NTypeIdV[i];
for (typename THash<TInt,TNode>::TIter iter = TypeNodeV[NTypeId].NodeH.BegI(); iter < TypeNodeV[NTypeId].NodeH.EndI(); iter++) {
PNewGraph->AddNode(GetGlobalNId(NTypeId, iter.GetKey().Val));
}
}
// Add edges
TIntSet ETypeIdSet(ETypeIdV);
TIntV EIdV; // Use same vector to save memory
for (int i = 0; i < NTypeIdV.Len(); i++) {
TInt NTypeId = NTypeIdV[i];
// Intersect this node type's out-edge types with the selected edge types.
TIntV* POutETypes = &(OutETypes[NTypeId]);
TIntV OutETypeIdV;
for (TIntV::TIter iter = POutETypes->BegI(); iter < POutETypes->EndI(); iter++) {
if (ETypeIdSet.IsKey(*iter)) { OutETypeIdV.Add(*iter); }
}
// For each node of this type, copy all out-edges of the kept types.
for (typename THash<TInt,TNode>::TIter iter = TypeNodeV[NTypeId].NodeH.BegI(); iter < TypeNodeV[NTypeId].NodeH.EndI(); iter++) {
TNode* PNode = &(iter.GetDat());
for (int j = 0; j < OutETypeIdV.Len(); j++) {
// GetOutEIdV overwrites EIdV with this node's out-edge ids of one type.
PNode->GetOutEIdV(OutETypeIdV.GetVal(j).Val, EIdV);
for (int k = 0; k < EIdV.Len(); k++) {
TInt EId = EIdV[k];
PNewGraph->AddEdge(PNode->GetId(), GetEdge(EId).GetDstNId(), EId);
}
}
}
}
return PNewGraph;
}
#ifdef GCC_ATOMIC
/// Returns the subgraph induced by NTypeIdV as a TNEANetMP (parallel graph).
/// Derives the surviving edge types (both endpoint types selected) with the
/// usual two-pass counter, then delegates to the two-argument MP2 overload.
/// Phase timings are recorded via the TStopwatch singleton.
PNEANetMP GetSubGraphTNEANetMP2(const TIntV& NTypeIdV) {
TStopwatch* Sw = TStopwatch::GetInstance();
Sw->Start(TStopwatch::ComputeETypes);
// Find relevant edge types
TIntSet NTypeIdSet(NTypeIdV);
TIntIntH EdgeCounter;
// Pass 1: edge types arriving at a selected node type get counter 1.
for (int i = 0; i < InETypes.Len(); i++) {
if (!NTypeIdSet.IsKey(TInt(i))) { continue; }
for (int j = 0; j < InETypes[i].Len(); j++) {
EdgeCounter.AddDat(InETypes[i][j], TInt(1));
}
}
// Pass 2: those also leaving a selected node type are promoted to 2.
for (int i = 0; i < OutETypes.Len(); i++) {
if (!NTypeIdSet.IsKey(TInt(i))) { continue; }
for (int j = 0; j < OutETypes[i].Len(); j++) {
if (EdgeCounter.IsKey(OutETypes[i][j])) { EdgeCounter.AddDat(OutETypes[i][j], TInt(2)); }
}
}
// Edge types with counter 2 have both endpoints inside the subgraph.
TIntV ETypeIdV;
for (typename TIntIntH::TIter iter = EdgeCounter.BegI(); iter < EdgeCounter.EndI(); iter++) {
if (iter.GetDat().Val == 2) {
ETypeIdV.Add(iter.GetKey());
}
}
Sw->Stop(TStopwatch::ComputeETypes);
return GetSubGraphTNEANetMP2(NTypeIdV, ETypeIdV);
}
/// Extracts the subgraph by finding the nodes and then finding all neighbors (node-based)
/// Parallel variant: flattens all selected nodes into one index space via a
/// per-type offset table, so a single OpenMP loop can cover every node.
PNEANetMP GetSubGraphTNEANetMP(const TIntV& NTypeIdV, const TIntV& ETypeIdV) {
TStopwatch* Sw = TStopwatch::GetInstance();
Sw->Start(TStopwatch::EstimateSizes);
// Pre-size the result graph from exact per-type node and edge counts.
int SubgraphSz = 0;
for (TIntV::TIter iter = NTypeIdV.BegI(); iter < NTypeIdV.EndI(); iter++) {
SubgraphSz += GetNodes((*iter).Val);
}
int SubgraphEdgeSz = 0;
for (TIntV::TIter iter = ETypeIdV.BegI(); iter < ETypeIdV.EndI(); iter++) {
SubgraphEdgeSz += GetEdges((*iter).Val);
}
Sw->Stop(TStopwatch::EstimateSizes);
Sw->Start(TStopwatch::InitGraph);
PNEANetMP PNewGraph = TNEANetMP::New(SubgraphSz, SubgraphEdgeSz);
TIntSet ETypeIdSet(ETypeIdV);
Sw->Stop(TStopwatch::InitGraph);
// NOTE(review): variable-length arrays are a GCC extension, not ISO C++;
// guarded here by the enclosing #ifdef GCC_ATOMIC, but a TVec would be portable.
TIntV OutETypeIdVV[NTypeIdV.Len()];
TIntV InETypeIdVV[NTypeIdV.Len()];
Sw->Start(TStopwatch::ExtractNbrETypes);
// Per node type, intersect its in/out edge-type lists with the selection.
#pragma omp parallel for schedule(static)
for (int i = 0; i < NTypeIdV.Len(); i++) {
TInt NTypeId = NTypeIdV[i];
TIntV* POutETypes = &(OutETypes[NTypeId]);
for (TIntV::TIter iter = POutETypes->BegI(); iter < POutETypes->EndI(); iter++) {
if (ETypeIdSet.IsKey(*iter)) { OutETypeIdVV[i].Add(*iter); }
}
TIntV* PInETypes = &(InETypes[NTypeId]);
for (TIntV::TIter iter = PInETypes->BegI(); iter < PInETypes->EndI(); iter++) {
if (ETypeIdSet.IsKey(*iter)) { InETypeIdVV[i].Add(*iter); }
}
}
Sw->Stop(TStopwatch::ExtractNbrETypes);
// Offsets[i] is the start of type i's slot range; slots cover the hash
// table's key-id space (GetMxKeyIds), so some slots may be empty.
TIntV Offsets(NTypeIdV.Len()+1);
Offsets[0] = 0;
for (int i = 0; i < NTypeIdV.Len(); i++) {
Offsets[i+1] = Offsets[i] + TypeNodeV[NTypeIdV[i]].NodeH.GetMxKeyIds();
}
Sw->Start(TStopwatch::PopulateGraph);
// One flat parallel loop over all (type, key-id) slots.
#pragma omp parallel for schedule(static)
for (int j = 0; j < Offsets[NTypeIdV.Len()]; j++) {
int i;
// Binary-search which type's slot range j falls into.
Offsets.SearchBinLeft(j, i);
THash<TInt,TNode> *NodeHPtr = &(TypeNodeV[NTypeIdV[i]].NodeH);
int KeyId = j - Offsets[i];
if (!NodeHPtr->IsKeyId(KeyId)) { continue; }
TNode* PNode = &((*NodeHPtr)[KeyId]);
int NId = PNode->GetId();
// Gather this node's in/out edge ids for the surviving edge types.
TIntV EIdV;
TIntV OutEIdV;
for (TIntV::TIter iter = OutETypeIdVV[i].BegI(); iter < OutETypeIdVV[i].EndI(); iter++) {
PNode->GetOutEIdV((*iter).Val, EIdV);
OutEIdV.AddV(EIdV);
}
TIntV InEIdV;
for (TIntV::TIter iter = InETypeIdVV[i].BegI(); iter < InETypeIdVV[i].EndI(); iter++) {
PNode->GetInEIdV((*iter).Val, EIdV);
InEIdV.AddV(EIdV);
}
// Insert node with adjacency; each edge is emitted by its source node,
// so AddEdgeUnchecked runs exactly once per surviving edge.
PNewGraph->AddNodeWithEdges(NId, InEIdV, OutEIdV);
for (TIntV::TIter iter = OutEIdV.BegI(); iter < OutEIdV.EndI(); iter++) {
PNewGraph->AddEdgeUnchecked((*iter), NId, GetEdge(*iter).GetDstNId());
}
}
Sw->Stop(TStopwatch::PopulateGraph);
// Counts were computed exactly up front; set them directly.
PNewGraph->SetNodes(SubgraphSz);
PNewGraph->SetEdges(SubgraphEdgeSz);
return PNewGraph;
}
/// Extracts the subgraph by finding the nodes and then finding all neighbors (node-based)
/// Variant of GetSubGraphTNEANetMP that parallelizes WITHIN each node type
/// (outer loop over types is serial, inner key-id loop is OpenMP-parallel).
PNEANetMP GetSubGraphTNEANetMP2(const TIntV& NTypeIdV, const TIntV& ETypeIdV) {
TStopwatch* Sw = TStopwatch::GetInstance();
Sw->Start(TStopwatch::EstimateSizes);
// Pre-size the result graph from exact per-type node and edge counts.
int SubgraphSz = 0;
for (TIntV::TIter iter = NTypeIdV.BegI(); iter < NTypeIdV.EndI(); iter++) {
SubgraphSz += GetNodes((*iter).Val);
}
int SubgraphEdgeSz = 0;
for (TIntV::TIter iter = ETypeIdV.BegI(); iter < ETypeIdV.EndI(); iter++) {
SubgraphEdgeSz += GetEdges((*iter).Val);
}
Sw->Stop(TStopwatch::EstimateSizes);
Sw->Start(TStopwatch::InitGraph);
PNEANetMP PNewGraph = TNEANetMP::New(SubgraphSz, SubgraphEdgeSz);
TIntSet ETypeIdSet(ETypeIdV);
Sw->Stop(TStopwatch::InitGraph);
// int NThreads = omp_get_max_threads();
// TIntV VectorPool[2*NThreads];
for (int i = 0; i < NTypeIdV.Len(); i++) {
Sw->Start(TStopwatch::ExtractNbrETypes);
TInt NTypeId = NTypeIdV[i];
// Intersect this type's out/in edge-type lists with the selection.
TIntV* POutETypes = &(OutETypes[NTypeId]);
TIntV OutETypeIdV;
for (TIntV::TIter iter = POutETypes->BegI(); iter < POutETypes->EndI(); iter++) {
if (ETypeIdSet.IsKey(*iter)) { OutETypeIdV.Add(*iter); }
}
TIntV* PInETypes = &(InETypes[NTypeId]);
TIntV InETypeIdV;
for (TIntV::TIter iter = PInETypes->BegI(); iter < PInETypes->EndI(); iter++) {
if (ETypeIdSet.IsKey(*iter)) { InETypeIdV.Add(*iter); }
}
Sw->Stop(TStopwatch::ExtractNbrETypes);
Sw->Start(TStopwatch::PopulateGraph);
THash<TInt,TNode> *NodeHPtr = &(TypeNodeV[NTypeId].NodeH);
// omp_set_num_threads(NThreads);
// Parallel sweep over the hash table's key-id space; empty slots skipped.
#pragma omp parallel for schedule(static)
for (int KeyId = 0; KeyId < NodeHPtr->GetMxKeyIds(); KeyId++) {
if (!NodeHPtr->IsKeyId(KeyId)) { continue; }
TIntV OutEIdV;
TIntV InEIdV;
// Sw->Start(TStopwatch::ExtractEdges);
TNode* PNode = &((*NodeHPtr)[KeyId]);
int NId = PNode->GetId();
//Sw->Start(TStopwatch::ExtractEdges);
// OutEIdV.Reduce(0);
// for (TIntV::TIter iter = OutETypeIdV.BegI(); iter < OutETypeIdV.EndI(); iter++) {
// PNode->GetOutEIdV((*iter).Val, *EIdV);
// OutEIdV->AddV(*EIdV);
// }
// InEIdV.Reduce(0);
// for (TIntV::TIter iter = InETypeIdV.BegI(); iter < InETypeIdV.EndI(); iter++) {
// PNode->GetInEIdV((*iter).Val, *EIdV);
// InEIdV->AddV(*EIdV);
// }
// Vector overloads gather edge ids for all kept types in one call.
PNode->GetOutEIdV(OutETypeIdV, OutEIdV);
PNode->GetInEIdV(InETypeIdV, InEIdV);
// Sw->Stop(TStopwatch::ExtractEdges);
// Sw->Start(TStopwatch::BuildSubgraph);
// Each edge is emitted once, by its source node.
PNewGraph->AddNodeWithEdges(NId, InEIdV, OutEIdV);
for (TIntV::TIter iter = OutEIdV.BegI(); iter < OutEIdV.EndI(); iter++) {
PNewGraph->AddEdgeUnchecked((*iter), NId, GetEdge(*iter).GetDstNId());
}
// Sw->Stop(TStopwatch::BuildSubgraph);
}
Sw->Stop(TStopwatch::PopulateGraph);
}
// Counts were computed exactly up front; set them directly.
PNewGraph->SetNodes(SubgraphSz);
PNewGraph->SetEdges(SubgraphEdgeSz);
return PNewGraph;
}
#endif // GCC_ATOMIC
friend class TPt<TMNet>;
};
typedef TMNet<TSVNode> TSVNet;
typedef TPt<TSVNet> PSVNet;
typedef TMNet<TMVNode> TMVNet;
typedef TPt<TMVNet> PMVNet;
typedef TMNet<TCVNode> TCVNet;
typedef TPt<TCVNet> PCVNet;
#endif // MGRAPH_H
|
threading.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for
* license information.
*/
#ifndef LIGHTGBM_UTILS_THREADING_H_
#define LIGHTGBM_UTILS_THREADING_H_
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <algorithm>
#include <functional>
#include <vector>
namespace LightGBM {
// Static helpers for splitting an index range [0, cnt) into per-thread
// blocks and running OpenMP-parallel loops/reductions over them.
class Threading {
 public:
  // Computes a block partition using the current OpenMP thread count.
  // Outputs: *out_nblock = number of blocks, *block_size = elements per block.
template <typename INDEX_T>
static inline void BlockInfo(INDEX_T cnt, INDEX_T min_cnt_per_block,
int* out_nblock, INDEX_T* block_size) {
int num_threads = OMP_NUM_THREADS();
BlockInfo<INDEX_T>(num_threads, cnt, min_cnt_per_block, out_nblock,
block_size);
}
  // Core partitioner: at most num_threads blocks, each holding at least
  // min_cnt_per_block elements; the block size is cache-line aligned
  // (SIZE_ALIGNED) when more than one block is used.
template <typename INDEX_T>
static inline void BlockInfo(int num_threads, INDEX_T cnt,
INDEX_T min_cnt_per_block, int* out_nblock,
INDEX_T* block_size) {
*out_nblock = std::min<int>(
num_threads,
static_cast<int>((cnt + min_cnt_per_block - 1) / min_cnt_per_block));
if (*out_nblock > 1) {
*block_size = SIZE_ALIGNED((cnt + (*out_nblock) - 1) / (*out_nblock));
} else {
*block_size = cnt;
}
}
  // Like BlockInfo, but rounds the block size up to a multiple of
  // min_cnt_per_block instead of aligning it.
template <typename INDEX_T>
static inline void BlockInfoForceSize(int num_threads, INDEX_T cnt,
INDEX_T min_cnt_per_block,
int* out_nblock, INDEX_T* block_size) {
*out_nblock = std::min<int>(
num_threads,
static_cast<int>((cnt + min_cnt_per_block - 1) / min_cnt_per_block));
if (*out_nblock > 1) {
*block_size = (cnt + (*out_nblock) - 1) / (*out_nblock);
// force the block size to the times of min_cnt_per_block
*block_size = (*block_size + min_cnt_per_block - 1) / min_cnt_per_block *
min_cnt_per_block;
} else {
*block_size = cnt;
}
}
  // Convenience overload using the current OpenMP thread count.
template <typename INDEX_T>
static inline void BlockInfoForceSize(INDEX_T cnt, INDEX_T min_cnt_per_block,
int* out_nblock, INDEX_T* block_size) {
int num_threads = OMP_NUM_THREADS();
BlockInfoForceSize<INDEX_T>(num_threads, cnt, min_cnt_per_block, out_nblock,
block_size);
}
  // Runs inner_fun(block_idx, block_start, block_end) over [start, end) in
  // parallel; exceptions inside blocks are captured by the OMP_* macros and
  // rethrown after the loop. Returns the number of blocks used.
template <typename INDEX_T>
static inline int For(
INDEX_T start, INDEX_T end, INDEX_T min_block_size,
const std::function<void(int, INDEX_T, INDEX_T)>& inner_fun) {
int n_block = 1;
INDEX_T num_inner = end - start;
BlockInfo<INDEX_T>(end - start, min_block_size, &n_block, &num_inner);
OMP_INIT_EX();
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < n_block; ++i) {
OMP_LOOP_EX_BEGIN();
INDEX_T inner_start = start + num_inner * i;
INDEX_T inner_end = std::min(end, inner_start + num_inner);
inner_fun(i, inner_start, inner_end);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
return n_block;
}
  // Parallel two-value sum reduction: each block writes its partial sums
  // into val_1s/val_2s (one slot per block, so no synchronization needed),
  // then the partials are summed serially into *res1/*res2.
template <typename INDEX_T, typename VAL1_T, typename VAL2_T>
static inline int SumReduction(
INDEX_T start, INDEX_T end, INDEX_T min_block_size,
const std::function<void(int, INDEX_T, INDEX_T, VAL1_T* res1,
VAL2_T* res2)>& inner_fun,
VAL1_T* res1, VAL2_T* res2) {
int n_block = 1;
INDEX_T num_inner = end - start;
BlockInfoForceSize<INDEX_T>(end - start, min_block_size, &n_block,
&num_inner);
std::vector<VAL1_T> val_1s(n_block, static_cast<VAL1_T>(0));
std::vector<VAL2_T> val_2s(n_block, static_cast<VAL2_T>(0));
OMP_INIT_EX();
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < n_block; ++i) {
OMP_LOOP_EX_BEGIN();
INDEX_T inner_start = start + num_inner * i;
INDEX_T inner_end = std::min(end, inner_start + num_inner);
inner_fun(i, inner_start, inner_end, &val_1s[i], &val_2s[i]);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
*res1 = 0;
*res2 = 0;
for (int i = 0; i < n_block; ++i) {
*res1 += val_1s[i];
*res2 += val_2s[i];
}
return n_block;
}
};
// Parallel stable partition helper. Splits [0, cnt) into per-thread blocks,
// lets a caller-supplied function partition each block into "left"/"right"
// halves in scratch buffers, then stitches all lefts followed by all rights
// into `out`. With TWO_BUFFER the callback writes rights into a separate
// buffer; otherwise it partitions in place in `left_` (the right half is
// stored reversed inside the block and copied forward afterwards).
template <typename INDEX_T, bool TWO_BUFFER>
class ParallelPartitionRunner {
 public:
  // num_data: capacity of the scratch buffers; min_block_size: smallest
  // per-thread block handed to the partition callback.
  ParallelPartitionRunner(INDEX_T num_data, INDEX_T min_block_size)
      : min_block_size_(min_block_size) {
    num_threads_ = OMP_NUM_THREADS();
    left_.resize(num_data);
    if (TWO_BUFFER) {
      right_.resize(num_data);
    }
    offsets_.resize(num_threads_);
    left_cnts_.resize(num_threads_);
    right_cnts_.resize(num_threads_);
    left_write_pos_.resize(num_threads_);
    right_write_pos_.resize(num_threads_);
  }

  ~ParallelPartitionRunner() {}

  // Grows/shrinks the scratch buffers for a new data size.
  void ReSize(INDEX_T num_data) {
    left_.resize(num_data);
    if (TWO_BUFFER) {
      right_.resize(num_data);
    }
  }

  // Partitions cnt elements with `func` and writes the stitched result to
  // `out` (lefts first, then rights). `func(block, start, cnt, left, right)`
  // must return how many of its cnt elements are "left". Returns the total
  // left count. FORCE_SIZE selects the block-size rounding strategy.
  template<bool FORCE_SIZE>
  INDEX_T Run(
      INDEX_T cnt,
      const std::function<INDEX_T(int, INDEX_T, INDEX_T, INDEX_T*, INDEX_T*)>& func,
      INDEX_T* out) {
    int nblock = 1;
    INDEX_T inner_size = cnt;
    if (FORCE_SIZE) {
      Threading::BlockInfoForceSize<INDEX_T>(num_threads_, cnt, min_block_size_,
                                             &nblock, &inner_size);
    } else {
      Threading::BlockInfo<INDEX_T>(num_threads_, cnt, min_block_size_, &nblock,
                                    &inner_size);
    }
    OMP_INIT_EX();
#pragma omp parallel for schedule(static, 1) num_threads(num_threads_)
    for (int i = 0; i < nblock; ++i) {
      OMP_LOOP_EX_BEGIN();
      INDEX_T cur_start = i * inner_size;
      INDEX_T cur_cnt = std::min(inner_size, cnt - cur_start);
      offsets_[i] = cur_start;
      if (cur_cnt <= 0) {
        left_cnts_[i] = 0;
        right_cnts_[i] = 0;
        continue;
      }
      auto left_ptr = left_.data() + cur_start;
      INDEX_T* right_ptr = nullptr;
      if (TWO_BUFFER) {
        right_ptr = right_.data() + cur_start;
      }
      // split data inner, reduce the times of function called
      INDEX_T cur_left_count =
          func(i, cur_start, cur_cnt, left_ptr, right_ptr);
      if (!TWO_BUFFER) {
        // reverse for one buffer
        std::reverse(left_ptr + cur_left_count, left_ptr + cur_cnt);
      }
      left_cnts_[i] = cur_left_count;
      right_cnts_[i] = cur_cnt - cur_left_count;
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
    // Exclusive prefix sums give each block's write position in `out`.
    left_write_pos_[0] = 0;
    right_write_pos_[0] = 0;
    for (int i = 1; i < nblock; ++i) {
      left_write_pos_[i] = left_write_pos_[i - 1] + left_cnts_[i - 1];
      right_write_pos_[i] = right_write_pos_[i - 1] + right_cnts_[i - 1];
    }
    // Fix: accumulate in INDEX_T, not data_size_t — the narrower type could
    // truncate the total when INDEX_T is 64-bit.
    INDEX_T left_cnt = left_write_pos_[nblock - 1] + left_cnts_[nblock - 1];
    auto right_start = out + left_cnt;
#pragma omp parallel for schedule(static, 1) num_threads(num_threads_)
    for (int i = 0; i < nblock; ++i) {
      std::copy_n(left_.data() + offsets_[i], left_cnts_[i],
                  out + left_write_pos_[i]);
      if (TWO_BUFFER) {
        std::copy_n(right_.data() + offsets_[i], right_cnts_[i],
                    right_start + right_write_pos_[i]);
      } else {
        // One-buffer mode: the right half sits after the left half inside
        // the block (already re-reversed into forward order above).
        std::copy_n(left_.data() + offsets_[i] + left_cnts_[i], right_cnts_[i],
                    right_start + right_write_pos_[i]);
      }
    }
    return left_cnt;
  }

 private:
  int num_threads_;             // thread count captured at construction
  INDEX_T min_block_size_;      // lower bound on per-block element count
  std::vector<INDEX_T> left_;   // scratch: left partitions (whole block in one-buffer mode)
  std::vector<INDEX_T> right_;  // scratch: right partitions (TWO_BUFFER only)
  std::vector<INDEX_T> offsets_;         // per-block start offset
  std::vector<INDEX_T> left_cnts_;       // per-block left count
  std::vector<INDEX_T> right_cnts_;      // per-block right count
  std::vector<INDEX_T> left_write_pos_;  // per-block output position, lefts
  std::vector<INDEX_T> right_write_pos_; // per-block output position, rights
};
} // namespace LightGBM
#endif // LightGBM_UTILS_THREADING_H_
|
DRB057-jacobiinitialize-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Use of private() clause
*/
#include "omprace.h"
#include <omp.h>
#include <stdio.h>
#include <math.h>
/* Problem size and shared state for the Jacobi initializer (DataRaceBench). */
#define MSIZE 200
int n=MSIZE, m=MSIZE;            /* grid dimensions */
double alpha = 0.0543;           /* Helmholtz coefficient used in the RHS */
double u[MSIZE][MSIZE], f[MSIZE][MSIZE], uold[MSIZE][MSIZE]; /* solution, RHS, scratch */
double dx, dy;                   /* grid spacings, set by initialize() */
/* Initializes the Jacobi data: u is zeroed and f is filled with the RHS on
 * [-1,1]^2. DataRaceBench DRB057 (race-free variant): i, j, xx, yy are all
 * listed in private(), so every thread owns its loop/temporary variables and
 * distinct iterations write distinct u/f cells -- no data race. */
void
initialize ()
{
int i, j, xx, yy;
dx = 2.0 / (n - 1);
dy = 2.0 / (m - 1);
/* Initialize initial condition and RHS */
#pragma omp parallel for private(i,j,xx,yy)
for (i = 0; i < n; i++)
for (j = 0; j < m; j++)
{
/* NOTE(review): the (int) casts truncate the coordinates toward zero, so
 * xx/yy are 0 or -1 over most of the domain; this matches the upstream
 * benchmark code, presumably intentional for the benchmark's purposes. */
xx = (int) (-1.0 + dx * (i - 1)); /* -1 < x < 1 */
yy = (int) (-1.0 + dy * (j - 1)); /* -1 < y < 1 */
u[i][j] = 0.0;
f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)
- 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);
}
}
/* Driver: runs initialize() between the race-detector instrumentation
 * hooks declared in omprace.h. */
int main()
{
omprace_init();
initialize();
omprace_fini();
return 0;
}
|
spectral_sequence_reduction.h | /* Copyright 2013 IST Austria
Contributed by: Jan Reininghaus
This file is part of PHAT.
PHAT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PHAT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with PHAT. If not, see <http://www.gnu.org/licenses/>. */
#pragma once
#include <phat/helpers/misc.h>
#include <phat/boundary_matrix.h>
namespace phat {
// Spectral-sequence-style parallel reduction of a boundary matrix.
// The columns are split into num_stripes contiguous row/column stripes; in
// pass p, stripe s reduces its still-unreduced columns against pivots lying
// in row block (s - p). lowest_one_lookup maps a row index to the column
// whose pivot (lowest one) it is, or -1 if that row is not yet a pivot.
class spectral_sequence_reduction {
public:
    template< typename Representation >
    void operator () ( boundary_matrix< Representation >& boundary_matrix ) {
        const index nr_columns = boundary_matrix.get_num_cols();
        std::vector< index > lowest_one_lookup( nr_columns, -1 );

        //const index num_stripes = (index) sqrt( (double)nr_columns );
        const index num_stripes = omp_get_max_threads();

        // Ceiling division. (The original initializer contained a stray
        // self-assignment: "... : block_size = nr_columns / num_stripes + 1".)
        index block_size = ( nr_columns % num_stripes == 0 ) ? nr_columns / num_stripes : nr_columns / num_stripes + 1;

        std::vector< std::vector< index > > unreduced_cols_cur_pass( num_stripes );
        std::vector< std::vector< index > > unreduced_cols_next_pass( num_stripes );

        // Reduce one dimension at a time, highest dimension first.
        for( index cur_dim = boundary_matrix.get_max_dim(); cur_dim >= 1 ; cur_dim-- ) {
            // Collect this dimension's non-empty columns, stripe by stripe.
            #pragma omp parallel for schedule( guided, 1 )
            for( index cur_stripe = 0; cur_stripe < num_stripes; cur_stripe++ ) {
                index col_begin = cur_stripe * block_size;
                index col_end = std::min( (cur_stripe+1) * block_size, nr_columns );
                for( index cur_col = col_begin; cur_col < col_end; cur_col++ )
                    if( boundary_matrix.get_dim( cur_col ) == cur_dim && boundary_matrix.get_max_index( cur_col ) != -1 )
                        unreduced_cols_cur_pass[ cur_stripe ].push_back( cur_col );
            }
            // In each pass, every stripe works against one row block; after
            // num_stripes passes each column has seen every block at or
            // below its own stripe.
            for( index cur_pass = 0; cur_pass < num_stripes; cur_pass++ ) {
                boundary_matrix.sync();
                #pragma omp parallel for schedule( guided, 1 )
                for( int cur_stripe = 0; cur_stripe < num_stripes; cur_stripe++ ) {
                    index row_begin = (cur_stripe - cur_pass) * block_size;
                    index row_end = row_begin + block_size;
                    unreduced_cols_next_pass[ cur_stripe ].clear();
                    for( index idx = 0; idx < (index)unreduced_cols_cur_pass[ cur_stripe ].size(); idx++ ) {
                        index cur_col = unreduced_cols_cur_pass[ cur_stripe ][ idx ];
                        index lowest_one = boundary_matrix.get_max_index( cur_col );
                        // Cancel pivots while the lowest one stays inside this
                        // pass's row block and is already claimed.
                        while( lowest_one != -1 && lowest_one >= row_begin && lowest_one < row_end && lowest_one_lookup[ lowest_one ] != -1 ) {
                            boundary_matrix.add_to( lowest_one_lookup[ lowest_one ], cur_col );
                            lowest_one = boundary_matrix.get_max_index( cur_col );
                        }
                        if( lowest_one != -1 ) {
                            if( lowest_one >= row_begin && lowest_one < row_end ) {
                                // Column is fully reduced: claim the pivot and
                                // clear the paired row's column.
                                lowest_one_lookup[ lowest_one ] = cur_col;
                                boundary_matrix.clear( lowest_one );
                                boundary_matrix.finalize( cur_col );
                            } else {
                                // Pivot lies in another block: retry next pass.
                                unreduced_cols_next_pass[ cur_stripe ].push_back( cur_col );
                            }
                        }
                    }
                    unreduced_cols_next_pass[ cur_stripe ].swap( unreduced_cols_cur_pass[ cur_stripe ] );
                }
            }
        }
    }
};
}
|
matmul-parallel.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define SEED 123
/* Releases a size-row matrix allocated as an array of row pointers:
 * frees every row, then the pointer array itself. */
void free_matrix(int **m, int size) {
  int row = 0;
  while (row < size) {
    free(m[row]);
    row++;
  }
  free(m);
}
/* Returns the size x size product a*b as a freshly allocated matrix.
 * Caller owns the result (free with free_matrix). */
int **mul(int **a, int **b, int size) {
  int **product = malloc(size * sizeof(int *));
  for (int row = 0; row < size; row++) {
    product[row] = calloc(size, sizeof(int));
    for (int col = 0; col < size; col++) {
      int acc = 0;
      for (int t = 0; t < size; t++)
        acc += a[row][t] * b[t][col];
      product[row][col] = acc;
    }
  }
  return product;
}
// Parallelise this function:
/* Reduces data[0..n-1] to a single product via pairwise (tree) combination:
 * at level i, data[j] <- data[j] * data[j+i] for j = 0, 2i, 4i, ...
 * The taskwait after each level means the tasks of one level never overlap
 * with the next level's reads.
 * NOTE(review): with one task per level followed by an immediate taskwait,
 * levels are effectively serialized -- presumably the exercise intends one
 * task per j instead; confirm against the assignment spec.
 * NOTE(review): mul() allocates a new matrix each call, so the previous
 * data[j] matrices leak; matches the rest of this program's (lack of)
 * cleanup. */
int **array_mul(int ***data, int n, int size) {
#pragma omp parallel
{
#pragma omp single
{
for (int i = 1; i <= n; i*=2) {
#pragma omp task shared(i)
{
for (int j = 0; j + i < n; j += i*2)
data[j] = mul(data[j], data[j+i], size);
}
#pragma omp taskwait
}
}
}
return data[0]; /* final product accumulates in slot 0 */
}
/* Builds a size x size matrix whose entries are drawn from {-1, +1},
 * consuming exactly one rand() call per cell in row-major order. */
int **rnd_matrix(int size) {
  int **mat = malloc(size * sizeof(int *));
  for (int r = 0; r < size; r++) {
    mat[r] = malloc(size * sizeof(int));
    for (int c = 0; c < size; c++) {
      /* rand() % 2 yields 0 or 1; map {0,1} -> {-1,+1}. */
      mat[r][c] = (rand() % 2 == 0) ? -1 : 1;
    }
  }
  return mat;
}
/* Writes the matrix to stdout, one row per line, entries separated by a
 * trailing space (same byte-for-byte format as before). */
void print_matrix(int **m, int size) {
  for (int r = 0; r < size; r++) {
    for (int c = 0; c < size; c++)
      printf("%d ", m[r][c]);
    printf("\n");
  }
}
/* Driver: reads n (matrix count) and size from the file named by argv[1],
 * generates n random +/-1 matrices, reduces them with array_mul, prints the
 * product to stdout and the elapsed wall time to stderr.
 * Fixes vs. original: fscanf's return value is checked (n/size were used
 * uninitialized on a malformed file) and the input FILE* is closed. */
int main(int argc, char **argv) {
  int n, size;
  double t;
  FILE *input;
  if (argc < 2) {
    fprintf(stderr, "Error: missing path to input file!\n");
    return EXIT_FAILURE;
  }
  if ((input = fopen(argv[1], "r")) == NULL) {
    fprintf(stderr, "Error: could not open input file!\n");
    return EXIT_FAILURE;
  }
  /* Validate the header instead of silently using garbage on a short read. */
  if (fscanf(input, "%d %d", &n, &size) != 2) {
    fprintf(stderr, "Error: malformed input file!\n");
    fclose(input);
    return EXIT_FAILURE;
  }
  fclose(input); /* nothing else is read from the file */
  srand(SEED);
  // Do not change this line
  omp_set_num_threads(4);
  int ***data = malloc(n * sizeof(int **));
  for (int i = 0; i < n; i++)
    data[i] = rnd_matrix(size);
  t = omp_get_wtime();
  int **ret = array_mul(data, n, size);
  t = omp_get_wtime() - t;
  print_matrix(ret, size);
  fprintf(stderr, "%lf\n", t);
  free_matrix(ret, size);
  free(data);
  return 0;
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Computes *result = *x - *y for struct timeval values, normalizing *y in
 * place so the final component-wise subtraction needs no borrow and
 * result->tv_usec lands in [0, 1000000). Returns 1 when the difference is
 * negative (x earlier than y), otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microseconds if x has fewer usecs. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry excess microseconds from the difference into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization, tv_usec is certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative iff x's (adjusted) seconds fall short of y's. */
  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for the order-1 3D 7-point variable-coefficient stencil.
 * Usage: ./a.out [Nx Ny Nz [Nt]]  (interior sizes; 2 halo cells are added).
 * Fixes vs. original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when too few arguments were given;
 *    they now have documented defaults.
 *  - initialization loops started at 1, leaving plane/row/column 0
 *    uninitialized although the stencil reads i-1/j-1/k-1 down to 0; the
 *    loops now start at 0.
 *  - the timing update called lowercase min(), which is not defined; it now
 *    uses the MIN macro defined above. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Defaults: 32^3 interior (+2 halo) and 10 time steps when not given. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  /* A[2][Nz][Ny][Nx]: double-buffered grid (t%2 / (t+1)%2). */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* coef[7][Nz][Ny][Nx]: one coefficient field per stencil point. */
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 32;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables (whole grid, including the halo the stencil reads)
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Run the kernel TESTS times; report each run and keep the minimum. */
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
                                  coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
                                  coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
                                  coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
                                  coef[6][i][j][k] * A[t%2][i ][j ][k+1];
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);

  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
|
pubkeylp.h | /**
* @file pubkeylp.h -- Public key type for lattice crypto operations.
* @author TPOC: palisade@njit.edu
*
* @copyright Copyright (c) 2017, New Jersey Institute of Technology (NJIT)
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or other
* materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef LBCRYPTO_CRYPTO_PUBKEYLP_H
#define LBCRYPTO_CRYPTO_PUBKEYLP_H
//Includes Section
#include <vector>
#include <iomanip>
#include "lattice/elemparams.h"
#include "lattice/ilparams.h"
#include "lattice/ildcrtparams.h"
#include "lattice/ilelement.h"
#include "utils/inttypes.h"
#include "utils/hashutil.h"
#include "math/distrgen.h"
#include "utils/serializablehelper.h"
#include "encoding/encodingparams.h"
/**
* @namespace lbcrypto
* The namespace of lbcrypto
*/
namespace lbcrypto {
//forward declarations, used to resolve circular header dependencies
template<typename Element>
class CiphertextImpl;
template<typename Element>
using Ciphertext = shared_ptr<CiphertextImpl<Element>>;
template<typename Element>
class RationalCiphertext;
template<typename Element>
class LPCryptoParameters;
template<typename Element>
class LPCryptoParametersLTV;
template<typename Element>
class LPCryptoParametersBGV;
template<typename Element>
class LPCryptoParametersBFV;
template<typename Element>
class LPCryptoParametersStehleSteinfeld;
template<typename Element>
class CryptoObject;
/**
 * @brief Result of an encryption operation: a validity flag plus the count
 * of plaintext bytes that were encrypted.
 */
struct EncryptResult {
	/** Default-construct a failed result: invalid, nothing encrypted. */
	explicit EncryptResult() {}

	/**
	 * Construct a successful result.
	 * @param len number of plaintext bytes that were encrypted.
	 */
	explicit EncryptResult(size_t len) : isValid(true), numBytesEncrypted(len) {}

	bool isValid = false; /**< whether the encryption was successful */
	usint numBytesEncrypted = 0; /**< count of the number of plaintext bytes that were encrypted */
};
/**
* @brief Decryption result. This represents whether the decryption of a ciphertext was performed correctly.
*
* This is intended to eventually incorporate information about the amount of padding in a decoded ciphertext,
* to ensure that the correct amount of padding is stripped away.
* It is intended to provide a very simple kind of checksum eventually.
* This notion of a decoding output is inherited from the crypto++ library.
* It is also intended to be used in recover-and-restart robust functionality if not all ciphertext is received over a lossy channel, so that once all information has been received, decoding/decryption can be performed.
* This is intended to be returned with the output of a decryption operation.
*/
struct DecryptResult {
	/** Default-construct a failed result: invalid, zero-length message. */
	explicit DecryptResult() {}

	/**
	 * Construct a successful result.
	 * @param len length of the decrypted plaintext message.
	 */
	explicit DecryptResult(size_t len) : isValid(true), messageLength(len) {}

	bool isValid = false; /**< whether the decryption was successful */
	usint messageLength = 0; /**< the length of the decrypted plaintext message */
};
/**
* @brief Abstract interface class for LP Keys
*
* @tparam Element a ring element.
*/
template <class Element>
class LPKey : public CryptoObject<Element>, public Serializable {
public:
// Construct a key bound to a crypto context, optionally tagged with an id.
LPKey(CryptoContext<Element> cc, const string& id = "") : CryptoObject<Element>(cc, id) {}
// Construct a key sharing the context and tag of an existing CryptoObject.
LPKey(shared_ptr<CryptoObject<Element>> co) : CryptoObject<Element>(co) {}
virtual ~LPKey() {}
};
template<typename Element>
class LPPublicKeyImpl;
template<typename Element>
using LPPublicKey = shared_ptr<LPPublicKeyImpl<Element>>;
/**
* @brief Concrete class for LP public keys
* @tparam Element a ring element.
*/
template <typename Element>
class LPPublicKeyImpl : public LPKey<Element> {
public:
/**
* Basic constructor.
*
* @param cc crypto context in which this public key lives.
* @param id optional key tag.
*/
LPPublicKeyImpl(CryptoContext<Element> cc, const string& id = "") : LPKey<Element>(cc, id) {}
/**
* Copy constructor; copies the context, key tag, and key elements.
*
*@param &rhs LPPublicKeyImpl to copy from
*/
explicit LPPublicKeyImpl(const LPPublicKeyImpl<Element> &rhs) : LPKey<Element>(rhs.GetCryptoContext(), rhs.GetKeyTag()) {
m_h = rhs.m_h;
}
/**
* Move constructor; copies the context and key tag, moves the elements.
*
*@param &rhs LPPublicKeyImpl to move from
*/
explicit LPPublicKeyImpl(LPPublicKeyImpl<Element> &&rhs) : LPKey<Element>(rhs.GetCryptoContext(), rhs.GetKeyTag()) {
m_h = std::move(rhs.m_h);
}
/**
* Assignment Operator.
* NOTE(review): copies the context and elements but, unlike the copy
* constructor, does not copy the key tag — confirm this is intentional.
*
* @param &rhs LPPublicKeyImpl to copy from
*/
const LPPublicKeyImpl<Element>& operator=(const LPPublicKeyImpl<Element> &rhs) {
this->context = rhs.context;
this->m_h = rhs.m_h;
return *this;
}
/**
* Move Assignment Operator.
* Clears the source's context after copying it; the key tag is not moved
* (NOTE(review): confirm this asymmetry with the move constructor is intended).
*
* @param &rhs LPPublicKeyImpl to move from
*/
const LPPublicKeyImpl<Element>& operator=(LPPublicKeyImpl<Element> &&rhs) {
this->context = rhs.context;
rhs.context = 0;
m_h = std::move(rhs.m_h);
return *this;
}
//@Get Properties
/**
* Gets the computed public key.
* @return the vector of public key elements.
*/
const std::vector<Element> &GetPublicElements() const {
return this->m_h;
}
//@Set Properties
/**
* Sets the public key vector of Element (replaces any existing vector).
* @param &element is the public key Element vector to be copied.
*/
void SetPublicElements(const std::vector<Element> &element) {
m_h = element;
}
/**
* Sets the public key vector of Element (replaces any existing vector).
* @param &&element is the public key Element vector to be moved.
*/
void SetPublicElements(std::vector<Element> &&element) {
m_h = std::move(element);
}
/**
* Inserts a public key Element at index idx, shifting later elements up.
* NOTE(review): despite the name, this inserts rather than overwrites the
* element at idx — verify callers expect insert semantics.
* @param idx position at which to insert.
* @param &element is the public key Element to be copied.
*/
void SetPublicElementAtIndex(usint idx, const Element &element) {
m_h.insert(m_h.begin() + idx, element);
}
/**
* Inserts a public key Element at index idx, shifting later elements up.
* NOTE(review): insert, not overwrite — same caveat as the copy overload.
* @param idx position at which to insert.
* @param &&element is the public key Element to be moved.
*/
void SetPublicElementAtIndex(usint idx, Element &&element) {
m_h.insert(m_h.begin() + idx, std::move(element));
}
/**
* Serialize the object into a Serialized
* @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
* @return true if successfully serialized
*/
bool Serialize(Serialized *serObj) const;
/**
* Populate the object from the deserialization of the Serialized
* @param &serObj contains the serialized object
* @return true on success
*/
bool Deserialize(const Serialized &serObj);
// Deep equality: base-class (CryptoObject) comparison plus element-wise
// comparison of the public key vectors.
bool operator==(const LPPublicKeyImpl& other) const {
if( !CryptoObject<Element>::operator ==(other) )
return false;
if( m_h.size() != other.m_h.size() )
return false;
for( size_t i = 0; i < m_h.size(); i++ )
if( m_h[i] != other.m_h[i] )
return false;
return true;
}
bool operator!=(const LPPublicKeyImpl& other) const { return ! (*this == other); }
private:
std::vector<Element> m_h; // the public key element vector
};
template<typename Element>
class LPEvalKeyImpl;
template<typename Element>
using LPEvalKey = shared_ptr<LPEvalKeyImpl<Element>>;
/**
* @brief Abstract interface for LP evaluation/proxy keys
* @tparam Element a ring element.
*/
template <class Element>
class LPEvalKeyImpl : public LPKey<Element> {
public:
	/**
	 * Basic constructor.
	 *
	 * @param cc the crypto context this evaluation key belongs to.
	 */
	LPEvalKeyImpl(CryptoContext<Element> cc) : LPKey<Element>(cc) {}

	virtual ~LPEvalKeyImpl() {}

	/**
	 * Setter function to store Relinearization Element Vector A.
	 * Throws exception, to be overridden by derived class.
	 *
	 * @param &a is the Element vector to be copied.
	 */
	virtual void SetAVector(const std::vector<Element> &a) {
		throw std::runtime_error("SetAVector copy operation not supported");
	}

	/**
	 * Setter function to store Relinearization Element Vector A.
	 * Throws exception, to be overridden by derived class.
	 *
	 * @param &&a is the Element vector to be moved.
	 */
	virtual void SetAVector(std::vector<Element> &&a) {
		throw std::runtime_error("SetAVector move operation not supported");
	}

	/**
	 * Getter function to access Relinearization Element Vector A.
	 * Throws exception, to be overridden by derived class.
	 *
	 * @return Element vector A.
	 */
	virtual const std::vector<Element> &GetAVector() const {
		throw std::runtime_error("GetAVector operation not supported");
	}

	/**
	 * Setter function to store Relinearization Element Vector B.
	 * Throws exception, to be overridden by derived class.
	 *
	 * @param &b is the Element vector to be copied.
	 */
	virtual void SetBVector(const std::vector<Element> &b) {
		throw std::runtime_error("SetBVector copy operation not supported");
	}

	/**
	 * Setter function to store Relinearization Element Vector B.
	 * Throws exception, to be overridden by derived class.
	 *
	 * @param &&b is the Element vector to be moved.
	 */
	virtual void SetBVector(std::vector<Element> &&b) {
		throw std::runtime_error("SetBVector move operation not supported");
	}

	/**
	 * Getter function to access Relinearization Element Vector B.
	 * Throws exception, to be overridden by derived class.
	 *
	 * @return Element vector B.
	 */
	virtual const std::vector<Element> &GetBVector() const {
		throw std::runtime_error("GetBVector operation not supported");
	}

	/**
	 * Setter function to store key switch Element.
	 * Throws exception, to be overridden by derived class.
	 *
	 * @param &a is the Element to be copied.
	 */
	virtual void SetA(const Element &a) {
		throw std::runtime_error("SetA copy operation not supported");
	}

	/**
	 * Setter function to store key switch Element.
	 * Throws exception, to be overridden by derived class.
	 *
	 * @param &&a is the Element to be moved.
	 */
	virtual void SetA(Element &&a) {
		throw std::runtime_error("SetA move operation not supported");
	}

	/**
	 * Getter function to access key switch Element.
	 * Throws exception, to be overridden by derived class.
	 *
	 * @return Element.
	 */
	virtual const Element &GetA() const {
		throw std::runtime_error("GetA operation not supported");
	}

	/** Equality delegates to the derived class's key_compare. */
	friend bool operator==(const LPEvalKeyImpl& a, const LPEvalKeyImpl& b) {
		return a.key_compare(b);
	}

	// FIX: second parameter is now const-qualified. It was previously a
	// non-const reference, which was inconsistent with operator== and made
	// it impossible to compare two const evaluation keys with !=.
	friend bool operator!=(const LPEvalKeyImpl& a, const LPEvalKeyImpl& b) { return ! (a == b); }

	/** Deep comparison of key material; implemented by each concrete key type. */
	virtual bool key_compare(const LPEvalKeyImpl& other) const = 0;
};
template<typename Element>
class LPEvalKeyRelinImpl;
template<typename Element>
using LPEvalKeyRelin = shared_ptr<LPEvalKeyRelinImpl<Element>>;
/**
* @brief Concrete class for Relinearization keys of RLWE scheme
* @tparam Element a ring element.
*/
template <class Element>
class LPEvalKeyRelinImpl : public LPEvalKeyImpl<Element> {
public:
/**
* Basic constructor.
*
* @param cc the crypto context this relinearization key belongs to.
*/
LPEvalKeyRelinImpl(CryptoContext<Element> cc) : LPEvalKeyImpl<Element>(cc) {}
virtual ~LPEvalKeyRelinImpl() {}
/**
* Copy constructor; copies the context and the key material.
*
*@param &rhs key to copy from
*/
explicit LPEvalKeyRelinImpl(const LPEvalKeyRelinImpl<Element> &rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) {
m_rKey = rhs.m_rKey;
}
/**
* Move constructor; copies the context, moves the key material.
*
*@param &rhs key to move from
*/
explicit LPEvalKeyRelinImpl(LPEvalKeyRelinImpl<Element> &&rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) {
m_rKey = std::move(rhs.m_rKey);
}
/**
* Assignment Operator.
*
* @param &rhs key to copy from
*/
const LPEvalKeyRelinImpl<Element>& operator=(const LPEvalKeyRelinImpl<Element> &rhs) {
this->context = rhs.context;
this->m_rKey = rhs.m_rKey;
return *this;
}
/**
* Move Assignment Operator.
* Clears the source key's context after the move.
*
* @param &rhs key to move from
*/
const LPEvalKeyRelinImpl<Element>& operator=(LPEvalKeyRelinImpl<Element> &&rhs) {
this->context = rhs.context;
rhs.context = 0;
m_rKey = std::move(rhs.m_rKey);
return *this;
}
/**
* Setter function to store Relinearization Element Vector A.
* NOTE(review): this inserts a new row at position 0 of m_rKey rather than
* overwriting an existing one, so a second call stacks two A vectors —
* confirm callers set A exactly once per key.
*
* @param &a is the Element vector to be copied.
*/
virtual void SetAVector(const std::vector<Element> &a) {
m_rKey.insert(m_rKey.begin() + 0, a);
}
/**
* Setter function to store Relinearization Element Vector A (move form).
* Same insert-at-front semantics as the copy overload.
*
* @param &&a is the Element vector to be moved.
*/
virtual void SetAVector(std::vector<Element> &&a) {
m_rKey.insert(m_rKey.begin() + 0, std::move(a));
}
/**
* Getter function to access Relinearization Element Vector A (row 0).
* Throws std::out_of_range (via vector::at) if A was never set.
*
* @return Element vector A.
*/
virtual const std::vector<Element> &GetAVector() const {
return m_rKey.at(0);
}
/**
* Setter function to store Relinearization Element Vector B.
* NOTE(review): inserts at position 1, which presumes A (row 0) was set
* first — inserting at begin()+1 into an empty vector is invalid. Confirm
* callers always set A before B.
*
* @param &b is the Element vector to be copied.
*/
virtual void SetBVector(const std::vector<Element> &b) {
m_rKey.insert(m_rKey.begin() + 1, b);
}
/**
* Setter function to store Relinearization Element Vector B (move form).
* Same position-1 insert semantics/caveats as the copy overload.
*
* @param &&b is the Element vector to be moved.
*/
virtual void SetBVector(std::vector<Element> &&b) {
m_rKey.insert(m_rKey.begin() + 1, std::move(b));
}
/**
* Getter function to access Relinearization Element Vector B (row 1).
* Throws std::out_of_range (via vector::at) if B was never set.
*
* @return Element vector B.
*/
virtual const std::vector<Element> &GetBVector() const {
return m_rKey.at(1);
}
/**
* Serialize the object into a Serialized
* @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
* @return true if successfully serialized
*/
bool Serialize(Serialized *serObj) const;
/**
* SerializeWithoutContext - serializes the object into a Serialized, without the cryptocontext
* @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
* @return true if successfully serialized
*/
bool SerializeWithoutContext(Serialized *serObj) const;
/**
* Deserialize from the serialization
* @param serObj - contains the serialization
* @return true on success
*/
bool Deserialize(const Serialized &serObj);
// Deep comparison: requires other to be an LPEvalKeyRelinImpl (the
// dynamic_cast to a reference throws std::bad_cast otherwise), then
// compares the base CryptoObject state and every key element.
bool key_compare(const LPEvalKeyImpl<Element>& other) const {
const LPEvalKeyRelinImpl<Element> &oth = dynamic_cast<const LPEvalKeyRelinImpl<Element> &>(other);
if( !CryptoObject<Element>::operator==(other) )
return false;
if( this->m_rKey.size() != oth.m_rKey.size() ) return false;
for( size_t i=0; i<this->m_rKey.size(); i++ ) {
if( this->m_rKey[i].size() != oth.m_rKey[i].size() ) return false;
for( size_t j=0; j<this->m_rKey[i].size(); j++ ) {
if( this->m_rKey[i][j] != oth.m_rKey[i][j] )
return false;
}
}
return true;
}
private:
//private member to store vector of vector of Element; row 0 is A, row 1 is B.
std::vector< std::vector<Element> > m_rKey;
};
template<typename Element>
class LPEvalKeyNTRURelinImpl;
template<typename Element>
using LPEvalKeyNTRURelin = shared_ptr<LPEvalKeyNTRURelinImpl<Element>>;
/**
* @brief Evaluation Relinearization keys for NTRU scheme.
* @tparam Element a ring element.
*/
template <class Element>
class LPEvalKeyNTRURelinImpl : public LPEvalKeyImpl<Element> {
public:
/**
* Basic constructor.
*
* @param cc the crypto context this key belongs to.
*/
LPEvalKeyNTRURelinImpl(CryptoContext<Element> cc) : LPEvalKeyImpl<Element>(cc) {}
virtual ~LPEvalKeyNTRURelinImpl() {}
/**
* Copy constructor; copies the context and the key material.
*
*@param &rhs key to copy from
*/
explicit LPEvalKeyNTRURelinImpl(const LPEvalKeyNTRURelinImpl<Element> &rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) {
m_rKey = rhs.m_rKey;
}
/**
* Move constructor; copies the context, moves the key material.
*
*@param &rhs key to move from
*/
explicit LPEvalKeyNTRURelinImpl(LPEvalKeyNTRURelinImpl<Element> &&rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) {
m_rKey = std::move(rhs.m_rKey);
}
/**
* Assignment Operator.
*
* @param &rhs key to copy from
*/
const LPEvalKeyNTRURelinImpl<Element>& operator=(const LPEvalKeyNTRURelinImpl<Element> &rhs) {
this->context = rhs.context;
this->m_rKey = rhs.m_rKey;
return *this;
}
/**
* Move Assignment Operator.
* Clears the source key's context after the move.
*
* @param &rhs key to move from
*/
const LPEvalKeyNTRURelinImpl<Element>& operator=(LPEvalKeyNTRURelinImpl<Element> &&rhs) {
this->context = rhs.context;
rhs.context = 0;
m_rKey = std::move(rhs.m_rKey);
return *this;
}
/**
* Setter function to store Relinearization Element Vector A.
* NOTE(review): this inserts a's elements ahead of any existing key
* material rather than replacing it (equivalent to assignment only when
* m_rKey is empty), whereas the move overload below replaces wholesale —
* confirm this asymmetry is intended.
*
* @param &a is the Element vector to be copied.
*/
virtual void SetAVector(const std::vector<Element> &a) {
for (usint i = 0; i < a.size(); i++) {
m_rKey.insert(m_rKey.begin() + i, a.at(i));
}
}
/**
* Setter function to store Relinearization Element Vector A.
* Replaces the stored key vector wholesale.
*
* @param &&a is the Element vector to be moved.
*/
virtual void SetAVector(std::vector<Element> &&a) {
m_rKey = std::move(a);
}
/**
* Getter function to access Relinearization Element Vector A.
*
* @return Element vector A.
*/
virtual const std::vector<Element> &GetAVector() const {
return m_rKey;
}
/**
* Serialize the object into a Serialized
* @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
* @return true if successfully serialized
*/
bool Serialize(Serialized *serObj) const;
/**
* SerializeWithoutContext - serializes the object into a Serialized, without the cryptocontext
* @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
* @return true if successfully serialized
*/
bool SerializeWithoutContext(Serialized *serObj) const;
/**
* Deserialize from the serialization
* @param serObj - contains the serialization
* @return true on success
*/
bool Deserialize(const Serialized &serObj);
// Deep comparison: requires other to be an LPEvalKeyNTRURelinImpl (the
// dynamic_cast to a reference throws std::bad_cast otherwise), then
// compares base CryptoObject state and each key element.
bool key_compare(const LPEvalKeyImpl<Element>& other) const {
const LPEvalKeyNTRURelinImpl<Element> &oth = dynamic_cast<const LPEvalKeyNTRURelinImpl<Element> &>(other);
if( !CryptoObject<Element>::operator ==(other) )
return false;
if( this->m_rKey.size() != oth.m_rKey.size() ) return false;
for( size_t i=0; i<this->m_rKey.size(); i++ ) {
if( this->m_rKey[i] != oth.m_rKey[i] )
return false;
}
return true;
}
private:
//private member to store vector of Element.
std::vector<Element> m_rKey;
};
template<typename Element>
class LPEvalKeyNTRUImpl;
template<typename Element>
using LPEvalKeyNTRU = shared_ptr<LPEvalKeyNTRUImpl<Element>>;
/**
* @brief Concrete class for facilitating NTRU key switch.
* @tparam Element a ring element.
*/
template <class Element>
class LPEvalKeyNTRUImpl : public LPEvalKeyImpl<Element> {
public:
/**
* Basic constructor.
*
* @param cc the crypto context this key-switch key belongs to.
*/
LPEvalKeyNTRUImpl(CryptoContext<Element> cc) : LPEvalKeyImpl<Element>(cc) {}
virtual ~LPEvalKeyNTRUImpl() {}
/**
* Copy constructor; copies the context and the key element.
*
*@param &rhs key to copy from
*/
explicit LPEvalKeyNTRUImpl(const LPEvalKeyNTRUImpl<Element> &rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) {
m_Key = rhs.m_Key;
}
/**
* Move constructor; copies the context, moves the key element.
*
*@param &rhs key to move from
*/
explicit LPEvalKeyNTRUImpl(LPEvalKeyNTRUImpl<Element> &&rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) {
m_Key = std::move(rhs.m_Key);
}
/**
* Assignment Operator.
*
* @param &rhs key to copy from
*/
const LPEvalKeyNTRUImpl<Element>& operator=(const LPEvalKeyNTRUImpl<Element> &rhs) {
this->context = rhs.context;
this->m_Key = rhs.m_Key;
return *this;
}
/**
* Move Assignment Operator.
* Clears the source key's context after the move.
*
* @param &rhs key to move from
*/
const LPEvalKeyNTRUImpl<Element>& operator=(LPEvalKeyNTRUImpl<Element> &&rhs) {
this->context = rhs.context;
rhs.context = 0;
m_Key = std::move(rhs.m_Key);
return *this;
}
/**
* Setter function to store NTRU key switch element.
* Function copies the key.
* Overrides the virtual function from base class LPEvalKeyImpl.
*
* @param &a is the key switch element to be copied.
*/
virtual void SetA(const Element &a) {
m_Key = a;
}
/**
* Setter function to store NTRU key switch Element.
* Function moves the key.
* Overrides the virtual function from base class LPEvalKeyImpl.
*
* @param &&a is the key switch Element to be moved.
*/
virtual void SetA(Element &&a) {
m_Key = std::move(a);
}
/**
* Getter function to access NTRU key switch Element.
* Overrides the virtual function from base class LPEvalKeyImpl.
*
* @return NTRU key switch Element.
*/
virtual const Element& GetA() const {
return m_Key;
}
/**
* Serialize the object into a Serialized
* @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
* @return true if successfully serialized
*/
bool Serialize(Serialized *serObj) const;
/**
* SerializeWithoutContext - serializes the object into a Serialized, without the cryptocontext
* @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
* @return true if successfully serialized
*/
bool SerializeWithoutContext(Serialized *serObj) const;
/**
* Deserialize from the serialization
* @param serObj - contains the serialization
* @return true on success
*/
bool Deserialize(const Serialized &serObj);
// Deep comparison: requires other to be an LPEvalKeyNTRUImpl (the
// dynamic_cast to a reference throws std::bad_cast otherwise), then
// compares base CryptoObject state and the key element.
bool key_compare(const LPEvalKeyImpl<Element>& other) const {
const LPEvalKeyNTRUImpl<Element> &oth = dynamic_cast<const LPEvalKeyNTRUImpl<Element> &>(other);
if( !CryptoObject<Element>::operator ==(other) )
return false;
if( this->m_Key != oth.m_Key )
return false;
return true;
}
private:
/**
* private member Element to store key.
*/
Element m_Key;
};
template<typename Element>
class LPPrivateKeyImpl;
template<typename Element>
using LPPrivateKey = shared_ptr<LPPrivateKeyImpl<Element>>;
/**
* @brief Private key implementation template for Ring-LWE, NTRU-based schemes,
* @tparam Element a ring element.
*/
template <class Element>
class LPPrivateKeyImpl : public LPKey<Element> {
public:
/**
* Construct in context; the key tag is a freshly generated random 128-bit
* hex identifier (see GenerateUniqueKeyID).
*/
LPPrivateKeyImpl(CryptoContext<Element> cc) : LPKey<Element>(cc, GenerateUniqueKeyID()) {}
/**
* Copy constructor; copies the context, key tag, and secret element.
*@param &rhs the LPPrivateKeyImpl to copy from
*/
explicit LPPrivateKeyImpl(const LPPrivateKeyImpl<Element> &rhs) : LPKey<Element>(rhs.GetCryptoContext(), rhs.GetKeyTag()) {
this->m_sk = rhs.m_sk;
}
/**
* Move constructor; copies the context and key tag, moves the secret element.
*@param &rhs the LPPrivateKeyImpl to move from
*/
explicit LPPrivateKeyImpl(LPPrivateKeyImpl<Element> &&rhs) : LPKey<Element>(rhs.GetCryptoContext(), rhs.GetKeyTag()) {
this->m_sk = std::move(rhs.m_sk);
}
/**
* Assignment Operator. Delegates the context/tag copy to the CryptoObject
* base-class assignment, then copies the secret element.
*
* @param &rhs LPPrivateKeyto assign from.
* @return the resulting LPPrivateKeyImpl
*/
const LPPrivateKeyImpl<Element>& operator=(const LPPrivateKeyImpl<Element> &rhs) {
CryptoObject<Element>::operator=(rhs);
this->m_sk = rhs.m_sk;
return *this;
}
/**
* Move Assignment Operator. Base state is copied via the CryptoObject
* assignment; only the secret element itself is moved.
*
* @param &rhs LPPrivateKeyImpl to assign from.
* @return the resulting LPPrivateKeyImpl
*/
const LPPrivateKeyImpl<Element>& operator=(LPPrivateKeyImpl<Element> &&rhs) {
CryptoObject<Element>::operator=(rhs);
this->m_sk = std::move(rhs.m_sk);
return *this;
}
/**
* Implementation of the Get accessor for private element.
* @return the private element.
*/
const Element & GetPrivateElement() const { return m_sk; }
/**
* Set accessor for private element (copy).
* @param &x private element to set to.
*/
void SetPrivateElement(const Element &x) {
m_sk = x;
}
/**
* Set accessor for private element (move).
* @param &&x private element to set to.
*/
void SetPrivateElement(Element &&x) {
m_sk = std::move(x);
}
/**
* Serialize the object into a Serialized
* @param *serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
* @return true if successfully serialized
*/
bool Serialize(Serialized *serObj) const;
/**
* Populate the object from the deserialization of the Serialized
* @param &serObj contains the serialized object
* @return true on success
*/
bool Deserialize(const Serialized &serObj);
// Equality: base CryptoObject comparison plus the secret element.
bool operator==(const LPPrivateKeyImpl& other) const {
return CryptoObject<Element>::operator ==(other) &&
m_sk == other.m_sk;
}
bool operator!=(const LPPrivateKeyImpl& other) const { return ! (*this == other); }
private:
// number of 32-bit draws needed for a 128-bit identifier (= 4)
static const size_t intsInID = 128 / (sizeof(uint32_t) * 8);
// Produce a random 128-bit key ID as 32 zero-padded lowercase hex chars
// (four 8-hex-digit draws from the shared PRNG).
static string GenerateUniqueKeyID() {
std::uniform_int_distribution<uint32_t> distribution(0, std::numeric_limits<uint32_t>::max());
std::stringstream s;
s.fill('0');
s << std::hex;
for( size_t i = 0; i < intsInID; i++ )
s << std::setw(8) << distribution(PseudoRandomNumberGenerator::GetPRNG());
return s.str();
}
Element m_sk; // the secret key element
};
/**
 * @brief Container tying together the public and secret halves of a key pair.
 * Both members are shared_ptr aliases, so a raw pointer passed to the
 * constructor has its ownership taken over by the pair.
 * @tparam Element a ring element.
 */
template <class Element>
class LPKeyPair {
public:
	LPPublicKey<Element> publicKey;   // public half (may be empty)
	LPPrivateKey<Element> secretKey;  // secret half (may be empty)

	/** Wrap raw key pointers; ownership transfers to the shared_ptr members. */
	LPKeyPair(LPPublicKeyImpl<Element>* a=0, LPPrivateKeyImpl<Element>* b=0): publicKey(a), secretKey(b) {}

	/** @return true only when both halves of the pair are present. */
	bool good() {
		if (publicKey && secretKey)
			return true;
		return false;
	}
};
/**
* @brief Abstract interface for parameter generation algorithm
* @tparam Element a ring element.
*/
template <class Element>
class LPParameterGenerationAlgorithm {
public:
virtual ~LPParameterGenerationAlgorithm() {}
/**
* Method for computing all derived parameters based on chosen primitive parameters
*
* @param cryptoParams the crypto parameters object to be populated with parameters.
* @param evalAddCount number of EvalAdds assuming no EvalMult and KeySwitch operations are performed.
* @param evalMultCount number of EvalMults assuming no EvalAdd and KeySwitch operations are performed.
* @param keySwitchCount number of KeySwitch operations assuming no EvalAdd and EvalMult operations are performed.
* @return NOTE(review): presumably true on success — confirm the convention with implementations.
*/
virtual bool ParamsGen(shared_ptr<LPCryptoParameters<Element>> cryptoParams, int32_t evalAddCount = 0,
int32_t evalMultCount = 0, int32_t keySwitchCount = 0) const = 0;
};
/**
* @brief Abstract interface for encryption algorithm
* @tparam Element a ring element.
*/
template <class Element>
class LPEncryptionAlgorithm {
public:
virtual ~LPEncryptionAlgorithm() {}
/**
* Method for encrypting plaintext using LBC
*
* @param publicKey public key used for encryption.
* @param plaintext copy of the plaintext element. NOTE a copy is passed! That is NOT an error!
* @return ciphertext which results from encryption.
*/
virtual Ciphertext<Element> Encrypt(const LPPublicKey<Element> publicKey, Element plaintext) const = 0;
/**
* Method for encrypting plaintext using LBC
*
* @param privateKey private key used for encryption.
* @param plaintext copy of the plaintext input. NOTE a copy is passed! That is NOT an error!
* @return ciphertext which results from encryption.
*/
virtual Ciphertext<Element> Encrypt(const LPPrivateKey<Element> privateKey, Element plaintext) const = 0;
/**
* Method for decrypting plaintext using LBC
*
* @param privateKey private key used for decryption.
* @param ciphertext the ciphertext to be decrypted.
* @param *plaintext the plaintext output.
* @return the decoding result.
*/
virtual DecryptResult Decrypt(const LPPrivateKey<Element> privateKey,
const Ciphertext<Element> ciphertext,
NativePoly *plaintext) const = 0;
/**
* Function to generate public and private keys
*
* @param cc crypto context in which to generate the key pair.
* @param makeSparse set to true if ring reduce by a factor of 2 is to be used.
* @return the generated key pair.
*/
virtual LPKeyPair<Element> KeyGen(CryptoContext<Element> cc, bool makeSparse=false) = 0;
};
/**
* @brief Abstract interface for Leveled SHE operations
* @tparam Element a ring element.
*/
template <class Element>
class LPLeveledSHEAlgorithm {
public:
virtual ~LPLeveledSHEAlgorithm() {}
/**
* Method for Modulus Reduction.
*
* @param cipherText Ciphertext to perform mod reduce on.
* @return the mod-reduced ciphertext.
*/
virtual Ciphertext<Element> ModReduce(Ciphertext<Element> cipherText) const = 0;
/**
* Method for Ring Reduction.
*
* @param cipherText Ciphertext to perform ring reduce on.
* @param keySwitchHint evaluation key used during the reduction.
* @return the ring-reduced ciphertext.
*/
virtual Ciphertext<Element> RingReduce(Ciphertext<Element> cipherText, const LPEvalKey<Element> keySwitchHint) const = 0;
/**
* Method for Composed EvalMult
*
* @param cipherText1 first input ciphertext to perform multiplication on.
* @param cipherText2 second input ciphertext to perform multiplication on.
* @param quadKeySwitchHint is for resultant quadratic secret key after multiplication to the secret key of the particular level.
* @return the resulting ciphertext that can be decrypted with the secret key of the particular level.
*/
virtual Ciphertext<Element> ComposedEvalMult(
const Ciphertext<Element> cipherText1,
const Ciphertext<Element> cipherText2,
const LPEvalKey<Element> quadKeySwitchHint) const = 0;
/**
* Method for Level Reduction from sk -> sk1. This method performs a keyswitch on the ciphertext and then performs a modulus reduction.
*
* @param cipherText1 is the original ciphertext to be key switched and mod reduced.
* @param linearKeySwitchHint is the linear key switch hint to perform the key switch operation.
* @return the resulting ciphertext.
*/
virtual Ciphertext<Element> LevelReduce(const Ciphertext<Element> cipherText1,
const LPEvalKey<Element> linearKeySwitchHint) const = 0;
/**
* Function that determines if security requirements are met if ring dimension is reduced by half.
*
* @param ringDimension is the original ringDimension
* @param &moduli is the vector of moduli that is used
* @param rootHermiteFactor is the security threshold
* @return true if the reduced ring still meets the security requirements.
*/
virtual bool CanRingReduce(usint ringDimension, const std::vector<BigInteger> &moduli, const double rootHermiteFactor) const = 0;
};
/**
* @brief Abstract interface class for LBC PRE algorithms
* @tparam Element a ring element.
*/
template <class Element>
class LPPREAlgorithm {
public:
virtual ~LPPREAlgorithm() {}
/**
* Virtual function to generate 1..log(q) encryptions for each bit of the original private key.
* Variant that uses the new secret key directly.
*
* @param newKey new private key for the new ciphertext.
* @param origPrivateKey original private key used for decryption.
* @return the re-encryption key.
*/
virtual LPEvalKey<Element> ReKeyGen(const LPPrivateKey<Element> newKey,
const LPPrivateKey<Element> origPrivateKey) const = 0;
/**
* Virtual function to generate 1..log(q) encryptions for each bit of the original private key
* Variant that uses the public key for the new secret key.
*
* @param newKey public key for the new secret key.
* @param origPrivateKey original private key used for decryption.
* @return the re-encryption key.
*/
virtual LPEvalKey<Element> ReKeyGen(const LPPublicKey<Element> newKey,
const LPPrivateKey<Element> origPrivateKey) const = 0;
/**
* Virtual function to define the interface for re-encrypting ciphertext using the array generated by ProxyGen
*
* @param evalKey proxy re-encryption key.
* @param ciphertext the input ciphertext.
* @return the re-encrypted ciphertext.
*/
virtual Ciphertext<Element> ReEncrypt(const LPEvalKey<Element> evalKey,
const Ciphertext<Element> ciphertext) const = 0;
};
/**
* @brief Abstract interface class for LBC Multiparty algorithms. A version of this multiparty scheme built on the BGV scheme is seen here:
* - Asharov G., Jain A., López-Alt A., Tromer E., Vaikuntanathan V., Wichs D. (2012) Multiparty Computation with Low Communication, Computation and Interaction via Threshold FHE. In: Pointcheval D., Johansson T. (eds) Advances in Cryptology – EUROCRYPT 2012. EUROCRYPT 2012. Lecture Notes in Computer Science, vol 7237. Springer, Berlin, Heidelberg
*
* During offline key generation, this multiparty scheme relies on the clients coordinating their public key generation. To do this, a single client generates a public-secret key pair.
* This public key is shared with other keys which use an element in the public key to generate their own public keys.
* The clients generate a shared key pair using a scheme-specific approach, then generate re-encryption keys. Re-encryption keys are uploaded to the server.
* Clients encrypt data with their public keys and send the encrypted data server.
* The data is re-encrypted. Computations are then run on the data.
* The result is sent to each of the clients.
* One client runs a "Leader" multiparty decryption operation with its own secret key. All other clients run a regular "Main" multiparty decryption with their own secret key.
* The resulting partially decrypted ciphertext are then fully decrypted with the decryption fusion algorithms.
*
* @tparam Element a ring element.
*/
template <class Element>
class LPMultipartyAlgorithm {
public:
virtual ~LPMultipartyAlgorithm() {}
/**
* Function to generate public and private keys for multiparty homomorphic encryption in coordination with a leading client that generated a first public key.
*
* @param cc cryptocontext for the keys to be generated.
* @param pk1 public key of the lead client, used to derive this party's key pair.
* @param makeSparse set to true if ring reduce by a factor of 2 is to be used.
* @param pre set to true if proxy re-encryption is used in multi-party protocol
* @return key pair including the private and public key
*/
virtual LPKeyPair<Element> MultipartyKeyGen(CryptoContext<Element> cc,
const LPPublicKey<Element> pk1,
bool makeSparse=false, bool pre=false) = 0;
/**
* Function to generate public and private keys for multiparty homomorphic encryption server key pair in coordination with secret keys of clients.
*
* @param cc cryptocontext for the keys to be generated.
* @param secretKeys private keys used for decryption to be fused.
* @param makeSparse set to true if ring reduce by a factor of 2 is to be used.
* @return key pair including the private and public key
*/
virtual LPKeyPair<Element> MultipartyKeyGen(CryptoContext<Element> cc,
const vector<LPPrivateKey<Element>>& secretKeys,
bool makeSparse=false) = 0;
/**
* Method for main decryption operation run by most decryption clients for multiparty homomorphic encryption
*
* @param privateKey private key used for decryption.
* @param ciphertext ciphertext to be partially decrypted.
* @return the partially decrypted ciphertext.
*/
virtual Ciphertext<Element> MultipartyDecryptMain(const LPPrivateKey<Element> privateKey,
const Ciphertext<Element> ciphertext) const = 0;
/**
* Method for decryption operation run by the lead decryption client for multiparty homomorphic encryption
*
* @param privateKey private key used for decryption.
* @param ciphertext ciphertext to be partially decrypted.
* @return the partially decrypted ciphertext.
*/
virtual Ciphertext<Element> MultipartyDecryptLead(const LPPrivateKey<Element> privateKey,
const Ciphertext<Element> ciphertext) const = 0;
/**
* Method for fusing the partially decrypted ciphertexts.
*
* @param &ciphertextVec the partially decrypted ciphertexts to fuse.
* @param *plaintext the plaintext output.
* @return the decoding result.
*/
virtual DecryptResult MultipartyDecryptFusion(const vector<Ciphertext<Element>>& ciphertextVec,
NativePoly *plaintext) const = 0;
};
/**
* @brief Abstract interface class for LBC SHE algorithms
* @tparam Element a ring element.
*/
template <class Element>
class LPSHEAlgorithm {
public:
	virtual ~LPSHEAlgorithm() {}

	/**
	* Virtual function to define the interface for homomorphic addition of ciphertexts.
	*
	* @param ciphertext1 the input ciphertext.
	* @param ciphertext2 the input ciphertext.
	* @return the new ciphertext.
	*/
	virtual Ciphertext<Element> EvalAdd(const Ciphertext<Element> ciphertext1,
		const Ciphertext<Element> ciphertext2) const = 0;

	/**
	* Virtual function to define the interface for homomorphic addition of a ciphertext and a plaintext.
	*
	* @param ciphertext the input ciphertext.
	* @param plaintext the input plaintext.
	* @return the new ciphertext.
	*/
	virtual Ciphertext<Element> EvalAdd(const Ciphertext<Element> ciphertext,
		const Plaintext plaintext) const = 0;

	/**
	* Virtual function to define the interface for homomorphic subtraction of ciphertexts.
	*
	* @param ciphertext1 the input ciphertext.
	* @param ciphertext2 the input ciphertext.
	* @return the new ciphertext.
	*/
	virtual Ciphertext<Element> EvalSub(const Ciphertext<Element> ciphertext1,
		const Ciphertext<Element> ciphertext2) const = 0;

	/**
	* Virtual function to define the interface for homomorphic subtraction of a plaintext from a ciphertext.
	*
	* @param ciphertext the input ciphertext.
	* @param plaintext the input plaintext.
	* @return the new ciphertext.
	*/
	virtual Ciphertext<Element> EvalSub(const Ciphertext<Element> ciphertext,
		const Plaintext plaintext) const = 0;

	/**
	* Virtual function to define the interface for multiplicative homomorphic evaluation of ciphertext.
	*
	* @param ciphertext1 the input ciphertext.
	* @param ciphertext2 the input ciphertext.
	* @return the new ciphertext.
	*/
	virtual Ciphertext<Element> EvalMult(const Ciphertext<Element> ciphertext1,
		const Ciphertext<Element> ciphertext2) const = 0;

	/**
	* Virtual function to define the interface for multiplication of ciphertext by plaintext.
	*
	* @param ciphertext the input ciphertext.
	* @param plaintext the input plaintext.
	* @return the new ciphertext.
	*/
	virtual Ciphertext<Element> EvalMult(const Ciphertext<Element> ciphertext,
		const Plaintext plaintext) const = 0;

	/**
	* Virtual function to define the interface for multiplicative homomorphic evaluation of ciphertext using the evaluation key.
	*
	* @param ciphertext1 first input ciphertext.
	* @param ciphertext2 second input ciphertext.
	* @param ek is the evaluation key to make the newCiphertext decryptable by the same secret key as that of ciphertext1 and ciphertext2.
	* @return the new ciphertext.
	*/
	virtual Ciphertext<Element> EvalMult(const Ciphertext<Element> ciphertext1,
		const Ciphertext<Element> ciphertext2, const LPEvalKey<Element> ek) const = 0;

	/**
	* Virtual function for evaluating multiplication of a ciphertext list where each multiplication is followed by a relinearization operation.
	*
	* @param cipherTextList is the ciphertext list.
	* @param evalKeys is the evaluation key to make the newCiphertext
	* decryptable by the same secret key as that of the ciphertext list.
	* @return the new resulting ciphertext.
	*/
	virtual Ciphertext<Element> EvalMultMany(const vector<Ciphertext<Element>>& cipherTextList,
		const vector<LPEvalKey<Element>> &evalKeys) const = 0;

	/**
	* Virtual function to define the interface for multiplicative homomorphic evaluation of ciphertext using the evaluation key.
	*
	* @param ct1 first input ciphertext.
	* @param ct2 second input ciphertext.
	* @param ek is the evaluation key to make the newCiphertext
	* decryptable by the same secret key as that of ciphertext1 and ciphertext2.
	* @return the new resulting ciphertext.
	*/
	virtual Ciphertext<Element> EvalMultAndRelinearize(const Ciphertext<Element> ct1,
		const Ciphertext<Element> ct2, const vector<LPEvalKey<Element>> &ek) const = 0;

	/**
	* EvalLinRegression - Computes the parameter vector for linear regression using the least squares method
	* @param x - matrix of regressors
	* @param y - vector of dependent variables
	* @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method)
	*/
	shared_ptr<Matrix<RationalCiphertext<Element>>>
		EvalLinRegression(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
			const shared_ptr<Matrix<RationalCiphertext<Element>>> y) const
	{
		// multiplication is done in reverse order to minimize the number of inner products
		Matrix<RationalCiphertext<Element>> xTransposed = x->Transpose();
		shared_ptr<Matrix<RationalCiphertext<Element>>> result(new Matrix<RationalCiphertext<Element>>(xTransposed * (*y)));

		Matrix<RationalCiphertext<Element>> xCovariance = xTransposed * (*x);
		Matrix<RationalCiphertext<Element>> cofactorMatrix = xCovariance.CofactorMatrix();
		Matrix<RationalCiphertext<Element>> adjugateMatrix = cofactorMatrix.Transpose();

		*result = adjugateMatrix * (*result);

		RationalCiphertext<Element> determinant;
		xCovariance.Determinant(&determinant);

		// the inverse (x^T x)^{-1} is represented as adjugate/determinant, so the
		// determinant becomes the denominator of every entry in the result
		for (size_t row = 0; row < result->GetRows(); row++)
			for (size_t col = 0; col < result->GetCols(); col++)
				(*result)(row, col).SetDenominator(determinant.GetNumerator());

		return result;
	}

	/**
	* Virtual function to define the interface for homomorphic negation of ciphertext.
	*
	* @param ciphertext the input ciphertext.
	* @return the new ciphertext.
	*/
	virtual Ciphertext<Element> EvalNegate(const Ciphertext<Element> ciphertext) const = 0;

	/**
	* Function to add random noise to all plaintext slots except for the first one; used in EvalInnerProduct
	*
	* @param ciphertext the input ciphertext.
	* @return modified ciphertext
	*/
	Ciphertext<Element> AddRandomNoise(const Ciphertext<Element> ciphertext) const {

		string kID = ciphertext->GetKeyTag();
		const auto cryptoParams = ciphertext->GetCryptoParameters();
		const auto encodingParams = cryptoParams->GetEncodingParams();
		const auto elementParams = cryptoParams->GetElementParams();

		usint n = elementParams->GetRingDimension();

		auto cc = ciphertext->GetCryptoContext();

		DiscreteUniformGenerator dug;
		dug.SetModulus(encodingParams->GetPlaintextModulus());
		BigVector randomVector = dug.GenerateVector(n - 1);

		std::vector<uint64_t> randomIntVector(n);

		// first plaintext slot does not need to change
		randomIntVector[0] = 0;

		for (usint i = 0; i < n - 1; i++)
		{
			randomIntVector[i + 1] = randomVector[i].ConvertToInt();
		}

		Plaintext plaintext = cc->MakePackedPlaintext(randomIntVector);

		plaintext->Encode();
		plaintext->GetElement<Element>().SetFormat(EVALUATION);

		auto ans = EvalAdd(ciphertext, plaintext);

		return ans;
	}

	/**
	* Method for KeySwitchGen
	*
	* @param originalPrivateKey Original private key used for encryption.
	* @param newPrivateKey New private key to generate the keyswitch hint.
	* @return the resulting keySwitchHint.
	*/
	virtual LPEvalKey<Element> KeySwitchGen(
		const LPPrivateKey<Element> originalPrivateKey,
		const LPPrivateKey<Element> newPrivateKey) const = 0;

	/**
	* Method for KeySwitch
	*
	* @param keySwitchHint Hint required to perform the ciphertext switching.
	* @param cipherText Original ciphertext to perform switching on.
	* @return the key-switched ciphertext.
	*/
	virtual Ciphertext<Element> KeySwitch(
		const LPEvalKey<Element> keySwitchHint,
		const Ciphertext<Element> cipherText) const = 0;

	/**
	* Method for KeySwitching based on RLWE relinearization (used only for the LTV scheme).
	* Function to generate 1..log(q) encryptions for each bit of the original private key
	*
	* @param newPublicKey encryption key for the new ciphertext.
	* @param origPrivateKey original private key used for decryption.
	* @return the re-encryption key.
	*/
	virtual LPEvalKey<Element> KeySwitchRelinGen(const LPPublicKey<Element> newPublicKey,
		const LPPrivateKey<Element> origPrivateKey) const = 0;

	/**
	* Method for KeySwitching based on RLWE relinearization (used only for the LTV scheme).
	*
	* @param evalKey the evaluation key.
	* @param ciphertext the input ciphertext.
	* @return the resulting Ciphertext
	*/
	virtual Ciphertext<Element> KeySwitchRelin(const LPEvalKey<Element> evalKey,
		const Ciphertext<Element> ciphertext) const = 0;

	/**
	* Virtual function to define the interface for generating an evaluation key which is used after each multiplication.
	*
	* @param originalPrivateKey private key used for encryption.
	* @return the resulting evaluation key.
	*/
	virtual LPEvalKey<Element> EvalMultKeyGen(
		const LPPrivateKey<Element> originalPrivateKey) const = 0;

	/**
	* Virtual function to define the interface for generating an evaluation key which is used after each multiplication for depth more than 2.
	*
	* @param originalPrivateKey Original private key used for encryption.
	* @return the resulting evaluation key vector list.
	*/
	virtual vector<LPEvalKey<Element>> EvalMultKeysGen(
		const LPPrivateKey<Element> originalPrivateKey) const = 0;

	/**
	* Virtual function to generate all isomorphism keys for a given private key
	*
	* @param publicKey encryption key for the new ciphertext.
	* @param origPrivateKey original private key used for decryption.
	* @param indexList list of automorphism indices to be computed
	* @return returns the evaluation keys
	*/
	virtual shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPublicKey<Element> publicKey,
		const LPPrivateKey<Element> origPrivateKey,
		const std::vector<usint> &indexList) const = 0;

	/**
	* Virtual function for evaluating automorphism of ciphertext at index i
	*
	* @param ciphertext the input ciphertext.
	* @param i automorphism index
	* @param &evalKeys - reference to the vector of evaluation keys generated by EvalAutomorphismKeyGen.
	* @return resulting ciphertext
	*/
	virtual Ciphertext<Element> EvalAutomorphism(const Ciphertext<Element> ciphertext, usint i,
		const std::map<usint, LPEvalKey<Element>> &evalKeys) const = 0;

	/**
	* Virtual function to generate automorphism keys for a given private key; Uses the private key for encryption
	*
	* @param privateKey private key.
	* @param indexList list of automorphism indices to be computed
	* @return returns the evaluation keys
	*/
	virtual shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPrivateKey<Element> privateKey,
		const std::vector<usint> &indexList) const = 0;

	/**
	* Function to generate the automorphism keys for EvalSum; works only for packed encoding
	*
	* @param privateKey private key.
	* @param publicKey public key; when non-null, an NTRU-style (public-key) automorphism key generation is used.
	* @return returns the evaluation keys
	*/
	shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumKeyGen(const LPPrivateKey<Element> privateKey,
		const LPPublicKey<Element> publicKey) const
	{
		const auto cryptoParams = privateKey->GetCryptoParameters();
		const auto encodingParams = cryptoParams->GetEncodingParams();
		const auto elementParams = cryptoParams->GetElementParams();

		usint batchSize = encodingParams->GetBatchSize();
		usint m = elementParams->GetCyclotomicOrder();

		// stores automorphism indices needed for EvalSum
		std::vector<usint> indices;

		if (!(m & (m-1))){ // Check if m is a power of 2
			indices = GenerateIndices_2n(batchSize, m);
		} else { // Arbitrary cyclotomics
			// repeated squaring of the plaintext generator produces the
			// log2(batchSize) automorphism indices used by EvalSum
			usint g = encodingParams->GetPlaintextGenerator();
			for (int i = 0; i < floor(log2(batchSize)); i++)
			{
				indices.push_back(g);
				g = (g * g) % m;
			}
		}

		if (publicKey)
			// NTRU-based scheme
			return EvalAutomorphismKeyGen(publicKey, privateKey, indices);
		else
			// Regular RLWE scheme
			return EvalAutomorphismKeyGen(privateKey, indices);
	}

	/**
	* Sums all elements in log (batch size) time - works only with packed encoding
	*
	* @param ciphertext the input ciphertext.
	* @param batchSize size of the batch to be summed up
	* @param &evalKeys - reference to the map of evaluation keys generated by EvalAutomorphismKeyGen.
	* @return resulting ciphertext
	*/
	Ciphertext<Element> EvalSum(const Ciphertext<Element> ciphertext, usint batchSize,
		const std::map<usint, LPEvalKey<Element>> &evalKeys) const {

		const shared_ptr<LPCryptoParameters<Element>> cryptoParams = ciphertext->GetCryptoParameters();
		Ciphertext<Element> newCiphertext(new CiphertextImpl<Element>(*ciphertext));

		const auto encodingParams = cryptoParams->GetEncodingParams();
		const auto elementParams = cryptoParams->GetElementParams();

		usint m = elementParams->GetCyclotomicOrder();

		if ((encodingParams->GetBatchSize() == 0))
			throw std::runtime_error("EvalSum: Packed encoding parameters 'batch size' is not set; Please check the EncodingParams passed to the crypto context.");
		else
		{
			if (!(m & (m-1))){ // Check if m is a power of 2
				newCiphertext = EvalSum_2n(batchSize, m, evalKeys,newCiphertext);
			} else { // Arbitrary cyclotomics
				if (encodingParams->GetPlaintextGenerator() == 0)
					throw std::runtime_error("EvalSum: Packed encoding parameters 'plaintext generator' is not set; Please check the EncodingParams passed to the crypto context.");
				else
				{
					// rotate-and-add with doubling stride: after log2(batchSize)
					// rounds every slot holds the sum of the batch
					usint g = encodingParams->GetPlaintextGenerator();
					for (int i = 0; i < floor(log2(batchSize)); i++)
					{
						auto ea = EvalAutomorphism(newCiphertext, g, evalKeys);
						newCiphertext = EvalAdd(newCiphertext, ea);
						g = (g * g) % m;
					}
				}
			}
		}

		return newCiphertext;
	}

	/**
	* Evaluates inner product in batched encoding
	*
	* @param ciphertext1 first vector.
	* @param ciphertext2 second vector.
	* @param batchSize size of the batch to be summed up
	* @param &evalSumKeys - reference to the map of evaluation keys generated by EvalAutomorphismKeyGen.
	* @param &evalMultKey - reference to the evaluation key generated by EvalMultKeyGen.
	* @return resulting ciphertext
	*/
	Ciphertext<Element> EvalInnerProduct(const Ciphertext<Element> ciphertext1,
		const Ciphertext<Element> ciphertext2, usint batchSize,
		const std::map<usint, LPEvalKey<Element>> &evalSumKeys,
		const LPEvalKey<Element> evalMultKey) const {

		Ciphertext<Element> result = EvalMult(ciphertext1, ciphertext2, evalMultKey);

		result = EvalSum(result, batchSize, evalSumKeys);

		// add a random number to all slots except for the first one so that no information is leaked
		result = AddRandomNoise(result);

		return result;
	}

	/**
	* Evaluates inner product of a ciphertext and a plaintext in batched encoding
	*
	* @param ciphertext1 first vector.
	* @param ciphertext2 plaintext.
	* @param batchSize size of the batch to be summed up
	* @param &evalSumKeys - reference to the map of evaluation keys generated by EvalAutomorphismKeyGen.
	* @return resulting ciphertext
	*/
	Ciphertext<Element> EvalInnerProduct(const Ciphertext<Element> ciphertext1,
		const Plaintext ciphertext2, usint batchSize,
		const std::map<usint, LPEvalKey<Element>> &evalSumKeys) const {

		Ciphertext<Element> result = EvalMult(ciphertext1, ciphertext2);

		result = EvalSum(result, batchSize, evalSumKeys);

		// add a random number to all slots except for the first one so that no information is leaked
		return AddRandomNoise(result);
	}

	/**
	* EvalLinRegressBatched - Computes the parameter vector for linear regression using the least squares method
	* Currently supports only two regressors
	* @param x - matrix of regressors
	* @param y - vector of dependent variables
	* @param batchSize size of the batch to be summed up
	* @param &evalSumKeys - reference to the map of evaluation keys generated by EvalAutomorphismKeyGen.
	* @param evalMultKey - the evaluation key generated by EvalMultKeyGen.
	* @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method)
	*/
	shared_ptr<Matrix<RationalCiphertext<Element>>>
		EvalLinRegressBatched(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
			const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize,
			const std::map<usint, LPEvalKey<Element>> &evalSumKeys,
			const LPEvalKey<Element> evalMultKey) const
	{
		Matrix<RationalCiphertext<Element>> covarianceMatrix(x->GetAllocator(), 2, 2);

		Ciphertext<Element> x0 = (*x)(0, 0).GetNumerator();
		Ciphertext<Element> x1 = (*x)(0, 1).GetNumerator();
		Ciphertext<Element> y0 = (*y)(0, 0).GetNumerator();

		// Compute the covariance matrix for X; it is symmetric, so the
		// off-diagonal entry is computed once and mirrored
		covarianceMatrix(0, 0).SetNumerator(EvalInnerProduct(x0, x0, batchSize, evalSumKeys, evalMultKey));
		covarianceMatrix(0, 1).SetNumerator(EvalInnerProduct(x0, x1, batchSize, evalSumKeys, evalMultKey));
		covarianceMatrix(1, 0) = covarianceMatrix(0, 1);
		covarianceMatrix(1, 1).SetNumerator(EvalInnerProduct(x1, x1, batchSize, evalSumKeys, evalMultKey));

		Matrix<RationalCiphertext<Element>> cofactorMatrix = covarianceMatrix.CofactorMatrix();

		Matrix<RationalCiphertext<Element>> adjugateMatrix = cofactorMatrix.Transpose();

		shared_ptr<Matrix<RationalCiphertext<Element>>> result(new Matrix<RationalCiphertext<Element>>(x->GetAllocator(), 2, 1));

		(*result)(0, 0).SetNumerator(EvalInnerProduct(x0, y0, batchSize, evalSumKeys, evalMultKey));
		(*result)(1, 0).SetNumerator(EvalInnerProduct(x1, y0, batchSize, evalSumKeys, evalMultKey));

		*result = adjugateMatrix * (*result);

		RationalCiphertext<Element> determinant;
		covarianceMatrix.Determinant(&determinant);

		for (size_t row = 0; row < result->GetRows(); row++)
			for (size_t col = 0; col < result->GetCols(); col++)
				(*result)(row, col).SetDenominator(determinant.GetNumerator());

		return result;
	}

	/**
	* EvalCrossCorrelation - Computes the sliding sum of inner products (known
	* as cross-correlation, sliding inner product, or sliding dot product in
	* image processing)
	* @param x - first vector of row vectors
	* @param y - second vector of row vectors
	* @param batchSize - batch size for packed encoding
	* @param indexStart - starting index in the vectors of row vectors
	* @param length - length of the slice in the vectors of row vectors
	* @param evalSumKeys - evaluation keys used for the automorphism operation
	* @param evalMultKey - the evaluation key used for multiplication
	* @return sum(x_i*y_i), i.e., a sum of inner products
	*/
	Ciphertext<Element>
		EvalCrossCorrelation(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
			const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize,
			usint indexStart, usint length,
			const std::map<usint, LPEvalKey<Element>> &evalSumKeys,
			const LPEvalKey<Element> evalMultKey) const
	{
		if (length == 0)
			length = x->GetRows();

		// the slice [indexStart, indexStart + length) must stay within the vector.
		// BUGFIX: the previous check (length - indexStart > rows) ignored the
		// starting offset and could wrap around on unsigned arithmetic,
		// letting out-of-bounds slices through
		if (indexStart + length > x->GetRows())
			throw std::runtime_error("The number of rows exceeds the dimension of the vector");

		//additional error checking can be added here

		Ciphertext<Element> result;

		Ciphertext<Element> x0 = (*x)(indexStart, 0).GetNumerator();
		Ciphertext<Element> y0 = (*y)(indexStart, 0).GetNumerator();

		result = EvalInnerProduct(x0, y0, batchSize, evalSumKeys, evalMultKey);

#pragma omp parallel for ordered schedule(dynamic)
		for (usint i = indexStart + 1; i < indexStart + length; i++)
		{
			Ciphertext<Element> xi = (*x)(i, 0).GetNumerator();
			Ciphertext<Element> yi = (*y)(i, 0).GetNumerator();

			auto product = EvalInnerProduct(xi, yi, batchSize, evalSumKeys, evalMultKey);
#pragma omp ordered
			{
				result = EvalAdd(result,product);
			}
		}

		return result;
	}

private:

	// Computes the automorphism indices needed by EvalSum when the cyclotomic
	// order m is a power of 2: powers of 5 (the standard generator of the
	// subgroup of rotations) plus index 3 for the final conjugation step.
	std::vector<usint> GenerateIndices_2n(usint batchSize, usint m) const {

		// stores automorphism indices needed for EvalSum
		std::vector<usint> indices;

		usint g = 5;
		for (int i = 0; i < floor(log2(batchSize)) - 1; i++)
		{
			indices.push_back(g);
			g = (g * g) % m;
		}
		if (2*batchSize<m)
			indices.push_back(g);
		indices.push_back(3);

		return indices;
	}

	// Power-of-2 specialization of EvalSum: rotate-and-add using the indices
	// produced by GenerateIndices_2n.
	Ciphertext<Element> EvalSum_2n(usint batchSize, usint m, const std::map<usint, LPEvalKey<Element>> &evalKeys,
		const Ciphertext<Element> ciphertext) const{

		Ciphertext<Element> newCiphertext(new CiphertextImpl<Element>(*ciphertext));

		usint g = 5;
		for (int i = 0; i < floor(log2(batchSize)) - 1; i++)
		{
			newCiphertext = EvalAdd(newCiphertext, EvalAutomorphism(newCiphertext, g, evalKeys));
			g = (g * g) % m;
		}
		if (2*batchSize<m)
			newCiphertext = EvalAdd(newCiphertext, EvalAutomorphism(newCiphertext, g, evalKeys));
		newCiphertext = EvalAdd(newCiphertext, EvalAutomorphism(newCiphertext, 3, evalKeys));

		return newCiphertext;
	}
};
/**
* @brief Abstract interface class for LBC FHE algorithms (bootstrapping)
* @tparam Element a ring element.
*/
template <class Element>
class LPFHEAlgorithm {
public:
	virtual ~LPFHEAlgorithm() {}

	/**
	* Virtual function to define the interface for bootstrapping evaluation of ciphertext
	*
	* @param &ciphertext the input ciphertext.
	* @param *newCiphertext the refreshed (bootstrapped) output ciphertext.
	*/
	virtual void Bootstrap(const Ciphertext<Element> &ciphertext,
		Ciphertext<Element> *newCiphertext) const = 0;
};
/**
* @brief main implementation class to capture essential cryptoparameters of any LBC system
* @tparam Element a ring element.
*/
template <typename Element>
class LPCryptoParameters : public Serializable
{
public:
	virtual ~LPCryptoParameters() {}

	/**
	* Returns the value of plaintext modulus p
	*
	* @return the plaintext modulus.
	*/
	const PlaintextModulus &GetPlaintextModulus() const { return m_encodingParams->GetPlaintextModulus(); }

	/**
	* Returns the reference to IL params
	*
	* @return the ring element parameters.
	*/
	const shared_ptr<typename Element::Params> GetElementParams() const { return m_params; }

	/**
	* Returns the reference to encoding params
	*
	* @return the encoding parameters.
	*/
	const EncodingParams GetEncodingParams() const { return m_encodingParams; }

	/**
	* Sets the value of plaintext modulus p
	*/
	void SetPlaintextModulus(const PlaintextModulus &plaintextModulus) {
		m_encodingParams->SetPlaintextModulus(plaintextModulus);
	}

	// Equality is scheme-specific; each derived parameter set defines it.
	virtual bool operator==(const LPCryptoParameters<Element>& cmp) const = 0;
	// Inequality is defined in terms of the virtual operator==.
	bool operator!=(const LPCryptoParameters<Element>& cmp) const { return !(*this == cmp); }

	/**
	* Overload to allow printing of parameters to an iostream
	* NOTE that the implementation relies on calling the virtual PrintParameters method
	* @param out - the stream to print to
	* @param item - reference to the item to print
	* @return the stream
	*/
	friend std::ostream& operator<<(std::ostream& out, const LPCryptoParameters& item) {
		item.PrintParameters(out);
		return out;
	}

	// Relinearization window; this base implementation returns 0, meaning
	// the parameter set does not use one. Derived classes override as needed.
	virtual usint GetRelinWindow() const { return 0; }

	// Base implementation has no discrete Gaussian generator; derived
	// parameter sets that use one must override this accessor.
	virtual const typename Element::DggType &GetDiscreteGaussianGenerator() const {
		throw std::logic_error("No DGG Available for this parameter set");
	}

	/**
	* Sets the reference to element params
	*/
	void SetElementParams(shared_ptr<typename Element::Params> params) {
		m_params = params;
	}

	/**
	* Sets the reference to encoding params
	*/
	void SetEncodingParams(EncodingParams encodingParams) {
		m_encodingParams = encodingParams;
	}

protected:
	// Constructs with a plaintext modulus only (default 2); element params are left unset.
	LPCryptoParameters(const PlaintextModulus &plaintextModulus = 2) {
		m_encodingParams.reset( new EncodingParamsImpl(plaintextModulus) );
	}

	// Constructs from element params and a plaintext modulus.
	LPCryptoParameters(shared_ptr<typename Element::Params> params, const PlaintextModulus &plaintextModulus) {
		m_params = params;
		m_encodingParams.reset( new EncodingParamsImpl(plaintextModulus) );
	}

	// Constructs from element params and full encoding params.
	LPCryptoParameters(shared_ptr<typename Element::Params> params, EncodingParams encodingParams) {
		m_params = params;
		m_encodingParams = encodingParams;
	}

	// Copies all settings from an existing parameter object, then substitutes
	// new element parameters.
	LPCryptoParameters(LPCryptoParameters<Element> *from, shared_ptr<typename Element::Params> newElemParms) {
		*this = *from;
		m_params = newElemParms;
	}

	// Default textual dump used by operator<<; derived classes may extend it.
	virtual void PrintParameters(std::ostream& out) const {
		out << "Element Parameters: " << *m_params << std::endl;
		out << "Encoding Parameters: " << *m_encodingParams << std::endl;
	}

private:
	//element-specific parameters
	shared_ptr<typename Element::Params> m_params;

	//encoding-specific parameters
	EncodingParams m_encodingParams;
};
/**
* @brief Abstract interface for public key encryption schemes
* @tparam Element a ring element.
*/
template <class Element>
class LPPublicKeyEncryptionScheme {
public:
// Default constructor: all capability algorithm pointers start null;
// they are populated later via Enable().
LPPublicKeyEncryptionScheme() :
	m_algorithmParamsGen(0), m_algorithmEncryption(0), m_algorithmPRE(0), m_algorithmMultiparty(0),
	m_algorithmSHE(0), m_algorithmFHE(0), m_algorithmLeveledSHE(0) {}
// Destructor: releases every capability algorithm owned by this scheme.
// `delete` on a null pointer is a no-op per the C++ standard, so the
// previous per-pointer NULL checks were redundant and have been removed.
virtual ~LPPublicKeyEncryptionScheme() {
	delete this->m_algorithmParamsGen;
	delete this->m_algorithmEncryption;
	delete this->m_algorithmPRE;
	delete this->m_algorithmMultiparty;
	delete this->m_algorithmSHE;
	delete this->m_algorithmFHE;
	delete this->m_algorithmLeveledSHE;
}
// Scheme equality is defined by each concrete scheme implementation.
virtual bool operator==(const LPPublicKeyEncryptionScheme& sch) const = 0;

// Inequality derives from the virtual operator==.
bool operator!=(const LPPublicKeyEncryptionScheme& sch) const {
	return !(*this == sch);
}
/**
 * Enable features with a bit mask of PKESchemeFeature codes
 * @param mask bitwise OR of the feature flags to enable
 */
void Enable(usint mask) {
	// dispatch each requested feature to the virtual single-feature Enable()
	for (PKESchemeFeature feature : { ENCRYPTION, PRE, SHE, LEVELEDSHE, MULTIPARTY, FHE }) {
		if (mask & feature)
			Enable(feature);
	}
}
/**
 * Reports which features are currently enabled, as a bit mask of
 * PKESchemeFeature codes: a feature is enabled iff its algorithm
 * object has been instantiated.
 * @return bitwise OR of the enabled feature flags
 */
usint GetEnabled() const {
	usint enabled = 0;
	enabled |= (m_algorithmEncryption != NULL) ? ENCRYPTION : 0;
	enabled |= (m_algorithmPRE != NULL) ? PRE : 0;
	enabled |= (m_algorithmSHE != NULL) ? SHE : 0;
	enabled |= (m_algorithmFHE != NULL) ? FHE : 0;
	enabled |= (m_algorithmLeveledSHE != NULL) ? LEVELEDSHE : 0;
	enabled |= (m_algorithmMultiparty != NULL) ? MULTIPARTY : 0;
	return enabled;
}

//instantiated in the scheme implementation class
virtual void Enable(PKESchemeFeature feature) = 0;
/////////////////////////////////////////
// wrapper for LPParameterSelectionAlgorithm
//

// Delegates parameter generation to the ParamsGen algorithm; throws when
// that capability has not been implemented for this scheme.
bool ParamsGen(shared_ptr<LPCryptoParameters<Element>> cryptoParams, int32_t evalAddCount = 0,
	int32_t evalMultCount = 0, int32_t keySwitchCount = 0) const {
	if (!this->m_algorithmParamsGen)
		throw std::logic_error("Parameter generation operation has not been implemented");
	return this->m_algorithmParamsGen->ParamsGen(cryptoParams, evalAddCount, evalMultCount, keySwitchCount);
}
/////////////////////////////////////////
// the three functions below are wrappers for things in LPEncryptionAlgorithm (ENCRYPT)
//

// Public-key encryption wrapper; throws when ENCRYPTION is not enabled.
Ciphertext<Element> Encrypt(const LPPublicKey<Element> publicKey,
	const Element &plaintext) const {
	if (!this->m_algorithmEncryption)
		throw std::logic_error("Encrypt operation has not been enabled");
	return this->m_algorithmEncryption->Encrypt(publicKey, plaintext);
}

// Secret-key encryption wrapper; throws when ENCRYPTION is not enabled.
Ciphertext<Element> Encrypt(const LPPrivateKey<Element> privateKey,
	const Element &plaintext) const {
	if (!this->m_algorithmEncryption)
		throw std::logic_error("Encrypt operation has not been enabled");
	return this->m_algorithmEncryption->Encrypt(privateKey, plaintext);
}
// Decryption wrapper; throws when ENCRYPTION is not enabled.
DecryptResult Decrypt(const LPPrivateKey<Element> privateKey, const Ciphertext<Element> ciphertext,
	NativePoly *plaintext) const {
	if (!this->m_algorithmEncryption)
		throw std::logic_error("Decrypt operation has not been enabled");
	return this->m_algorithmEncryption->Decrypt(privateKey, ciphertext, plaintext);
}
// Key-generation wrapper: delegates to the encryption algorithm and tags
// the public key with the secret key's tag so the pair is linked.
LPKeyPair<Element> KeyGen(CryptoContext<Element> cc, bool makeSparse) {
	if (!this->m_algorithmEncryption)
		throw std::logic_error("KeyGen operation has not been enabled");
	LPKeyPair<Element> keyPair = this->m_algorithmEncryption->KeyGen(cc, makeSparse);
	keyPair.publicKey->SetKeyTag(keyPair.secretKey->GetKeyTag());
	return keyPair;
}
/////////////////////////////////////////
// the three functions below are wrappers for things in LPPREAlgorithm (PRE)
//

// Re-encryption key generation from a new public key; the re-encryption key
// is tagged with the new key's tag. Throws when PRE is not enabled.
LPEvalKey<Element> ReKeyGen(const LPPublicKey<Element> newKey,
	const LPPrivateKey<Element> origPrivateKey) const {
	if (!this->m_algorithmPRE)
		throw std::logic_error("ReKeyGen operation has not been enabled");
	LPEvalKey<Element> reKey = this->m_algorithmPRE->ReKeyGen(newKey, origPrivateKey);
	reKey->SetKeyTag(newKey->GetKeyTag());
	return reKey;
}

// Re-encryption key generation from a new private key; same tagging rule.
LPEvalKey<Element> ReKeyGen(const LPPrivateKey<Element> newKey,
	const LPPrivateKey<Element> origPrivateKey) const {
	if (!this->m_algorithmPRE)
		throw std::logic_error("ReKeyGen operation has not been enabled");
	LPEvalKey<Element> reKey = this->m_algorithmPRE->ReKeyGen(newKey, origPrivateKey);
	reKey->SetKeyTag(newKey->GetKeyTag());
	return reKey;
}
// Proxy re-encryption wrapper; the output ciphertext is tagged with the
// evaluation key's tag. Throws when PRE is not enabled.
Ciphertext<Element> ReEncrypt(const LPEvalKey<Element> evalKey,
	const Ciphertext<Element> ciphertext) const {
	if (!this->m_algorithmPRE)
		throw std::logic_error("ReEncrypt operation has not been enabled");
	Ciphertext<Element> reEncrypted = this->m_algorithmPRE->ReEncrypt(evalKey, ciphertext);
	reEncrypted->SetKeyTag(evalKey->GetKeyTag());
	return reEncrypted;
}
/////////////////////////////////////////
// the three functions below are wrappers for things in LPMultipartyAlgorithm (Multiparty)
//

// Wrapper for Multiparty Key Gen
// FIXME check key ID for multiparty

// Client-side multiparty key generation from the lead client's public key;
// tags the public key with the secret key's tag.
LPKeyPair<Element> MultipartyKeyGen(CryptoContext<Element> cc,
	const LPPublicKey<Element> pk1,
	bool makeSparse, bool PRE) {
	if (!this->m_algorithmMultiparty)
		throw std::logic_error("MultipartyKeyGen operation has not been enabled");
	LPKeyPair<Element> keyPair = this->m_algorithmMultiparty->MultipartyKeyGen(cc, pk1, makeSparse, PRE);
	keyPair.publicKey->SetKeyTag(keyPair.secretKey->GetKeyTag());
	return keyPair;
}

// Wrapper for Multiparty Key Gen
// FIXME key IDs for multiparty

// Server-side multiparty key generation from the clients' secret keys;
// tags the public key with the secret key's tag.
LPKeyPair<Element> MultipartyKeyGen(CryptoContext<Element> cc,
	const vector<LPPrivateKey<Element>>& secretKeys,
	bool makeSparse) {
	if (!this->m_algorithmMultiparty)
		throw std::logic_error("MultipartyKeyGen operation has not been enabled");
	LPKeyPair<Element> keyPair = this->m_algorithmMultiparty->MultipartyKeyGen(cc, secretKeys, makeSparse);
	keyPair.publicKey->SetKeyTag(keyPair.secretKey->GetKeyTag());
	return keyPair;
}
// FIXME key IDs for multiparty

// "Main" partial decryption run by non-lead clients; the partially
// decrypted ciphertext is tagged with the private key's tag.
Ciphertext<Element> MultipartyDecryptMain(const LPPrivateKey<Element> privateKey,
	const Ciphertext<Element> ciphertext) const {
	if (!this->m_algorithmMultiparty)
		throw std::logic_error("MultipartyDecryptMain operation has not been enabled");
	Ciphertext<Element> partial = this->m_algorithmMultiparty->MultipartyDecryptMain(privateKey, ciphertext);
	partial->SetKeyTag(privateKey->GetKeyTag());
	return partial;
}
// FIXME key IDs for multiparty

// "Lead" partial decryption run by the lead client; the partially
// decrypted ciphertext is tagged with the private key's tag.
Ciphertext<Element> MultipartyDecryptLead(const LPPrivateKey<Element> privateKey,
	const Ciphertext<Element> ciphertext) const {
	if (!this->m_algorithmMultiparty)
		throw std::logic_error("MultipartyDecryptLead operation has not been enabled");
	Ciphertext<Element> partial = this->m_algorithmMultiparty->MultipartyDecryptLead(privateKey, ciphertext);
	partial->SetKeyTag(privateKey->GetKeyTag());
	return partial;
}
// Fuses the partially decrypted ciphertexts into the final plaintext.
DecryptResult MultipartyDecryptFusion(const vector<Ciphertext<Element>>& ciphertextVec,
	NativePoly *plaintext) const {
	if (!this->m_algorithmMultiparty)
		throw std::logic_error("MultipartyDecrypt operation has not been enabled");
	return this->m_algorithmMultiparty->MultipartyDecryptFusion(ciphertextVec, plaintext);
}
/////////////////////////////////////////
// the three functions below are wrappers for things in LPSHEAlgorithm (SHE)
//

// Wrapper that delegates AddRandomNoise to the SHE algorithm.
Ciphertext<Element> AddRandomNoise(const Ciphertext<Element> ciphertext) const {
	if (!this->m_algorithmSHE)
		throw std::logic_error("AddRandomNoise operation has not been enabled");
	return this->m_algorithmSHE->AddRandomNoise(ciphertext);
}
// Homomorphic addition of two ciphertexts. Throws if SHE is disabled.
Ciphertext<Element> EvalAdd(const Ciphertext<Element> ciphertext1,
    const Ciphertext<Element> ciphertext2) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalAdd operation has not been enabled");
    return this->m_algorithmSHE->EvalAdd(ciphertext1, ciphertext2);
}
// Homomorphic addition of a ciphertext and a plaintext. Throws if SHE is disabled.
Ciphertext<Element> EvalAdd(const Ciphertext<Element> ciphertext1,
    const Plaintext plaintext) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalAdd operation has not been enabled");
    return this->m_algorithmSHE->EvalAdd(ciphertext1, plaintext);
}
// Homomorphic subtraction of two ciphertexts. Throws if SHE is disabled.
Ciphertext<Element> EvalSub(const Ciphertext<Element> ciphertext1,
    const Ciphertext<Element> ciphertext2) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalSub operation has not been enabled");
    return this->m_algorithmSHE->EvalSub(ciphertext1, ciphertext2);
}
// Homomorphic subtraction of a plaintext from a ciphertext. Throws if SHE is disabled.
Ciphertext<Element> EvalSub(const Ciphertext<Element> ciphertext1,
    const Plaintext plaintext) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalSub operation has not been enabled");
    return this->m_algorithmSHE->EvalSub(ciphertext1, plaintext);
}
// Homomorphic multiplication of two ciphertexts (no relinearization key).
// Throws if SHE is disabled.
Ciphertext<Element> EvalMult(const Ciphertext<Element> ciphertext1,
    const Ciphertext<Element> ciphertext2) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalMult operation has not been enabled");
    return this->m_algorithmSHE->EvalMult(ciphertext1, ciphertext2);
}
// Homomorphic multiplication of a ciphertext by a plaintext. Throws if SHE is disabled.
Ciphertext<Element> EvalMult(const Ciphertext<Element> ciphertext,
    const Plaintext plaintext) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalMult operation has not been enabled");
    return this->m_algorithmSHE->EvalMult(ciphertext, plaintext);
}
// Homomorphic multiplication of two ciphertexts followed by key switching
// with the supplied evaluation key. Throws if SHE is disabled.
Ciphertext<Element> EvalMult(const Ciphertext<Element> ciphertext1,
    const Ciphertext<Element> ciphertext2,
    const LPEvalKey<Element> evalKey) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalMult operation has not been enabled");
    return this->m_algorithmSHE->EvalMult(ciphertext1, ciphertext2, evalKey);
}
// Multiplies a list of ciphertexts together using the supplied evaluation keys.
// Throws if SHE is disabled.
Ciphertext<Element> EvalMultMany(const vector<Ciphertext<Element>>& ciphertext, const vector<LPEvalKey<Element>> &evalKeys) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalMultMany operation has not been enabled");
    return this->m_algorithmSHE->EvalMultMany(ciphertext, evalKeys);
}
// Homomorphic negation of a ciphertext. Throws if SHE is disabled.
Ciphertext<Element> EvalNegate(const Ciphertext<Element> ciphertext) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalNegate operation has not been enabled");
    return this->m_algorithmSHE->EvalNegate(ciphertext);
}
// Generates automorphism keys for the given index list; every generated key
// inherits the public key's tag. Throws if SHE is disabled.
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPublicKey<Element> publicKey,
    const LPPrivateKey<Element> origPrivateKey,
    const std::vector<usint> &indexList) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalAutomorphismKeyGen operation has not been enabled");
    auto keyMap = this->m_algorithmSHE->EvalAutomorphismKeyGen(publicKey, origPrivateKey, indexList);
    for (auto& entry : *keyMap) {
        entry.second->SetKeyTag(publicKey->GetKeyTag());
    }
    return keyMap;
}
// Applies the i-th automorphism to a ciphertext using the precomputed key map.
// Throws if SHE is disabled.
Ciphertext<Element> EvalAutomorphism(const Ciphertext<Element> ciphertext, usint i,
    const std::map<usint, LPEvalKey<Element>> &evalKeys) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalAutomorphism operation has not been enabled");
    return this->m_algorithmSHE->EvalAutomorphism(ciphertext, i, evalKeys);
}
// Generates automorphism keys from a private key only; each key inherits the
// private key's tag. Throws if SHE is disabled.
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPrivateKey<Element> privateKey,
    const std::vector<usint> &indexList) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalAutomorphismKeyGen operation has not been enabled");
    auto keyMap = this->m_algorithmSHE->EvalAutomorphismKeyGen(privateKey, indexList);
    for (auto& entry : *keyMap) {
        entry.second->SetKeyTag(privateKey->GetKeyTag());
    }
    return keyMap;
}
// Generates the rotation keys needed by EvalSum; each key inherits the private
// key's tag. Throws if SHE is disabled.
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumKeyGen(
    const LPPrivateKey<Element> privateKey,
    const LPPublicKey<Element> publicKey) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalSumKeyGen operation has not been enabled");
    auto keyMap = this->m_algorithmSHE->EvalSumKeyGen(privateKey, publicKey);
    for (auto& entry : *keyMap) {
        entry.second->SetKeyTag(privateKey->GetKeyTag());
    }
    return keyMap;
}
// Homomorphic batched summation over the given batch size.
// Throws if SHE is disabled.
Ciphertext<Element> EvalSum(const Ciphertext<Element> ciphertext, usint batchSize,
    const std::map<usint, LPEvalKey<Element>> &evalKeys) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalSum operation has not been enabled");
    return this->m_algorithmSHE->EvalSum(ciphertext, batchSize, evalKeys);
}
// Homomorphic inner product of two ciphertexts; the result is tagged with the
// tag of the first EvalSum key. Throws if SHE is disabled.
Ciphertext<Element> EvalInnerProduct(const Ciphertext<Element> ciphertext1,
    const Ciphertext<Element> ciphertext2, usint batchSize,
    const std::map<usint, LPEvalKey<Element>> &evalSumKeys,
    const LPEvalKey<Element> evalMultKey) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalInnerProduct operation has not been enabled");
    auto result = this->m_algorithmSHE->EvalInnerProduct(ciphertext1, ciphertext2, batchSize, evalSumKeys, evalMultKey);
    result->SetKeyTag(evalSumKeys.begin()->second->GetKeyTag());
    return result;
}
// Homomorphic inner product of a ciphertext with a plaintext.
// Throws if SHE is disabled.
Ciphertext<Element> EvalInnerProduct(const Ciphertext<Element> ciphertext1,
    const Plaintext ciphertext2, usint batchSize,
    const std::map<usint, LPEvalKey<Element>> &evalSumKeys) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalInnerProduct operation has not been enabled");
    return this->m_algorithmSHE->EvalInnerProduct(ciphertext1, ciphertext2, batchSize, evalSumKeys);
}
// Batched encrypted linear regression; tags every entry of the result matrix
// with the eval-mult key's tag. Throws if SHE is disabled.
shared_ptr<Matrix<RationalCiphertext<Element>>>
EvalLinRegressBatched(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
    const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize,
    const std::map<usint, LPEvalKey<Element>> &evalSumKeys,
    const LPEvalKey<Element> evalMultKey) const {
    if (this->m_algorithmSHE) {
        string kID = evalMultKey->GetKeyTag();
        auto ctm = this->m_algorithmSHE->EvalLinRegressBatched(x, y, batchSize, evalSumKeys, evalMultKey);
        for (size_t r = 0; r < ctm->GetRows(); r++) {
            for (size_t c = 0; c < ctm->GetCols(); c++) {
                (*ctm)(r, c).SetKeyTag(kID);
            }
        }
        return ctm;
    } else {
        // Fix: message previously said "EvalLinRegressionBatched", which does not
        // match this method's actual name.
        throw std::logic_error("EvalLinRegressBatched operation has not been enabled");
    }
}
// Encrypted cross-correlation over a window of the input matrices.
// Throws if SHE is disabled.
Ciphertext<Element>
EvalCrossCorrelation(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
    const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize,
    usint indexStart, usint length,
    const std::map<usint, LPEvalKey<Element>> &evalSumKeys,
    const LPEvalKey<Element> evalMultKey) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalCrossCorrelation operation has not been enabled");
    // FIXME: mark with which key?
    return this->m_algorithmSHE->EvalCrossCorrelation(x, y, batchSize, indexStart, length, evalSumKeys, evalMultKey);
}
/**
 * EvalLinRegression - Computes the parameter vector for linear regression via
 * least squares, i.e. (x^T x)^{-1} x^T y.
 * @param x - matrix of regressors
 * @param y - vector of dependent variables
 * @return the least-squares parameter vector
 * @throws std::logic_error if the SHE feature was not enabled
 */
shared_ptr<Matrix<RationalCiphertext<Element>>>
EvalLinRegression(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
    const shared_ptr<Matrix<RationalCiphertext<Element>>> y) const
{
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalLinRegression operation has not been enabled");
    // FIXME mark with which key??
    return this->m_algorithmSHE->EvalLinRegression(x, y);
}
// Generates a key-switch hint from the original to the new private key; the
// hint carries the new key's tag. Throws if SHE is disabled.
LPEvalKey<Element> KeySwitchGen(
    const LPPrivateKey<Element> originalPrivateKey,
    const LPPrivateKey<Element> newPrivateKey) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("KeySwitchGen operation has not been enabled");
    auto hint = this->m_algorithmSHE->KeySwitchGen(originalPrivateKey, newPrivateKey);
    hint->SetKeyTag(newPrivateKey->GetKeyTag());
    return hint;
}
// Switches a ciphertext to a new key using the supplied hint.
// Throws if SHE is disabled.
Ciphertext<Element> KeySwitch(
    const LPEvalKey<Element> keySwitchHint,
    const Ciphertext<Element> cipherText) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("KeySwitch operation has not been enabled");
    return this->m_algorithmSHE->KeySwitch(keySwitchHint, cipherText);
}
// Generates a relinearization-style key-switch key toward the new public key;
// the key carries the new public key's tag. Throws if SHE is disabled.
LPEvalKey<Element> KeySwitchRelinGen(const LPPublicKey<Element> newKey, const LPPrivateKey<Element> origPrivateKey) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("KeySwitchRelinGen operation has not been enabled");
    auto key = this->m_algorithmSHE->KeySwitchRelinGen(newKey, origPrivateKey);
    key->SetKeyTag(newKey->GetKeyTag());
    return key;
}
// Applies a relinearization-style key switch; the result carries the eval
// key's tag. Throws if SHE is disabled.
Ciphertext<Element> KeySwitchRelin(const LPEvalKey<Element> evalKey,
    const Ciphertext<Element> ciphertext) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("KeySwitchRelin operation has not been enabled");
    auto switched = this->m_algorithmSHE->KeySwitchRelin(evalKey, ciphertext);
    switched->SetKeyTag(evalKey->GetKeyTag());
    return switched;
}
// Generates the relinearization key for EvalMult; the key carries the private
// key's tag. Throws if SHE is disabled.
LPEvalKey<Element> EvalMultKeyGen(const LPPrivateKey<Element> originalPrivateKey) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalMultKeyGen operation has not been enabled");
    auto key = this->m_algorithmSHE->EvalMultKeyGen(originalPrivateKey);
    key->SetKeyTag(originalPrivateKey->GetKeyTag());
    return key;
}
// Generates the full vector of relinearization keys (for higher-degree
// products); each key carries the private key's tag. Throws if SHE is disabled.
vector<LPEvalKey<Element>> EvalMultKeysGen(const LPPrivateKey<Element> originalPrivateKey) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalMultKeysGen operation has not been enabled");
    auto keys = this->m_algorithmSHE->EvalMultKeysGen(originalPrivateKey);
    for (auto& key : keys) {
        key->SetKeyTag(originalPrivateKey->GetKeyTag());
    }
    return keys;
}
// Multiplies two ciphertexts and immediately relinearizes with the supplied
// key vector. Throws if SHE is disabled.
Ciphertext<Element> EvalMultAndRelinearize(const Ciphertext<Element> ct1,
    const Ciphertext<Element> ct2, const vector<LPEvalKey<Element>> &ek) const {
    if (!this->m_algorithmSHE)
        throw std::logic_error("EvalMultAndRelinearize operation has not been enabled");
    return this->m_algorithmSHE->EvalMultAndRelinearize(ct1, ct2, ek);
}
/////////////////////////////////////////
// the functions below are wrappers for things in LPFHEAlgorithm (FHE)
//
// TODO: Add Bootstrap and any other FHE methods
/////////////////////////////////////////
// the functions below are wrappers for things in LPSHEAlgorithm (SHE)
//
// Modulus reduction of a ciphertext; the result keeps the input's key tag.
// Throws if the leveled-SHE feature is disabled.
Ciphertext<Element> ModReduce(Ciphertext<Element> cipherText) const {
    if (!this->m_algorithmLeveledSHE)
        throw std::logic_error("ModReduce operation has not been enabled");
    auto reduced = this->m_algorithmLeveledSHE->ModReduce(cipherText);
    reduced->SetKeyTag(cipherText->GetKeyTag());
    return reduced;
}
// Ring-dimension reduction of a ciphertext; the result carries the key-switch
// hint's tag. Throws if the leveled-SHE feature is disabled.
Ciphertext<Element> RingReduce(Ciphertext<Element> cipherText, const LPEvalKey<Element> keySwitchHint) const {
    if (!this->m_algorithmLeveledSHE)
        throw std::logic_error("RingReduce operation has not been enabled");
    auto reduced = this->m_algorithmLeveledSHE->RingReduce(cipherText, keySwitchHint);
    reduced->SetKeyTag(keySwitchHint->GetKeyTag());
    return reduced;
}
// Reports whether ring reduction is possible for the given parameters.
// Throws if the leveled-SHE feature is disabled.
bool CanRingReduce(usint ringDimension, const std::vector<BigInteger> &moduli, const double rootHermiteFactor) const {
    if (!this->m_algorithmLeveledSHE)
        throw std::logic_error("CanRingReduce operation has not been enabled");
    return this->m_algorithmLeveledSHE->CanRingReduce(ringDimension, moduli, rootHermiteFactor);
}
// EvalMult followed by key switching and modulus reduction in one step; the
// result carries the quadratic hint's tag. Throws if leveled SHE is disabled.
Ciphertext<Element> ComposedEvalMult(
    const Ciphertext<Element> cipherText1,
    const Ciphertext<Element> cipherText2,
    const LPEvalKey<Element> quadKeySwitchHint) const {
    if (!this->m_algorithmLeveledSHE)
        throw std::logic_error("ComposedEvalMult operation has not been enabled");
    auto product = this->m_algorithmLeveledSHE->ComposedEvalMult(cipherText1, cipherText2, quadKeySwitchHint);
    product->SetKeyTag(quadKeySwitchHint->GetKeyTag());
    return product;
}
// Drops the ciphertext to a lower level using the linear key-switch hint; the
// result carries the hint's tag. Throws if leveled SHE is disabled.
Ciphertext<Element> LevelReduce(const Ciphertext<Element> cipherText1,
    const LPEvalKeyNTRU<Element> linearKeySwitchHint) const {
    if (!this->m_algorithmLeveledSHE)
        throw std::logic_error("LevelReduce operation has not been enabled");
    auto reduced = this->m_algorithmLeveledSHE->LevelReduce(cipherText1, linearKeySwitchHint);
    reduced->SetKeyTag(linearKeySwitchHint->GetKeyTag());
    return reduced;
}
// Returns the encryption algorithm. NOTE(review): unlike the other wrappers,
// this dereferences m_algorithmEncryption without a null check -- undefined
// behavior if the encryption feature was never enabled.
const LPEncryptionAlgorithm<Element>& getAlgorithm() const { return *m_algorithmEncryption; }
protected:
// Per-feature algorithm implementations. A null pointer means the
// corresponding feature was not enabled, and the public wrappers above throw
// std::logic_error for that feature. (Ownership of these raw pointers is not
// visible in this chunk -- presumably managed by the enclosing scheme object;
// verify before changing.)
LPParameterGenerationAlgorithm<Element> *m_algorithmParamsGen;
LPEncryptionAlgorithm<Element> *m_algorithmEncryption;
LPPREAlgorithm<Element> *m_algorithmPRE;
LPMultipartyAlgorithm<Element> *m_algorithmMultiparty;
LPSHEAlgorithm<Element> *m_algorithmSHE;
LPFHEAlgorithm<Element> *m_algorithmFHE;
LPLeveledSHEAlgorithm<Element> *m_algorithmLeveledSHE;
};
} // namespace lbcrypto ends
#endif
|
mm.c | #include "util.h"
#include "mm.h"
/* Threads shared variables */
int **A; /* left operand matrix, SIZE x SIZE, filled with random 0/1 in init() */
int **B; /* right operand matrix, SIZE x SIZE, filled with random 0/1 in init() */
int **C; /* result matrix C = A * B, zeroed in init() */
int size = SIZE; /* matrix dimension; SIZE comes from a project header */
int threads = 0; /* worker count, set from argv[1] in main() */
/* Sequential baseline: C = A * B with the naive triple loop.
 * TIME()/ENDTIME() are project macros (util.h) that bracket the measured
 * region -- presumably wall-clock timing; confirm in util.h. */
void start_seq() {
TIME()
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
for (int k = 0; k < size; k++) {
C[i][j] += A[i][k] * B[k][j];
}
}
}
ENDTIME()
}
/* Pthread worker: computes a horizontal stripe of rows of C = A * B.
 * arg points to this worker's id in [0, threads).
 * Fix: when size is not evenly divisible by threads, the integer division
 * size / threads truncates and the trailing size % threads rows were never
 * computed by any worker; the last worker now picks up the remainder. */
void *parallel_pthread(void *arg) {
    int *id = (int *) arg;
    int stripe = size / threads;
    int init = (*id) * stripe;
    /* last worker extends its range to cover the remainder rows */
    int end = (*id == threads - 1) ? size : init + stripe;
    for (int i = init; i < end; i++) {
        for (int j = 0; j < size; j++) {
            for (int k = 0; k < size; k++) {
                C[i][j] += A[i][k] * B[k][j];
            }
        }
    }
    return 0;
}
/* OpenMP parallel version: rows of C are distributed across `threads`
 * OpenMP threads; each row is written by exactly one thread, so no
 * synchronization on C is required. */
void start_openmp() {
TIME()
#pragma omp parallel for num_threads(threads)
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
for (int k = 0; k < size; k++) {
C[i][j] += A[i][k] * B[k][j];
}
}
}
ENDTIME()
}
/* Allocates the SIZE x SIZE matrices A, B, C; fills A and B with random 0/1
 * values and zeroes C. Fix: every malloc result is now checked before use
 * (CERT MEM32-C) -- previously a failed allocation was dereferenced. */
void init() {
    srand(time(0));
    A = malloc(sizeof(int*) * SIZE);
    B = malloc(sizeof(int*) * SIZE);
    C = malloc(sizeof(int*) * SIZE);
    if (A == 0 || B == 0 || C == 0) {
        exit(1); /* out of memory */
    }
    for (int i = 0; i < SIZE; i++) {
        A[i] = malloc(sizeof(int) * SIZE);
        B[i] = malloc(sizeof(int) * SIZE);
        C[i] = malloc(sizeof(int) * SIZE);
        if (A[i] == 0 || B[i] == 0 || C[i] == 0) {
            exit(1); /* out of memory */
        }
        for (int j = 0; j < SIZE; j++) {
            A[i][j] = rand() % 2;
            B[i][j] = rand() % 2;
            C[i][j] = 0;
        }
    }
}
/* Usage: <prog> <threads> <mode>
 * mode 0 = sequential, 1 = pthreads, 2 = OpenMP.
 * Fixes: argv[1]/argv[2] were read without checking argc (crash when
 * arguments are missing); threads is validated before it sizes the
 * pthread_t VLA (a VLA of non-positive length is undefined behavior);
 * the triplicated freetrix calls are hoisted after the switch, which also
 * frees the matrices on the previously-leaking default path. */
int main(int argc, char **argv) {
    if (argc < 3) {
        return 1; /* missing <threads> and/or <mode> */
    }
    int prog = atoi(argv[2]);
    threads = atoi(argv[1]);
    if (threads < 1) {
        return 1; /* invalid worker count */
    }
    init();
    switch (prog) {
    case 0:
        start_seq();
        break;
    case 1: {
        pthread_t thread[threads];
        start_pthread(thread, threads, parallel_pthread);
        break;
    }
    case 2:
        start_openmp();
        break;
    default:
        break;
    }
    freetrix(A);
    freetrix(B);
    freetrix(C);
    return 0;
}
|
unit_cell.h | // Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file unit_cell.h
*
* \brief Contains definition and partial implementation of sirius::Unit_cell class.
*/
#ifndef __UNIT_CELL_H__
#define __UNIT_CELL_H__
#include <algorithm>
#include "descriptors.h"
#include "atom_type.h"
#include "atom_symmetry_class.h"
#include "atom.h"
#include "mpi_grid.hpp"
#include "unit_cell_symmetry.h"
#include "simulation_parameters.h"
#include "json.hpp"
namespace sirius {
using json = nlohmann::json;
/// Representation of a unit cell.
class Unit_cell
{
private:
/// Basic parameters of the simulation.
Simulation_parameters const& parameters_;
/// Mapping between atom type label and an ordered internal id in the range [0, \f$ N_{types} \f$).
std::map<std::string, int> atom_type_id_map_;
/// List of atom types.
std::vector<Atom_type> atom_types_;
/// List of atom classes.
std::vector<Atom_symmetry_class> atom_symmetry_classes_;
/// List of atoms.
std::vector<Atom> atoms_;
/// Split index of atoms.
splindex<block> spl_num_atoms_;
/// Global index of atom by index of PAW atom.
std::vector<int> paw_atom_index_;
/// Split index of PAW atoms.
splindex<block> spl_num_paw_atoms_;
/// Split index of atom symmetry classes.
splindex<block> spl_num_atom_symmetry_classes_;
/// Bravais lattice vectors in column order.
/** The following convention is used to transform fractional coordinates to Cartesian:
* \f[
* \vec v_{C} = {\bf L} \vec v_{f}
* \f]
*/
matrix3d<double> lattice_vectors_;
/// Inverse matrix of Bravais lattice vectors.
/** This matrix is used to find fractional coordinates by Cartesian coordinates:
* \f[
* \vec v_{f} = {\bf L}^{-1} \vec v_{C}
* \f]
*/
matrix3d<double> inverse_lattice_vectors_;
/// Reciprocal lattice vectors in column order.
/** The following convention is used:
* \f[
* \vec a_{i} \vec b_{j} = 2 \pi \delta_{ij}
* \f]
* or in matrix notation
* \f[
* {\bf A} {\bf B}^{T} = 2 \pi {\bf I}
* \f]
*/
matrix3d<double> reciprocal_lattice_vectors_;
/// Volume \f$ \Omega \f$ of the unit cell. Volume of Brillouin zone is then \f$ (2\Pi)^3 / \Omega \f$.
double omega_{0};
/// Total volume of the muffin-tin spheres.
double volume_mt_{0};
/// Volume of the interstitial region.
double volume_it_{0};
/// Total nuclear charge.
int total_nuclear_charge_{0};
/// Total number of core electrons.
double num_core_electrons_{0};
/// Total number of valence electrons.
double num_valence_electrons_{0};
/// Total number of electrons.
double num_electrons_{0};
/// List of equivalent atoms, provided externally.
std::vector<int> equivalent_atoms_;
/// Maximum number of muffin-tin points among all atom types.
int max_num_mt_points_{0};
/// Total number of MT basis functions.
int mt_basis_size_{0};
/// Maximum number of MT basis functions among all atoms.
int max_mt_basis_size_{0};
/// Maximum number of MT radial basis functions among all atoms.
int max_mt_radial_basis_size_{0};
/// Total number of augmented wave basis functions in the muffin-tins.
/** This is equal to the total number of matching coefficients for each plane-wave. */
int mt_aw_basis_size_{0};
/// Total number of local orbital basis functions.
int mt_lo_basis_size_{0};
/// Maximum AW basis size among all atoms.
int max_mt_aw_basis_size_{0};
/// Maximum local orbital basis size among all atoms.
int max_mt_lo_basis_size_{0};
/// List of nearest neighbours for each atom.
std::vector<std::vector<nearest_neighbour_descriptor>> nearest_neighbours_;
/// Minimum muffin-tin radius.
double min_mt_radius_{0};
/// Maximum muffin-tin radius.
double max_mt_radius_{0};
/// Maximum orbital quantum number of radial functions between all atom types.
int lmax_{-1};
Communicator_bundle comm_bundle_atoms_;
std::unique_ptr<Unit_cell_symmetry> symmetry_;
Communicator const& comm_;
/// Automatically determine new muffin-tin radii as a half distance between neighbor atoms.
/** In order to guarantee a unique solution muffin-tin radii are dermined as a half distance
* bethween nearest atoms. Initial values of the muffin-tin radii are ignored. */
std::vector<double> find_mt_radii();
/// Check if MT spheres overlap
inline bool check_mt_overlap(int& ia__, int& ja__);
inline int next_atom_type_id(std::string label__)
{
/* check if the label was already added */
if (atom_type_id_map_.count(label__) != 0) {
std::stringstream s;
s << "atom type with label " << label__ << " is already in list";
TERMINATE(s);
}
/* take text id */
atom_type_id_map_[label__] = static_cast<int>(atom_types_.size());
return atom_type_id_map_[label__];
}
public:
Unit_cell(Simulation_parameters const& parameters__, Communicator const& comm__)
: parameters_(parameters__)
, comm_(comm__)
{
}
/// Initialize the unit cell data
/** Several things must be done during this phase:
* 1. Compute number of electrons
* 2. Compute MT basis function indices
* 3. [if needed] Scale MT radii
* 4. Check MT overlap
* 5. Create radial grid for each atom type
* 6. Find symmetry and assign symmetry class to each atom
* 7. Create split indices for atoms and atom classes */
inline void initialize();
/// Add new atom type to the list of atom types and read necessary data from the .json file
inline void add_atom_type(const std::string label, const std::string file_name = "")
{
if (atoms_.size()) {
TERMINATE("Can't add new atom type if atoms are already added");
}
int id = next_atom_type_id(label);
atom_types_.push_back(std::move(Atom_type(parameters_, id, label, file_name)));
}
/// Add new atom to the list of atom types.
inline void add_atom(const std::string label, vector3d<double> position, vector3d<double> vector_field)
{
if (atom_type_id_map_.count(label) == 0) {
std::stringstream s;
s << "atom type with label " << label << " is not found";
TERMINATE(s);
}
if (atom_id_by_position(position) >= 0) {
std::stringstream s;
s << "atom with the same position is already in list" << std::endl
<< " position : " << position[0] << " " << position[1] << " " << position[2];
TERMINATE(s);
}
atoms_.push_back(std::move(Atom(atom_type(label), position, vector_field)));
atom_type(label).add_atom_id(static_cast<int>(atoms_.size()) - 1);
}
/// Add new atom without vector field to the list of atom types.
inline void add_atom(const std::string label, vector3d<double> position)
{
add_atom(label, position, {0, 0, 0});
}
/// Add PAW atoms.
inline void init_paw()
{
for (int ia = 0; ia < num_atoms(); ia++) {
if (atom(ia).type().is_paw()) {
paw_atom_index_.push_back(ia);
}
}
spl_num_paw_atoms_ = splindex<block>(num_paw_atoms(), comm_.size(), comm_.rank());
}
/// Return number of PAW atoms.
inline int num_paw_atoms() const
{
return static_cast<int>(paw_atom_index_.size());
}
/// Get split index of PAW atoms.
inline splindex<block> spl_num_paw_atoms() const
{
return spl_num_paw_atoms_;
}
inline int spl_num_paw_atoms(int idx__) const
{
return spl_num_paw_atoms_[idx__];
}
inline int paw_atom_index(int ipaw__) const
{
return paw_atom_index_[ipaw__];
}
/// Print basic info.
inline void print_info(int verbosity_);
inline unit_cell_parameters_descriptor unit_cell_parameters();
/// Get crystal symmetries and equivalent atoms.
/** Makes a call to spglib providing the basic unit cell information: lattice vectors and atomic types
* and positions. Gets back symmetry operations and a table of equivalent atoms. The table of equivalent
* atoms is then used to make a list of atom symmetry classes and related data. */
inline void get_symmetry();
/// Write structure to CIF file.
inline void write_cif();
/// Write structure to JSON file.
inline json serialize();
/// Set matrix of lattice vectors.
/** Initializes lattice vectors, inverse lattice vector matrix, reciprocal lattice vectors and the
* unit cell volume. */
inline void set_lattice_vectors(matrix3d<double> lattice_vectors__)
{
lattice_vectors_ = lattice_vectors__;
inverse_lattice_vectors_ = inverse(lattice_vectors_);
omega_ = std::abs(lattice_vectors_.det());
reciprocal_lattice_vectors_ = transpose(inverse(lattice_vectors_)) * twopi;
}
/// Set lattice vectors.
inline void set_lattice_vectors(vector3d<double> a0__, vector3d<double> a1__, vector3d<double> a2__)
{
matrix3d<double> lv;
for (int x : {0, 1, 2}) {
lv(x, 0) = a0__[x];
lv(x, 1) = a1__[x];
lv(x, 2) = a2__[x];
}
set_lattice_vectors(lv);
}
/// Find the cluster of nearest neighbours around each atom
inline void find_nearest_neighbours(double cluster_radius);
inline bool is_point_in_mt(vector3d<double> vc, int& ja, int& jr, double& dr, double tp[2]) const;
inline void generate_radial_functions();
inline void generate_radial_integrals();
inline std::string chemical_formula();
inline int atom_id_by_position(vector3d<double> position__)
{
for (int ia = 0; ia < num_atoms(); ia++) {
auto vd = atom(ia).position() - position__;
if (vd.length() < 1e-10) {
return ia;
}
}
return -1;
}
template <typename T>
inline vector3d<double> get_cartesian_coordinates(vector3d<T> a__) const
{
return lattice_vectors_ * a__;
}
inline vector3d<double> get_fractional_coordinates(vector3d<double> a) const
{
return inverse_lattice_vectors_ * a;
}
/// Unit cell volume.
inline double omega() const
{
return omega_;
}
/// Number of atom types.
inline int num_atom_types() const
{
assert(atom_types_.size() == atom_type_id_map_.size());
return static_cast<int>(atom_types_.size());
}
/// Return atom type instance by id.
inline Atom_type& atom_type(int id__)
{
assert(id__ >= 0 && id__ < (int)atom_types_.size());
return atom_types_[id__];
}
/// Return const atom type instance by id.
inline Atom_type const& atom_type(int id__) const
{
assert(id__ >= 0 && id__ < (int)atom_types_.size());
return atom_types_[id__];
}
/// Return atom type instance by label.
inline Atom_type& atom_type(std::string const label__)
{
if (!atom_type_id_map_.count(label__)) {
std::stringstream s;
s << "atom type " << label__ << " is not found";
TERMINATE(s);
}
int id = atom_type_id_map_.at(label__);
return atom_type(id);
}
/// Return const atom type instance by label.
inline Atom_type const& atom_type(std::string const label__) const
{
if (!atom_type_id_map_.count(label__)) {
std::stringstream s;
s << "atom type " << label__ << " is not found";
TERMINATE(s);
}
int id = atom_type_id_map_.at(label__);
return atom_type(id);
}
/// Number of atom symmetry classes.
inline int num_atom_symmetry_classes() const
{
return static_cast<int>(atom_symmetry_classes_.size());
}
/// Return const symmetry class instance by class id.
inline Atom_symmetry_class const& atom_symmetry_class(int id__) const
{
return atom_symmetry_classes_[id__];
}
/// Return symmetry class instance by class id.
inline Atom_symmetry_class& atom_symmetry_class(int id__)
{
return atom_symmetry_classes_[id__];
}
/// Number of atoms in the unit cell.
inline int num_atoms() const
{
return static_cast<int>(atoms_.size());
}
/// Return const atom instance by id.
inline Atom const& atom(int id__) const
{
assert(id__ >= 0 && id__ < (int)atoms_.size());
return atoms_[id__];
}
/// Return atom instance by id.
inline Atom& atom(int id__)
{
assert(id__ >= 0 && id__ < (int)atoms_.size());
return atoms_[id__];
}
inline int total_nuclear_charge() const
{
return total_nuclear_charge_;
}
/// Total number of electrons (core + valence).
inline double num_electrons() const
{
return num_electrons_;
}
/// Number of valence electrons.
inline double num_valence_electrons() const
{
return num_valence_electrons_;
}
/// Number of core electrons.
inline double num_core_electrons() const
{
return num_core_electrons_;
}
/// Maximum number of muffin-tin points among all atom types.
inline int max_num_mt_points() const
{
return max_num_mt_points_;
}
/// Total number of the augmented wave basis functions over all atoms.
inline int mt_aw_basis_size() const
{
return mt_aw_basis_size_;
}
/// Total number of local orbital basis functions over all atoms.
inline int mt_lo_basis_size() const
{
return mt_lo_basis_size_;
}
/// Total number of the muffin-tin basis functions.
/** Total number of MT basis functions equals to the sum of the total number of augmented wave
* basis functions and the total number of local orbital basis functions among all atoms. It controls
* the size of the muffin-tin part of the first-variational states and second-variational wave functions. */
inline int mt_basis_size() const
{
return mt_basis_size_;
}
/// Maximum number of basis functions among all atom types.
inline int max_mt_basis_size() const
{
return max_mt_basis_size_;
}
/// Maximum number of radial functions among all atom types.
inline int max_mt_radial_basis_size() const
{
return max_mt_radial_basis_size_;
}
/// Minimum muffin-tin radius.
inline double min_mt_radius() const
{
return min_mt_radius_;
}
/// Maximum muffin-tin radius.
inline double max_mt_radius() const
{
return max_mt_radius_;
}
/// Maximum number of AW basis functions among all atom types.
inline int max_mt_aw_basis_size() const
{
return max_mt_aw_basis_size_;
}
inline int max_mt_lo_basis_size() const
{
return max_mt_lo_basis_size_;
}
void set_equivalent_atoms(int const* equivalent_atoms__)
{
equivalent_atoms_.resize(num_atoms());
memcpy(&equivalent_atoms_[0], equivalent_atoms__, num_atoms() * sizeof(int));
}
inline splindex<block> const& spl_num_atoms() const
{
return spl_num_atoms_;
}
inline int spl_num_atoms(int i) const
{
return static_cast<int>(spl_num_atoms_[i]);
}
inline splindex<block> const& spl_num_atom_symmetry_classes() const
{
return spl_num_atom_symmetry_classes_;
}
inline int spl_num_atom_symmetry_classes(int i) const
{
return static_cast<int>(spl_num_atom_symmetry_classes_[i]);
}
inline double volume_mt() const
{
return volume_mt_;
}
inline double volume_it() const
{
return volume_it_;
}
inline int lmax() const
{
return lmax_;
}
inline int num_nearest_neighbours(int ia) const
{
return static_cast<int>(nearest_neighbours_[ia].size());
}
inline nearest_neighbour_descriptor const& nearest_neighbour(int i, int ia) const
{
return nearest_neighbours_[ia][i];
}
inline Unit_cell_symmetry const& symmetry() const
{
return (*symmetry_);
}
inline matrix3d<double> const& lattice_vectors() const
{
return lattice_vectors_;
}
inline matrix3d<double> const& inverse_lattice_vectors() const
{
return inverse_lattice_vectors_;
}
inline matrix3d<double> const& reciprocal_lattice_vectors() const
{
return reciprocal_lattice_vectors_;
}
/// Return a single lattice vector.
inline vector3d<double> lattice_vector(int idx__) const
{
return vector3d<double>(lattice_vectors_(0, idx__), lattice_vectors_(1, idx__), lattice_vectors_(2, idx__));
}
inline void import(Unit_cell_input const& inp__)
{
if (inp__.exist_) {
/* first, load all types */
for (int iat = 0; iat < (int)inp__.labels_.size(); iat++) {
auto label = inp__.labels_[iat];
auto fname = inp__.atom_files_.at(label);
add_atom_type(label, fname);
}
/* then load atoms */
for (int iat = 0; iat < (int)inp__.labels_.size(); iat++) {
auto label = inp__.labels_[iat];
auto fname = inp__.atom_files_.at(label);
for (size_t ia = 0; ia < inp__.coordinates_[iat].size(); ia++) {
auto v = inp__.coordinates_[iat][ia];
vector3d<double> p(v[0], v[1], v[2]);
vector3d<double> f(v[3], v[4], v[5]);
add_atom(label, p, f);
}
}
set_lattice_vectors(inp__.a0_, inp__.a1_, inp__.a2_);
}
}
Simulation_parameters const& parameters() const
{
return parameters_;
}
Communicator const& comm() const
{
return comm_;
}
};
/* Initialize all derived unit-cell data; see the declaration's doc comment for
 * the ordered list of phases. Statement order matters throughout: basis-size
 * maxima and offsets depend on atom-type init, atom init depends on the
 * accumulated basis offsets, and MT radii must be fixed before volumes. */
inline void Unit_cell::initialize()
{
PROFILE("sirius::Unit_cell::initialize");
/* split number of atom between all MPI ranks */
spl_num_atoms_ = splindex<block>(num_atoms(), comm_.size(), comm_.rank());
/* initialize atom types */
int offs_lo{0};
/* accumulate per-type maxima and the running local-orbital offset */
for (int iat = 0; iat < num_atom_types(); iat++) {
atom_type(iat).init(offs_lo);
max_num_mt_points_ = std::max(max_num_mt_points_, atom_type(iat).num_mt_points());
max_mt_basis_size_ = std::max(max_mt_basis_size_, atom_type(iat).mt_basis_size());
max_mt_radial_basis_size_ = std::max(max_mt_radial_basis_size_, atom_type(iat).mt_radial_basis_size());
max_mt_aw_basis_size_ = std::max(max_mt_aw_basis_size_, atom_type(iat).mt_aw_basis_size());
max_mt_lo_basis_size_ = std::max(max_mt_lo_basis_size_, atom_type(iat).mt_lo_basis_size());
lmax_ = std::max(lmax_, atom_type(iat).indexr().lmax());
offs_lo += atom_type(iat).mt_lo_basis_size();
}
/* find the charges */
for (int i = 0; i < num_atoms(); i++) {
total_nuclear_charge_ += atom(i).zn();
num_core_electrons_ += atom(i).type().num_core_electrons();
num_valence_electrons_ += atom(i).type().num_valence_electrons();
}
num_electrons_ = num_core_electrons_ + num_valence_electrons_;
/* initialize atoms; each atom receives its offsets into the global AW/lo/total
 * MT basis, which are accumulated across atoms in this loop */
for (int ia = 0; ia < num_atoms(); ia++) {
atom(ia).init(mt_aw_basis_size_, mt_lo_basis_size_, mt_basis_size_);
mt_aw_basis_size_ += atom(ia).mt_aw_basis_size();
mt_lo_basis_size_ += atom(ia).mt_lo_basis_size();
mt_basis_size_ += atom(ia).mt_basis_size();
}
assert(mt_basis_size_ == mt_aw_basis_size_ + mt_lo_basis_size_);
/* neighbour search radius: at least the longest lattice vector, never smaller
 * than the user-requested nn_radius_ */
auto v0 = lattice_vector(0);
auto v1 = lattice_vector(1);
auto v2 = lattice_vector(2);
double r = std::max(std::max(v0.length(), std::max(v1.length(), v2.length())),
parameters_.parameters_input().nn_radius_);
find_nearest_neighbours(r);
if (parameters_.full_potential()) {
/* find new MT radii and initialize radial grid */
if (parameters_.auto_rmt()) {
std::vector<double> Rmt = find_mt_radii();
for (int iat = 0; iat < num_atom_types(); iat++) {
//atom_type(iat).set_mt_radius(Rmt[iat]);
double r0 = atom_type(iat).radial_grid().first();
atom_type(iat).set_radial_grid(radial_grid_t::exponential_grid, atom_type(iat).num_mt_points(),
r0, Rmt[iat]);
}
}
/* abort with a diagnostic if any two MT spheres overlap */
int ia, ja;
if (check_mt_overlap(ia, ja)) {
std::stringstream s;
s << "overlaping muffin-tin spheres for atoms " << ia << "(" << atom(ia).type().symbol() << ")"
<< " and " << ja << "(" << atom(ja).type().symbol() << ")" << std::endl
<< "  radius of atom " << ia << " : " << atom(ia).mt_radius() << std::endl
<< "  radius of atom " << ja << " : " << atom(ja).mt_radius() << std::endl
<< "  distance : " << nearest_neighbours_[ia][1].distance << " " << nearest_neighbours_[ja][1].distance;
TERMINATE(s);
}
min_mt_radius_ = 1e100;
max_mt_radius_ = 0;
for (int i = 0; i < num_atom_types(); i++) {
min_mt_radius_ = std::min(min_mt_radius_, atom_type(i).mt_radius());
max_mt_radius_ = std::max(max_mt_radius_, atom_type(i).mt_radius());
}
}
if (parameters_.use_symmetry()) {
get_symmetry();
}
spl_num_atom_symmetry_classes_ = splindex<block>(num_atom_symmetry_classes(), comm_.size(), comm_.rank());
/* MT volume is the sum of the sphere volumes; interstitial is the rest */
volume_mt_ = 0.0;
if (parameters_.full_potential()) {
for (int ia = 0; ia < num_atoms(); ia++) {
volume_mt_ += fourpi * std::pow(atom(ia).mt_radius(), 3) / 3.0;
}
}
volume_it_ = omega() - volume_mt_;
init_paw();
//== write_cif();
//== if (comm().rank() == 0) {
//==     std::ofstream ofs(std::string("unit_cell.json"), std::ofstream::out | std::ofstream::trunc);
//==     ofs << serialize().dump(4);
//== }
}
/* Detect the symmetry of the unit cell and group atoms into symmetry classes.
 *
 * Atomic positions, vector fields (spins) and type ids are handed to a new
 * Unit_cell_symmetry object. Atoms are then partitioned into equivalence classes,
 * either from a user-provided equivalent_atoms_ list or from the classes reported
 * by the symmetry finder; every atom receives a pointer to its class.
 * Terminates if a symmetry object already exists. */
inline void Unit_cell::get_symmetry()
{
    PROFILE("sirius::Unit_cell::get_symmetry");

    if (num_atoms() == 0) {
        return;
    }

    /* drop previously assigned classes before rebuilding them */
    if (atom_symmetry_classes_.size() != 0) {
        atom_symmetry_classes_.clear();
        for (int ia = 0; ia < num_atoms(); ia++) {
            atom(ia).set_symmetry_class(nullptr);
        }
    }

    if (symmetry_ != nullptr) {
        TERMINATE("Symmetry() object is already allocated");
    }

    /* gather positions, vector fields and type ids of all atoms */
    mdarray<double, 2> positions(3, num_atoms());
    mdarray<double, 2> spins(3, num_atoms());
    std::vector<int> types(num_atoms());
    for (int ia = 0; ia < num_atoms(); ia++) {
        auto vp = atom(ia).position();
        auto vf = atom(ia).vector_field();
        for (int x : {0, 1, 2}) {
            positions(x, ia) = vp[x];
            spins(x, ia) = vf[x];
        }
        types[ia] = atom(ia).type_id();
    }

    symmetry_ = std::unique_ptr<Unit_cell_symmetry>(
        new Unit_cell_symmetry(lattice_vectors_, num_atoms(), positions, spins, types, parameters_.spglib_tolerance()));

    int atom_class_id{-1};
    std::vector<int> asc(num_atoms(), -1);
    for (int i = 0; i < num_atoms(); i++) {
        /* if a symmetry class is not yet assigned to this atom */
        if (asc[i] == -1) {
            /* take next id */
            atom_class_id++;
            /* fix: construct in place; std::move() on a temporary in push_back was a
             * redundant pessimizing move */
            atom_symmetry_classes_.emplace_back(atom_class_id, atoms_[i].type());
            /* scan all atoms and assign the new class id to every equivalent atom */
            for (int j = 0; j < num_atoms(); j++) {
                bool is_equal = (equivalent_atoms_.size())
                                    ? (equivalent_atoms_[j] == equivalent_atoms_[i])
                                    : (symmetry_->atom_symmetry_class(j) == symmetry_->atom_symmetry_class(i));
                if (is_equal) {
                    asc[j] = atom_class_id;
                    atom_symmetry_classes_.back().add_atom_id(j);
                }
            }
        }
    }

    /* link every atom back to its symmetry class */
    for (auto& e : atom_symmetry_classes_) {
        for (int i = 0; i < e.num_atoms(); i++) {
            int ia = e.atom_id(i);
            atoms_[ia].set_symmetry_class(&e);
        }
    }

    assert(num_atom_symmetry_classes() != 0);
}
/* Determine automatic muffin-tin radii for all atom types.
 *
 * Two strategies, selected by parameters_.auto_rmt():
 *   1: for each nearest-neighbour pair take 95% of half the bond length, capped by rmt_max();
 *   2: scale the current per-type radii by a common factor so that neighbouring spheres
 *      fill 95% of the bond length, capped by rmt_max().
 * An "inflate" pass then lets types not involved in any near-touching pair expand
 * towards their neighbours. Terminates if the neighbour table is empty or if any
 * resulting radius is below 0.3 a.u. Returns one radius per atom type. */
inline std::vector<double> Unit_cell::find_mt_radii()
{
if (nearest_neighbours_.size() == 0) {
TERMINATE("array of nearest neighbours is empty");
}
/* huge initial value, reduced by the min() updates below */
std::vector<double> Rmt(num_atom_types(), 1e10);
if (parameters_.auto_rmt() == 1) {
for (int ia = 0; ia < num_atoms(); ia++) {
int id1 = atom(ia).type_id();
/* entry [ia][0] is the atom itself; [ia][1] is the closest real neighbour */
if (nearest_neighbours_[ia].size() > 1) {
int ja = nearest_neighbours_[ia][1].atom_id;
int id2 = atom(ja).type_id();
/* don't allow spheres to touch: take a smaller value than half a distance */
double R = std::min(parameters_.rmt_max(), 0.95 * nearest_neighbours_[ia][1].distance / 2);
/* take minimal R for the given atom type */
Rmt[id1] = std::min(R, Rmt[id1]);
Rmt[id2] = std::min(R, Rmt[id2]);
} else {
/* isolated atom: use the maximum allowed radius */
Rmt[id1] = parameters_.rmt_max();
}
}
}
if (parameters_.auto_rmt() == 2) {
std::vector<double> scale(num_atom_types(), 1e10);
for (int ia = 0; ia < num_atoms(); ia++) {
int id1 = atom(ia).type_id();
if (nearest_neighbours_[ia].size() > 1) {
int ja = nearest_neighbours_[ia][1].atom_id;
int id2 = atom(ja).type_id();
double d = nearest_neighbours_[ia][1].distance;
/* common scaling factor so the two current radii fill 95% of the bond */
double s = 0.95 * d / (atom_type(id1).mt_radius() + atom_type(id2).mt_radius());
scale[id1] = std::min(s, scale[id1]);
scale[id2] = std::min(s, scale[id2]);
} else {
scale[id1] = parameters_.rmt_max() / atom_type(id1).mt_radius();
}
}
for (int iat = 0; iat < num_atom_types(); iat++) {
Rmt[iat] = std::min(parameters_.rmt_max(), atom_type(iat).mt_radius() * scale[iat]);
}
}
/* Suppose we have 3 different atoms. First we determine Rmt between 1st and 2nd atom,
 * then we determine Rmt between (let's say) 2nd and 3rd atom and at this point we reduce
 * the Rmt of the 2nd atom. This means that the 1st atom gets a possibility to expand if
 * it is far from the 3rd atom. */
bool inflate = true;
if (inflate) {
/* mark types already close to touching; those must not be inflated */
std::vector<bool> scale_Rmt(num_atom_types(), true);
for (int ia = 0; ia < num_atoms(); ia++) {
int id1 = atom(ia).type_id();
if (nearest_neighbours_[ia].size() > 1) {
int ja = nearest_neighbours_[ia][1].atom_id;
int id2 = atom(ja).type_id();
double dist = nearest_neighbours_[ia][1].distance;
if (Rmt[id1] + Rmt[id2] > dist * 0.94) {
scale_Rmt[id1] = false;
scale_Rmt[id2] = false;
}
}
}
/* expand the remaining types up to 95% of the free space towards the neighbour */
for (int ia = 0; ia < num_atoms(); ia++) {
int id1 = atom(ia).type_id();
if (nearest_neighbours_[ia].size() > 1) {
int ja = nearest_neighbours_[ia][1].atom_id;
int id2 = atom(ja).type_id();
double dist = nearest_neighbours_[ia][1].distance;
if (scale_Rmt[id1]) {
Rmt[id1] = std::min(parameters_.rmt_max(), 0.95 * (dist - Rmt[id2]));
}
}
}
}
/* sanity check: abort on unphysically small radii */
for (int i = 0; i < num_atom_types(); i++) {
if (Rmt[i] < 0.3) {
std::stringstream s;
s << "muffin-tin radius for atom type " << i << " (" << atom_types_[i].label()
<< ") is too small: " << Rmt[i];
TERMINATE(s);
}
}
return Rmt;
}
/* Check whether any pair of muffin-tin spheres overlaps (sum of radii >= distance
 * between the atoms). On the first overlap found, the two atom indices are written
 * to ia__ / ja__ and true is returned; otherwise false (outputs untouched).
 * Terminates if atoms exist but the neighbour table has not been built. */
inline bool Unit_cell::check_mt_overlap(int& ia__, int& ja__)
{
    if (num_atoms() != 0 && nearest_neighbours_.size() == 0) {
        TERMINATE("array of nearest neighbours is empty");
    }
    for (int i = 0; i < num_atoms(); i++) {
        /* entry [i][0] is the central atom itself; a real neighbour is required */
        if (nearest_neighbours_[i].size() <= 1) {
            continue;
        }
        auto const& nn = nearest_neighbours_[i][1];
        double radii_sum = atom(i).mt_radius() + atom(nn.atom_id).mt_radius();
        if (radii_sum >= nn.distance) {
            ia__ = i;
            ja__ = nn.atom_id;
            return true;
        }
    }
    return false;
}
/* Print a human-readable summary of the unit cell to stdout: lattice and reciprocal
 * lattice vectors, volumes, atom types, atom counts and (when symmetry was detected)
 * space-group information. verbosity_ >= 2 adds per-atom tables and, inside the
 * symmetry section, the full list of symmetry operations.
 * NOTE(review): no internal rank guard — presumably called on one rank only; confirm
 * against callers. */
inline void Unit_cell::print_info(int verbosity_)
{
printf("\n");
printf("Unit cell\n");
/* 80-character separator line */
for (int i = 0; i < 80; i++) {
printf("-");
}
printf("\n");
printf("lattice vectors\n");
for (int i = 0; i < 3; i++) {
printf(" a%1i : %18.10f %18.10f %18.10f \n", i + 1, lattice_vectors_(0, i), lattice_vectors_(1, i),
lattice_vectors_(2, i));
}
printf("reciprocal lattice vectors\n");
for (int i = 0; i < 3; i++) {
printf(" b%1i : %18.10f %18.10f %18.10f \n", i + 1, reciprocal_lattice_vectors_(0, i),
reciprocal_lattice_vectors_(1, i), reciprocal_lattice_vectors_(2, i));
}
printf("\n");
printf("unit cell volume : %18.8f [a.u.^3]\n", omega());
printf("1/sqrt(omega) : %18.8f\n", 1.0 / sqrt(omega()));
printf("MT volume : %f (%5.2f%%)\n", volume_mt(), volume_mt() * 100 / omega());
printf("IT volume : %f (%5.2f%%)\n", volume_it(), volume_it() * 100 / omega());
printf("\n");
printf("number of atom types : %i\n", num_atom_types());
for (int i = 0; i < num_atom_types(); i++) {
int id = atom_type(i).id();
printf("type id : %i symbol : %2s mt_radius : %10.6f\n", id, atom_type(i).symbol().c_str(),
atom_type(i).mt_radius());
}
printf("number of atoms : %i\n", num_atoms());
printf("number of symmetry classes : %i\n", num_atom_symmetry_classes());
if (!parameters_.full_potential()) {
printf("number of PAW atoms : %i\n", num_paw_atoms());
}
/* per-atom tables (verbose mode) */
if (verbosity_ >= 2) {
printf("\n");
printf("atom id position vector_field type id class id\n");
printf("----------------------------------------------------------------------------------------\n");
for (int i = 0; i < num_atoms(); i++) {
auto pos = atom(i).position();
auto vf = atom(i).vector_field();
printf("%6i %f %f %f %f %f %f %6i %6i\n", i, pos[0], pos[1], pos[2], vf[0], vf[1], vf[2],
atom(i).type_id(), atom(i).symmetry_class_id());
}
printf("\n");
for (int ic = 0; ic < num_atom_symmetry_classes(); ic++) {
printf("class id : %i atom id : ", ic);
for (int i = 0; i < atom_symmetry_class(ic).num_atoms(); i++) {
printf("%i ", atom_symmetry_class(ic).atom_id(i));
}
printf("\n");
}
printf("\n");
printf("atom id position (Cartesian, a.u.)\n");
printf("----------------------------------------------------------------------------------------\n");
for (int i = 0; i < num_atoms(); i++) {
auto pos = atom(i).position();
auto vc = get_cartesian_coordinates(pos);
printf("%6i %18.12f %18.12f %18.12f\n", i, vc[0], vc[1], vc[2]);
}
printf("\n");
for (int ic = 0; ic < num_atom_symmetry_classes(); ic++) {
printf("class id : %i atom id : ", ic);
for (int i = 0; i < atom_symmetry_class(ic).num_atoms(); i++) {
printf("%i ", atom_symmetry_class(ic).atom_id(i));
}
printf("\n");
}
}
/* symmetry information, only when get_symmetry() has been run */
if (symmetry_ != nullptr) {
printf("\n");
printf("space group number : %i\n", symmetry_->spacegroup_number());
printf("international symbol : %s\n", symmetry_->international_symbol().c_str());
printf("Hall symbol : %s\n", symmetry_->hall_symbol().c_str());
printf("number of operations : %i\n", symmetry_->num_mag_sym());
printf("transformation matrix : \n");
auto tm = symmetry_->transformation_matrix();
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
printf("%12.6f ", tm(i, j));
}
printf("\n");
}
printf("origin shift : \n");
auto t = symmetry_->origin_shift();
printf("%12.6f %12.6f %12.6f\n", t[0], t[1], t[2]);
/* full list of operations: rotation R, translation T, spin rotation S */
if (verbosity_ >= 2) {
printf("symmetry operations : \n");
for (int isym = 0; isym < symmetry_->num_mag_sym(); isym++) {
auto R = symmetry_->magnetic_group_symmetry(isym).spg_op.R;
auto t = symmetry_->magnetic_group_symmetry(isym).spg_op.t;
auto S = symmetry_->magnetic_group_symmetry(isym).spin_rotation;
printf("isym : %i\n", isym);
printf("R : ");
for (int i = 0; i < 3; i++) {
if (i) {
printf(" ");
}
for (int j = 0; j < 3; j++) {
printf("%3i ", R(i, j));
}
printf("\n");
}
printf("T : ");
for (int j = 0; j < 3; j++) {
printf("%8.4f ", t[j]);
}
printf("\n");
printf("S : ");
for (int i = 0; i < 3; i++) {
if (i) {
printf(" ");
}
for (int j = 0; j < 3; j++) {
printf("%8.4f ", S(i, j));
}
printf("\n");
}
printf("\n");
}
}
}
}
/* Compute the conventional cell parameters: edge lengths a, b, c and the angles
 * alpha (between b and c), beta (between a and c), gamma (between a and b),
 * returned in degrees. */
inline unit_cell_parameters_descriptor Unit_cell::unit_cell_parameters()
{
    unit_cell_parameters_descriptor d;

    /* the three lattice vectors are the columns of lattice_vectors_ */
    vector3d<double> av(lattice_vectors_(0, 0), lattice_vectors_(1, 0), lattice_vectors_(2, 0));
    vector3d<double> bv(lattice_vectors_(0, 1), lattice_vectors_(1, 1), lattice_vectors_(2, 1));
    vector3d<double> cv(lattice_vectors_(0, 2), lattice_vectors_(1, 2), lattice_vectors_(2, 2));

    d.a = av.length();
    d.b = bv.length();
    d.c = cv.length();

    /* angle from the normalized dot product, converted from radians to degrees */
    d.alpha = std::acos(dot(bv, cv) / d.b / d.c) * 180 / pi;
    d.beta = std::acos(dot(av, cv) / d.a / d.c) * 180 / pi;
    d.gamma = std::acos(dot(av, bv) / d.a / d.b) * 180 / pi;

    return d;
}
/* Write the unit cell in CIF format to "unit_cell.cif": cell lengths and angles
 * followed by the labelled fractional atomic positions. Only MPI rank 0 writes. */
inline void Unit_cell::write_cif()
{
    if (comm_.rank() == 0) {
        FILE* fout = fopen("unit_cell.cif", "w");
        /* fix: fopen() can fail (e.g. read-only directory); writing through a
         * null FILE* is undefined behaviour */
        if (fout == nullptr) {
            return;
        }
        auto d = unit_cell_parameters();
        fprintf(fout, "_cell_length_a %f\n", d.a);
        fprintf(fout, "_cell_length_b %f\n", d.b);
        fprintf(fout, "_cell_length_c %f\n", d.c);
        fprintf(fout, "_cell_angle_alpha %f\n", d.alpha);
        fprintf(fout, "_cell_angle_beta %f\n", d.beta);
        fprintf(fout, "_cell_angle_gamma %f\n", d.gamma);
        // fprintf(fout, "loop_\n");
        // fprintf(fout, "_symmetry_equiv_pos_as_xyz\n");
        fprintf(fout, "loop_\n");
        fprintf(fout, "_atom_site_label\n");
        fprintf(fout, "_atom_type_symbol\n");
        fprintf(fout, "_atom_site_fract_x\n");
        fprintf(fout, "_atom_site_fract_y\n");
        fprintf(fout, "_atom_site_fract_z\n");
        /* one row per atom: 1-based id, type label, fractional coordinates */
        for (int ia = 0; ia < num_atoms(); ia++) {
            auto pos = atom(ia).position();
            fprintf(fout, "%i %s %f %f %f\n", ia + 1, atom(ia).type().label().c_str(), pos[0], pos[1], pos[2]);
        }
        fclose(fout);
    }
}
/* Serialize the unit cell to a JSON dictionary with keys:
 *   "lattice_vectors" : the three lattice vectors,
 *   "atom_types"      : list of atom type labels,
 *   "atom_files"      : map label -> species file name,
 *   "atoms"           : map label -> list of fractional atomic positions. */
inline json Unit_cell::serialize()
{
    json dict;
    dict["lattice_vectors"] = {{lattice_vectors_(0, 0), lattice_vectors_(1, 0), lattice_vectors_(2, 0)},
                               {lattice_vectors_(0, 1), lattice_vectors_(1, 1), lattice_vectors_(2, 1)},
                               {lattice_vectors_(0, 2), lattice_vectors_(1, 2), lattice_vectors_(2, 2)}};
    dict["atom_types"] = json::array();
    for (int iat = 0; iat < num_atom_types(); iat++) {
        dict["atom_types"].push_back(atom_type(iat).label());
    }
    dict["atom_files"] = json::object();
    for (int iat = 0; iat < num_atom_types(); iat++) {
        dict["atom_files"][atom_type(iat).label()] = atom_type(iat).file_name();
    }
    dict["atoms"] = json::object();
    for (int iat = 0; iat < num_atom_types(); iat++) {
        dict["atoms"][atom_type(iat).label()] = json::array();
        for (int i = 0; i < atom_type(iat).num_atoms(); i++) {
            int ia = atom_type(iat).atom_id(i);
            auto v = atom(ia).position();
            dict["atoms"][atom_type(iat).label()].push_back({v[0], v[1], v[2]});
        }
    }
    /* fix: return the local by value; 'return std::move(dict)' suppressed copy
     * elision / NRVO (clang's -Wpessimizing-move) */
    return dict;
}
/* Build, for every atom, the list of neighbours within cluster_radius, sorted by
 * increasing distance. All lattice translations up to max_frac_coord are scanned,
 * so entry [ia][0] is the atom itself (distance 0) and [ia][1] is the closest real
 * neighbour. Optionally prints the neighbour table on rank 0. */
inline void Unit_cell::find_nearest_neighbours(double cluster_radius)
{
PROFILE("sirius::Unit_cell::find_nearest_neighbours");
/* number of lattice translations needed to cover a sphere of cluster_radius */
auto max_frac_coord = find_translations(cluster_radius, lattice_vectors_);
nearest_neighbours_.clear();
nearest_neighbours_.resize(num_atoms());
#pragma omp parallel for default(shared)
for (int ia = 0; ia < num_atoms(); ia++) {
/* Cartesian position of the central atom */
auto iapos = get_cartesian_coordinates(atom(ia).position());
std::vector<nearest_neighbour_descriptor> nn;
/* (distance, index into nn) pairs used to order the neighbours */
std::vector<std::pair<double, int>> nn_sort;
for (int i0 = -max_frac_coord[0]; i0 <= max_frac_coord[0]; i0++) {
for (int i1 = -max_frac_coord[1]; i1 <= max_frac_coord[1]; i1++) {
for (int i2 = -max_frac_coord[2]; i2 <= max_frac_coord[2]; i2++) {
nearest_neighbour_descriptor nnd;
nnd.translation[0] = i0;
nnd.translation[1] = i1;
nnd.translation[2] = i2;
auto vt = get_cartesian_coordinates<int>(nnd.translation);
for (int ja = 0; ja < num_atoms(); ja++) {
nnd.atom_id = ja;
auto japos = get_cartesian_coordinates(atom(ja).position());
/* vector from central atom to the translated image of atom ja */
vector3d<double> v = japos + vt - iapos;
nnd.distance = v.length();
if (nnd.distance <= cluster_radius) {
nn.push_back(nnd);
nn_sort.push_back(std::pair<double, int>(nnd.distance, (int)nn.size() - 1));
}
}
}
}
}
/* store the neighbours of this atom ordered by increasing distance */
std::sort(nn_sort.begin(), nn_sort.end());
nearest_neighbours_[ia].resize(nn.size());
for (int i = 0; i < (int)nn.size(); i++) {
nearest_neighbours_[ia][i] = nn[nn_sort[i].second];
}
}
/* optional diagnostic dump of the full neighbour table */
if (parameters_.control().print_neighbors_ && comm_.rank() == 0) {
printf("Nearest neighbors\n");
printf("=================\n");
for (int ia = 0; ia < num_atoms(); ia++) {
printf("Central atom: %s (%i)\n", atom(ia).type().symbol().c_str(), ia);
for (int i = 0; i < 80; i++) {
printf("-");
}
printf("\n");
printf("atom ( id) D [a.u.] translation\n");
for (int i = 0; i < 80; i++) {
printf("-");
}
printf("\n");
for (int i = 0; i < (int)nearest_neighbours_[ia].size(); i++) {
int ja = nearest_neighbours_[ia][i].atom_id;
printf("%4s (%4i) %12.6f %4i %4i %4i\n", atom(ja).type().symbol().c_str(), ja,
nearest_neighbours_[ia][i].distance, nearest_neighbours_[ia][i].translation[0],
nearest_neighbours_[ia][i].translation[1], nearest_neighbours_[ia][i].translation[2]);
}
printf("\n");
}
}
}
/* Check if a Cartesian point vc lies inside any muffin-tin sphere.
 *
 * On success returns true and sets:
 *   ja    - index of the containing atom,
 *   jr    - index of the radial-grid interval containing the point (0 if below
 *           the first grid point),
 *   dr    - distance from radial grid point jr (0 if below the first grid point),
 *   tp[2] - spherical angles {theta, phi} relative to the atom center.
 * On failure returns false and sets ja = jr = -1 (dr and tp are left untouched). */
inline bool Unit_cell::is_point_in_mt(vector3d<double> vc, int& ja, int& jr, double& dr, double tp[2]) const
{
/* reduce coordinates to the primitive unit cell */
auto vr = reduce_coordinates(get_fractional_coordinates(vc));
for (int ia = 0; ia < num_atoms(); ia++) {
/* check the atom and its periodic images in the 26 neighbouring cells */
for (int i0 = -1; i0 <= 1; i0++) {
for (int i1 = -1; i1 <= 1; i1++) {
for (int i2 = -1; i2 <= 1; i2++) {
/* atom position */
vector3d<double> posf = vector3d<double>(i0, i1, i2) + atom(ia).position();
/* vector connecting center of atom and reduced point */
vector3d<double> vf = vr.first - posf;
/* convert to spherical coordinates */
auto vs = SHT::spherical_coordinates(get_cartesian_coordinates(vf));
if (vs[0] < atom(ia).mt_radius()) {
ja = ia;
tp[0] = vs[1]; // theta
tp[1] = vs[2]; // phi
if (vs[0] < atom(ia).type().radial_grid(0)) {
/* point is inside the innermost grid shell */
jr = 0;
dr = 0.0;
} else {
/* locate the radial interval [r_ir, r_{ir+1}) containing the point */
for (int ir = 0; ir < atom(ia).num_mt_points() - 1; ir++) {
if (vs[0] >= atom(ia).type().radial_grid(ir) &&
vs[0] < atom(ia).type().radial_grid(ir + 1)) {
jr = ir;
dr = vs[0] - atom(ia).type().radial_grid(ir);
break;
}
}
}
return true;
}
}
}
}
}
ja = -1;
jr = -1;
return false;
}
/* Generate radial functions of all atom symmetry classes.
 *
 * Each MPI rank computes its local slice of symmetry classes, then every class is
 * broadcast from the rank that owns it. With verbosity >= 1 linearization energies
 * are collected; with verbosity >= 4 local orbitals are dumped on rank 0. */
inline void Unit_cell::generate_radial_functions()
{
PROFILE("sirius::Unit_cell::generate_radial_functions");
/* compute the locally-owned symmetry classes */
for (int icloc = 0; icloc < (int)spl_num_atom_symmetry_classes().local_size(); icloc++) {
int ic = spl_num_atom_symmetry_classes(icloc);
atom_symmetry_class(ic).generate_radial_functions(parameters_.valence_relativity());
}
/* broadcast each class from its owning rank so all ranks have all functions */
for (int ic = 0; ic < num_atom_symmetry_classes(); ic++) {
int rank = spl_num_atom_symmetry_classes().local_rank(ic);
atom_symmetry_class(ic).sync_radial_functions(comm_, rank);
}
if (parameters_.control().verbosity_ >= 1) {
/* collect linearization energies from all ranks into a parallel stdout buffer;
 * presumably pout flushes when it goes out of scope — confirm pstdout semantics */
runtime::pstdout pout(comm_);
for (int icloc = 0; icloc < (int)spl_num_atom_symmetry_classes().local_size(); icloc++) {
int ic = spl_num_atom_symmetry_classes(icloc);
atom_symmetry_class(ic).write_enu(pout);
}
if (comm_.rank() == 0) {
printf("\n");
printf("Linearization energies\n");
}
}
if (parameters_.control().verbosity_ >= 4 && comm_.rank() == 0) {
for (int ic = 0; ic < num_atom_symmetry_classes(); ic++) {
atom_symmetry_class(ic).dump_lo();
}
}
}
/* Generate radial integrals of all atom symmetry classes and atoms.
 *
 * Locally-owned symmetry classes and atoms compute their integrals first; each
 * result is then broadcast from the owning rank so all ranks hold all integrals. */
inline void Unit_cell::generate_radial_integrals()
{
PROFILE("sirius::Unit_cell::generate_radial_integrals");
/* per-class integrals on the owning ranks */
for (int icloc = 0; icloc < spl_num_atom_symmetry_classes().local_size(); icloc++) {
int ic = spl_num_atom_symmetry_classes(icloc);
atom_symmetry_class(ic).generate_radial_integrals(parameters_.valence_relativity());
}
/* broadcast every class from the rank that computed it */
for (int ic = 0; ic < num_atom_symmetry_classes(); ic++) {
int rank = spl_num_atom_symmetry_classes().local_rank(ic);
atom_symmetry_class(ic).sync_radial_integrals(comm_, rank);
}
/* per-atom integrals on the owning ranks (self communicator: no further split) */
for (int ialoc = 0; ialoc < spl_num_atoms_.local_size(); ialoc++) {
int ia = spl_num_atoms_[ialoc];
atom(ia).generate_radial_integrals(parameters_.processing_unit(), mpi_comm_self());
}
/* broadcast every atom's integrals from its owning rank */
for (int ia = 0; ia < num_atoms(); ia++) {
int rank = spl_num_atoms().local_rank(ia);
atom(ia).sync_radial_integrals(comm_, rank);
}
}
/* Build the chemical formula string of the cell: for each atom type append its
 * symbol followed by the number of atoms of that type (a count of 1 is omitted),
 * e.g. "SrVO3". */
inline std::string Unit_cell::chemical_formula()
{
    std::string formula;
    for (int iat = 0; iat < num_atom_types(); iat++) {
        formula += atom_type(iat).symbol();
        /* count the atoms belonging to this type */
        int count = 0;
        for (int ia = 0; ia < num_atoms(); ia++) {
            if (atom(ia).type_id() == atom_type(iat).id()) {
                count++;
            }
        }
        /* append the multiplicity unless it is exactly one */
        if (count != 1) {
            std::stringstream ss;
            ss << count;
            formula += ss.str();
        }
    }
    return formula;
}
} // namespace sirius
#endif // __UNIT_CELL_H__
|
calculate_water_fraction.h | #if !defined(KRATOS_CALCULATE_WATER_FRACTION_UTILITY_INCLUDED )
#define KRATOS_CALCULATE_WATER_FRACTION_UTILITY_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// Project includes
#include "includes/define.h"
#include "pfem_2_application_variables.h"
#include "utilities/math_utils.h"
#include "utilities/geometry_utilities.h"
#include "includes/ublas_interface.h"
#include "includes/variables.h"
#include "includes/model_part.h"
#include "includes/node.h"
#include "includes/element.h"
#include "utilities/enrichment_utilities.h"
namespace Kratos
{
template< unsigned int TDim>
class CalculateWaterFraction
{
public:
KRATOS_CLASS_POINTER_DEFINITION(CalculateWaterFraction);
/* Construct the utility, keeping a reference to the model part it will operate on.
 * No other initialization is performed. */
CalculateWaterFraction(ModelPart& model_part)
: mr_model_part(model_part) //mr_model_part is saved as private variable (declared at the end of the file)
{
KRATOS_TRY
//std::cout << "Hello, I am the constructor of the Utility" << std::endl;
KRATOS_CATCH("")
}
/* Destructor: the utility owns no resources (only a reference to the model part). */
~CalculateWaterFraction()
{}
/*
double Calculate() //water fraction
{
KRATOS_TRY
//double area; //we create the needed variables
double sum_area=0.0;
double sum_water_area=0.0;
//double one_third=1.0/3.0;
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
#pragma omp parallel for reduction(+:sum_area) reduction(+:sum_water_area)
for(unsigned int ii=0; ii<mr_model_part.Nodes().size(); ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
const double & nodal_area = inode->FastGetSolutionStepValue(NODAL_AREA); //resetting the temperature
sum_area += nodal_area;
if ((inode->FastGetSolutionStepValue(DISTANCE))<0.0)
sum_water_area += nodal_area;
}
const double water_fraction = sum_water_area / sum_area;
//std::cout << "Finished, the mean temperature is" << water_fraction << std::endl; //we print the result
return water_fraction;
KRATOS_CATCH("")
}
*/
/* Compute the total "water" area (2D) / volume (3D) of the mesh: the sum over all
 * elements of the portion lying on the negative side of the level-set DISTANCE
 * function. Fully negative elements contribute their whole area; cut elements are
 * split with the enrichment utilities and only the negative sub-volumes count.
 * Returns at least 1.0e-100 (seed value, presumably to guard callers against a
 * division by zero — TODO confirm). */
double Calculate()
{
    KRATOS_TRY

    double sum_areas = 1.0e-100;

    ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

    /* partition the element container into one chunk per thread */
    vector<unsigned int> element_partition;
    #ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
    #else
    int number_of_threads = 1;
    #endif
    OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

    #pragma omp parallel for reduction(+:sum_areas)
    for (int kkk = 0; kkk < number_of_threads; kkk++)
    {
        double thread_sum_areas = 0.0;
        for (unsigned int ii = element_partition[kkk]; ii < element_partition[kkk+1]; ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin + ii;

            double Area;
            BoundedMatrix<double, (TDim+1), TDim > DN_DX;
            array_1d<double, (TDim+1) > N;
            Geometry<Node<3> >& geom = ielem->GetGeometry();
            GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Area);

            /* classify element nodes by the sign of the distance function */
            int negative_nodes = 0;
            int positive_nodes = 0;
            for (unsigned int k = 0; k < (TDim+1); k++)
            {
                if (geom[k].FastGetSolutionStepValue(DISTANCE) < 0.0)
                    negative_nodes++;
                else
                    positive_nodes++;
            }

            if (negative_nodes == (TDim+1))
            {
                /* fully submerged element: the whole area counts */
                thread_sum_areas += Area;
            }
            else if (negative_nodes > 0)
            {
                /* cut element: subdivide and add only the negative sub-volumes */
                array_1d<double,(TDim+1)> distances;
                for (unsigned int i = 0; i < (TDim+1); i++)
                {
                    distances[i] = geom[i].FastGetSolutionStepValue(DISTANCE);
                }
                BoundedMatrix<double,3*(TDim-1), 2> Nenriched;
                array_1d<double,(3*(TDim-1))> volumes;
                BoundedMatrix<double,(TDim+1), TDim > coords;
                BoundedMatrix<double, 3*(TDim-1), (TDim+1) > Ngauss;
                array_1d<double,(3*(TDim-1))> signs;
                std::vector< Matrix > gauss_gradients(3*(TDim-1));
                /* fill nodal coordinates */
                for (unsigned int i = 0; i < (TDim+1); i++)
                {
                    const array_1d<double, 3 > & xyz = geom[i].Coordinates();
                    for (unsigned int j = 0; j < TDim; j++)
                        coords(i,j) = xyz(j);
                }
                for (unsigned int i = 0; i < 3*(TDim-1); i++)
                    gauss_gradients[i].resize(2, TDim, false); //2 values of the 2 shape functions, and derivates in (xy) direction).

                unsigned int ndivisions = EnrichmentUtilities::CalculateEnrichedShapeFuncions(coords, DN_DX, distances, volumes, Ngauss, signs, gauss_gradients, Nenriched);
                for (unsigned int i = 0; i != ndivisions; i++)
                    if (signs(i) < 0.0) thread_sum_areas += volumes(i);
            }
        }
        /* BUG FIX: was 'sum_areas = thread_sum_areas;', which discarded the
         * contribution of all but the last partition processed (serial builds kept
         * only the final chunk). Accumulate instead; the OpenMP reduction then
         * combines the per-thread partial sums correctly. */
        sum_areas += thread_sum_areas;
    }
    return sum_areas;
    KRATOS_CATCH("")
}
/* Return the highest Y coordinate among "water" nodes (DISTANCE < 0) whose X lies
 * within +/- 0.001 of x_position; returns -1e8 when no such node exists.
 * NOTE(review): assumes Y is the vertical direction — confirm for 3D models. */
double CalculateWaterHeight(double x_position)
{
KRATOS_TRY
double all_threads_water_height=-100000000.0;
/* nodes are accepted only inside a thin vertical strip around x_position */
const double tolerance=0.001;
const double upper_limit=x_position+tolerance;
const double lower_limit=x_position-tolerance;
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
/* per-thread maximum, merged into the shared maximum under a critical section */
double local_thread_water_height=-100000000.0;
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
double & distance = (inode->FastGetSolutionStepValue(DISTANCE));
if ( distance <0.0)
{
if ((inode->X())<upper_limit && (inode->X())>lower_limit && (inode->Y())>local_thread_water_height)
local_thread_water_height = (inode->Y());
}
//now we search for the node given certain criteria
}
/* double-checked update: cheap unsynchronized test first, then re-check inside
 * the critical section before writing the shared maximum */
if ( local_thread_water_height > all_threads_water_height )
{
#pragma omp critical
{
if ( local_thread_water_height > all_threads_water_height ) all_threads_water_height = local_thread_water_height;
}
}
}
return all_threads_water_height;
KRATOS_CATCH("")
}
/* Element-based estimate of the maximum Courant number:
 * max over elements of VELOCITY_OVER_ELEM_SIZE, scaled by delta_t * 1.414
 * (presumably sqrt(2) as a geometric safety factor — confirm). */
double CalculateMaxCourant()
{
KRATOS_TRY
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
const double delta_t = CurrentProcessInfo[DELTA_TIME];
double all_threads_max_courant = 0.0;
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
/* per-thread maximum, merged into the shared maximum under a critical section */
double local_thread_max_courant = 0.0;
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
//double & distance = (inode->FastGetSolutionStepValue(DISTANCE));
if ((ielem->GetValue(VELOCITY_OVER_ELEM_SIZE))>local_thread_max_courant)
local_thread_max_courant = (ielem->GetValue(VELOCITY_OVER_ELEM_SIZE));
}
/* double-checked update of the shared maximum */
if ( local_thread_max_courant > all_threads_max_courant )
{
#pragma omp critical
{
if ( local_thread_max_courant > all_threads_max_courant ) all_threads_max_courant = local_thread_max_courant;
}
}
}
all_threads_max_courant *= delta_t * 1.414;
return all_threads_max_courant;
KRATOS_CATCH("")
}
/* Nodal estimate of the maximum Courant number restricted to "water" nodes
 * (DISTANCE < 0): max over nodes of |velocity| * delta_t / MEAN_SIZE. */
double CalculateMaxCourantInNegativeElements()
{
KRATOS_TRY
//using a nodal approach (faster!)
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
const double delta_t = CurrentProcessInfo[DELTA_TIME];
double all_threads_max_courant = 0.0;
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
/* per-thread maximum, merged into the shared maximum under a critical section */
double local_thread_max_courant = 0.0;
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
const double & distance = (inode->FastGetSolutionStepValue(DISTANCE));
/* velocity magnitude from the three Cartesian components */
const double velocity = sqrt(pow(inode->FastGetSolutionStepValue(VELOCITY_X),2)+pow(inode->FastGetSolutionStepValue(VELOCITY_Y),2)+pow(inode->FastGetSolutionStepValue(VELOCITY_Z),2));
const double nodal_courant = (velocity*delta_t/inode->FastGetSolutionStepValue(MEAN_SIZE));
if(nodal_courant>local_thread_max_courant && distance < 0.0) //only for negative nodes!
local_thread_max_courant = nodal_courant;
}
/* double-checked update of the shared maximum */
if ( local_thread_max_courant > all_threads_max_courant )
{
#pragma omp critical
{
if ( local_thread_max_courant > all_threads_max_courant ) all_threads_max_courant = local_thread_max_courant;
}
}
}
//all_threads_max_courant *= delta_t * 1.414;
return all_threads_max_courant;
KRATOS_CATCH("")
}
/* Mean Courant number over all elements: sum of positive VELOCITY_OVER_ELEM_SIZE
 * values, scaled by delta_t * 1.414 and divided by the number of elements. */
double CalculateMeanCourant() //water fraction
{
KRATOS_TRY
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
const double delta_t = CurrentProcessInfo[DELTA_TIME];
//double area=0.0; //we create the needed variables
//double number_of_threads = double(OpenMPUtils::GetNumThreads());
double sum_courant=0.0;
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
#pragma omp parallel for reduction(+:sum_courant)
for(int kkk=0; kkk<number_of_threads; kkk++)
{
/* per-thread partial sum, combined by the OpenMP reduction */
double thread_sum_courant=0.0;
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
//const double & velocity_over_elem_size = (ielem->GetValue(VELOCITY_OVER_ELEM_SIZE));
if ((ielem->GetValue(VELOCITY_OVER_ELEM_SIZE))>0.0)
thread_sum_courant += ielem->GetValue(VELOCITY_OVER_ELEM_SIZE);
}
sum_courant += thread_sum_courant;
}
/* convert the accumulated ratio into a mean Courant number */
sum_courant *= delta_t * 1.414 / double(mr_model_part.Elements().size());
return sum_courant;
KRATOS_CATCH("")
}
//NOW ONLY VISCOUS. but since in the first step we cannot use the pressure we just add the viscoust forces. still, lines to use pressure can be uncommented
double CalculateForce(int direction) //
{
KRATOS_TRY
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double viscosity = CurrentProcessInfo[VISCOSITY];
//double delta_t = CurrentProcessInfo[DELTA_TIME];
//array_1d<double,3> & gravity= CurrentProcessInfo[GRAVITY];
const array_1d<double,3> zero3 = ZeroVector(3);
double nodal_weight = 1.0/ (double (TDim) );
double force=0.0;
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
#pragma omp parallel for reduction(+:force)
for(int kkk=0; kkk<number_of_threads; kkk++)
{
double thread_force=0.0;
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
if (ielem->Is(ACTIVE)) //elements can be inactive to add temporary walls. fractional velocity is integrated by parts so walls are seen as having zero velocity without doing anything
{
//double Area;
Geometry<Node<3> >& geom = ielem->GetGeometry();
array_1d<unsigned int, 4 > fixed_nodes; //unordered : i position in the array might not correspond to i node of the element
array_1d<bool, 4 > is_node_fixed; //i position belongs to i node of the element
unsigned int number_of_fixed_nodes=0;
//bool boundary_element=false;
BoundedMatrix<double, 4, 3 > velocities = ZeroMatrix(4, 3);
for (unsigned int i = 0; i < (TDim+1); i++)
{
const array_1d<double, 3 > & velocity = geom[i].FastGetSolutionStepValue(VELOCITY);
for (unsigned int j = 0; j < (TDim); j++)
velocities(i,j) = velocity[j];
if (TDim==2)
{
if (geom[i].IsFixed(FRACT_VEL_X) && geom[i].IsFixed(FRACT_VEL_Y))
{
fixed_nodes[number_of_fixed_nodes]=i;
is_node_fixed[i]=true;
number_of_fixed_nodes++;
}
else
is_node_fixed[i]=false;
}
else // (TDim==3)
{
if (geom[i].IsFixed(FRACT_VEL_X) && geom[i].IsFixed(FRACT_VEL_Y) && geom[i].IsFixed(FRACT_VEL_Z) )
{
fixed_nodes[number_of_fixed_nodes]=i;
number_of_fixed_nodes++;
is_node_fixed[i]=true;
}
else
is_node_fixed[i]=false;
}
}
double plane_point_distance=1.0;
double fixed_face_area_or_lenght=0.0;
array_1d<double, 3 > boundary_stress;
if (number_of_fixed_nodes==TDim) //it means we have cutted elements!
{
//boundary_element=true;
array_1d<double, 3 > normal;
unsigned int free_node=0;
if (TDim==2)
{
fixed_face_area_or_lenght = fabs(sqrt(pow((geom[fixed_nodes[1]].Y()-geom[fixed_nodes[0]].Y()),2 ) + pow( (geom[fixed_nodes[1]].X()-geom[fixed_nodes[0]].X() ),2 ) ) );
normal[0] = geom[fixed_nodes[1]].Y()-geom[fixed_nodes[0]].Y();
normal[1] = - ( geom[fixed_nodes[1]].X()-geom[fixed_nodes[0]].X() );
normal[2] = 0.0;
normal /= sqrt(normal[0]*normal[0]+normal[1]*normal[1]);
if (fixed_nodes[0]==0)
{
if (fixed_nodes[1]==1)
free_node=2;
else
free_node=1;
}
else
free_node=0;
//the plane is composed by the unit normal and any of the points of fixed nodes. we will use fixed_nodes[0];
plane_point_distance = inner_prod( (geom[free_node].Coordinates()-geom[fixed_nodes[0]].Coordinates()) , normal);
//boundary_stress = geom[free_node].FastGetSolutionStepValue(VELOCITY)*viscosity/plane_point_distance;
if (plane_point_distance<0.0)
{
plane_point_distance*=-1.0;
normal *= -1.0;
}
}
else //(TDim==3)
{
//the area is obtained from the crossproduct of the 2 vertices:
MathUtils<double>::CrossProduct(normal, geom[fixed_nodes[1]].Coordinates() - geom[fixed_nodes[0]].Coordinates(), geom[fixed_nodes[2]].Coordinates() - geom[fixed_nodes[0]].Coordinates() );
fixed_face_area_or_lenght = 0.5 * sqrt( pow(normal[0],2) + pow(normal[1],2) + pow(normal[2],2) );
normal /= 2.0 * fixed_face_area_or_lenght; //this way it is a unit vector. now we must find the distance from the plane generated by the triangles to the free node:
//fixed_face_area_or_lenght = fabs(fixed_face_area_or_lenght);
for (unsigned int j=0; j!=(TDim+1); j++)
{
if (is_node_fixed[j]==false)
{
free_node=j;
break;
}
}
//the plane is composed by the unit normal and any of the points of fixed nodes. we will use fixed_nodes[0];
plane_point_distance = inner_prod( (geom[free_node].Coordinates()-geom[fixed_nodes[0]].Coordinates()) , normal);
if (plane_point_distance<0.0)
normal *= -1.0;
{
plane_point_distance*=-1.0;
normal *= -1.0;
}
//boundary_stress = geom[free_node].FastGetSolutionStepValue(VELOCITY)*viscosity/plane_point_distance;
}
boundary_stress = - geom[free_node].FastGetSolutionStepValue(VELOCITY)*viscosity/(fabs(plane_point_distance));
//KRATOS_WATCH(plane_point_distance)
//KRATOS_WATCH(boundary_stress)
//KRATOS_WATCH(fixed_face_area_or_lenght)
//drag forces:
thread_force += boundary_stress[direction]*fixed_face_area_or_lenght; // unit density! careful!
//face_force+=fixed_face_area_or_lenght*normal[direction];
//now pressure forces:
for (unsigned int j=0; j!=(TDim); j++) // the 2 or 3 nodes that define the fixed face:
{
/*
if ( (geom[fixed_nodes[j]].X())<5.0 )
face_force += nodal_weight*(0.5*fixed_face_area_or_lenght*normal[direction]);
else
face_force -= nodal_weight*(0.5*fixed_face_area_or_lenght*normal[direction]);
*/
thread_force +=nodal_weight*(geom[fixed_nodes[j]].FastGetSolutionStepValue(PRESSURE))*fixed_face_area_or_lenght*normal[direction];
//array_1d<double,3> & nodal_normal= (geom[fixed_nodes[j]].FastGetSolutionStepValue(NORMAL));
//face_force += (geom[fixed_nodes[j]].FastGetSolutionStepValue(PRESSURE))*nodal_normal[direction];
}
}
}
}
force+=thread_force;
}
return force;
KRATOS_CATCH("")
}
protected:
private:
ModelPart& mr_model_part;
};
} // namespace Kratos.
#endif // KRATOS_CALCULATE__WATER_FRACTION_UTILITY_INCLUDED defined
|
mxnet_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "./operator_tune.h"
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif // __CUDACC__
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
#ifdef __CUDA_ARCH__
// Device-side copy of PI placed in CUDA constant memory.
__constant__ const float PI = 3.14159265358979323846;
#else
// Host-side PI; isnan pulled in so unqualified calls resolve on host builds.
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
// Number of threads a kernel launch will use on device `xpu` for N work
// items; specialized per device (cpu/gpu) below.
template<typename xpu>
int get_num_threads(const int N);
#ifdef __CUDACC__
// Grid-stride loop helper: `i` visits 0..n-1 across all launched threads
// (the increment is blockDim.x * gridDim.x, i.e. the total thread count).
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Queries the properties of the currently selected CUDA device.
// CUDA_CALL aborts on a CUDA API error.
inline cudaDeviceProp cuda_get_device_prop() {
int device;
CUDA_CALL(cudaGetDevice(&device));
cudaDeviceProp deviceProp;
CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
return deviceProp;
}
/*!
* \brief Get the number of blocks for cuda kernel given N
*/
// Ceiling division of N by the block size, clamped to mshadow's grid limit.
inline int cuda_get_num_blocks(const int N) {
using namespace mshadow::cuda;
return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}
// GPU specialization: total threads = blocks * threads-per-block; may exceed
// N because the block count is rounded up (and is capped at kMaxGridNum).
template<>
inline int get_num_threads<gpu>(const int N) {
using namespace mshadow::cuda;
return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif  // __CUDACC__
// CPU specialization: delegates to the engine's OpenMP policy for the
// recommended thread count; note N is not consulted on CPU.
template<>
inline int get_num_threads<cpu>(const int N) {
return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}
/*! \brief operator request type switch */
// Binds `ReqType` to a compile-time OpReqType constant and expands the body
// once for the matching case. kWriteInplace is deliberately folded into
// kWriteTo; kNullOp (and unknown values) expand to nothing, skipping the body.
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
/*! \brief operator request type switch */
// Like MXNET_ASSIGN_REQ_SWITCH, but the body is ALSO expanded for kNullOp
// (with ReqType = kNullOp), for callers that must observe the no-op request.
#define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
{ \
const OpReqType ReqType = kNullOp; \
{__VA_ARGS__} \
} \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
/*! \brief Dispatch a runtime dimensionality (0..5) to a compile-time
 * constant `ndim` and expand the body once. NDim == 0 intentionally expands
 * to nothing (scalar case); anything above 5 aborts.
 * BUGFIX: the fatal message previously read `"ndim=" << NDim << "too large "`,
 * printing e.g. "ndim=6too large " with no separator.
 */
#define MXNET_NDIM_SWITCH(NDim, ndim, ...)         \
  if (NDim == 0) {                                 \
  } else if (NDim == 1) {                          \
    const int ndim = 1;                            \
    {__VA_ARGS__}                                  \
  } else if (NDim == 2) {                          \
    const int ndim = 2;                            \
    {__VA_ARGS__}                                  \
  } else if (NDim == 3) {                          \
    const int ndim = 3;                            \
    {__VA_ARGS__}                                  \
  } else if (NDim == 4) {                          \
    const int ndim = 4;                            \
    {__VA_ARGS__}                                  \
  } else if (NDim == 5) {                          \
    const int ndim = 5;                            \
    {__VA_ARGS__}                                  \
  } else {                                         \
    LOG(FATAL) << "ndim=" << NDim << " too large"; \
  }
// Type switch binding DType for every supported dtype EXCEPT int8/uint8,
// which abort with LOG(FATAL). Use for ops that cannot operate on 8-bit data.
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
// Type switch binding DType for every supported dtype EXCEPT float16,
// which aborts with LOG(FATAL). Use for ops without a half-precision path.
#define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
LOG(FATAL) << "This operation does not " \
"support float16"; \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
// Binds DType (input dtype) plus AType, a wider floating-point accumulation
// type (float16 accumulates in float, float32 in double). Integer dtypes
// abort: this switch is only for floating-point reductions.
#define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
typedef float AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
typedef uint8_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types not uint8"; \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
typedef int8_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types not int8"; \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
typedef int32_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types, not int32"; \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
typedef int64_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types, not int64"; \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
/*! \brief Binds DType (input dtype) plus AType, a wider accumulation type,
 * and expands the body for every supported dtype (integers accumulate in a
 * wider integer; float16 in float; float32 in double).
 * BUGFIX: the four integer cases previously declared the typedefs but never
 * expanded {__VA_ARGS__}, so the caller's body was silently skipped for
 * uint8/int8/int32/int64 inputs (upstream expands it in every case).
 */
#define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\
  switch (type) {                                  \
  case mshadow::kFloat32:                          \
    {                                              \
      typedef float DType;                         \
      typedef double AType;                        \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kFloat64:                          \
    {                                              \
      typedef double DType;                        \
      typedef double AType;                        \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kFloat16:                          \
    {                                              \
      typedef mshadow::half::half_t DType;         \
      typedef float AType;                         \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kUint8:                            \
    {                                              \
      typedef uint8_t DType;                       \
      typedef uint32_t AType;                      \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kInt8:                             \
    {                                              \
      typedef int8_t DType;                        \
      typedef int32_t AType;                       \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kInt32:                            \
    {                                              \
      typedef int32_t DType;                       \
      typedef int64_t AType;                       \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  case mshadow::kInt64:                            \
    {                                              \
      typedef int64_t DType;                       \
      typedef int64_t AType;                       \
      {__VA_ARGS__}                                \
    }                                              \
    break;                                         \
  default:                                         \
    LOG(FATAL) << "Unknown type enum " << type;    \
  }
/*!
* \brief assign the val to out according
* to request in Kernel::Launch
* \param out the data to be assigned
* \param req the assignment request
* \param val the value to be assigned to out
* \tparam OType output type
* \tparam VType value type
*/
// kNullOp (and unknown requests) leave `out` untouched; kAddTo accumulates
// into the existing value instead of overwriting it.
#define KERNEL_ASSIGN(out, req, val) \
{ \
switch (req) { \
case kNullOp: \
break; \
case kWriteTo: \
case kWriteInplace: \
(out) = (val); \
break; \
case kAddTo: \
(out) += (val); \
break; \
default: \
break; \
} \
}
// Registers the full set of dtype names/enum values on a dmlc parameter
// field (expands to a chain of .add_enum calls).
#define MXNET_ADD_ALL_TYPES \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64)
/*!
 * \brief Flatten an ndim coordinate into a linear row-major index.
 * A coordinate entry is only counted when it lies inside the corresponding
 * extent (the shape > coord guard); out-of-range axes contribute 0, which
 * broadcasting callers rely on.
 */
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t flat = 0;
  #pragma unroll
  for (int axis = 0; axis < ndim; ++axis) {
    const index_t guarded = (shape[axis] > coord[axis]) ? coord[axis] : 0;
    flat = flat * shape[axis] + guarded;
  }
  return flat;
}
/* Compute coordinates from flattened index given shape */
// Inverse of ravel(): walks axes from last to first, peeling the remainder
// modulo each extent off the running quotient `j`.
// NOTE(review): termination of `i >= 0` relies on index_t being a signed
// type -- confirm against mshadow's index_t definition.
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
Shape<ndim> ret;
#pragma unroll
for (index_t i = ndim-1, j = idx; i >=0; --i) {
auto tmp = j / shape[i];
ret[i] = j - tmp*shape[i];
j = tmp;
}
return ret;
}
/*!
 * \brief Inner product of a coordinate vector with a stride vector,
 * i.e. the linear memory offset of `coord` under `stride`.
 */
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t offset = 0;
  #pragma unroll
  for (int axis = 0; axis < ndim; ++axis) {
    offset += coord[axis] * stride[axis];
  }
  return offset;
}
/* Combining unravel and dot */
// Equivalent to dot(unravel(idx, shape), stride) without materializing the
// coordinate vector: peels remainders off `idx` from the last axis to the
// first and accumulates them against `stride`.
// NOTE(review): termination of `i >= 0` relies on index_t being a signed
// type -- confirm against mshadow's index_t definition.
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
const Shape<ndim>& stride) {
index_t ret = 0;
#pragma unroll
for (index_t i = ndim-1, j = idx; i >=0; --i) {
auto tmp = j / shape[i];
ret += (j - tmp*shape[i])*stride[i];
j = tmp;
}
return ret;
}
/*!
 * \brief Row-major strides for `shape`. Axes of extent 1 get stride 0 so
 * that broadcast dimensions keep addressing the same element.
 */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t running = 1;
  #pragma unroll
  for (int axis = ndim - 1; axis >= 0; --axis) {
    stride[axis] = (shape[axis] > 1) ? running : 0;
    running *= shape[axis];
  }
  return stride;
}
/* Increment coordinates and modify index */
// Odometer-style advance: bumps `coord` to the next position within `shape`
// and keeps `idx` consistent with the corresponding stride-based offset.
// Carries propagate from the last axis toward axis 0.
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx, const Shape<ndim>& stride) {
++(*coord)[ndim-1];
*idx += stride[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
(*coord)[i] -= shape[i];
++(*coord)[i-1];
// On carry: remove this axis' full-extent contribution, add one step on the
// next-outer axis.
*idx = *idx + stride[i-1] - shape[i] * stride[i];
}
}
/* Increment coordinates and modify index */
// Same odometer-style advance as the single-index overload, but keeps TWO
// linear offsets (idx1/idx2, each with its own strides) in sync with the
// shared coordinate -- used when walking two tensors in lockstep.
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx1, const Shape<ndim>& stride1,
index_t* idx2, const Shape<ndim>& stride2) {
++(*coord)[ndim-1];
*idx1 += stride1[ndim-1];
*idx2 += stride2[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
(*coord)[i] -= shape[i];
++(*coord)[i-1];
*idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
*idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
}
}
/*!
* \brief Simple copy data from one blob to another
* \param to Destination blob
* \param from Source blob
*/
// Element counts and device masks must match (checked). Equal dtypes take a
// raw mshadow::Copy; otherwise the source is cast element-wise via tcast.
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
CHECK_EQ(from.Size(), to.Size());
CHECK_EQ(from.dev_mask(), to.dev_mask());
MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
if (to.type_flag_ == from.type_flag_) {
mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
} else {
MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, {
to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
})
}
})
}
/*! \brief Chain-rule wrapper: scales a gradient functor's output by the
 * incoming output gradient, i.e. input_grad = ograd * GRAD_OP::Map(args...).
 */
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad
   * \param a - output grad
   * \param args... - data forwarded verbatim to GRAD_OP::Map
   * \return input grad
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    // `auto` keeps GRAD_OP's native result type so the multiply happens in
    // the same (possibly promoted) type as before casting down to DType.
    const auto local_grad = GRAD_OP::Map(args...);
    return DType(a * local_grad);
  }
};
/*! \brief Binary op backward gradient OP wrapper (tuned) */
// Identical mapping to backward_grad, but tagged `tunable` so the OMP tuning
// machinery (see operator_tune.cc) can pick a threading strategy for it.
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
using backward_grad<GRAD_OP>::Map;
};
/*! \brief Select assignment operation based upon the req value
* Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
*/
// Each overload computes OP::Map for element i and stores the result through
// KERNEL_ASSIGN, which honors the compile-time `req` (write / accumulate /
// no-op). The overloads differ only in OP::Map's arity.
template<typename OP, int req>
struct op_with_req {
typedef OP Operation;
/*! \brief input is one tensor */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
}
/*! \brief inputs are two tensors */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief input is tensor and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
}
/*! \brief input is tensor and two scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in,
const DType value_1, const DType value_2) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
}
/*! \brief No inputs (ie fill to constant value) */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out) {
KERNEL_ASSIGN(out[i], req, OP::Map());
}
/*! \brief input is single scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(value));
}
/*! \brief inputs are two tensors and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out,
const DType *input_1, const DType *input_2, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
}
/*! \brief inputs are three tensors (ie backward grad with binary grad function) */
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType *out,
const DType *input_1,
const DType *input_2,
const DType *input_3) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
}
};
// Primary template; specialized below for cpu and (under __CUDACC__) gpu.
template<typename OP, typename xpu>
struct Kernel;
/*!
* \brief CPU Kernel launcher
* \tparam OP Operator to launch
*/
template<typename OP>
struct Kernel<OP, cpu> {
/*!
* \brief Launch a generic CPU kernel.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename ...Args>
inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
// With fewer than 2 recommended threads, a plain serial loop avoids OMP
// overhead entirely.
if (omp_threads < 2) {
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
OP::Map(i, args...);
}
}
#else
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
return true;
}
/*!
* \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
* for irregular workloads such as spmv.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename ...Args>
inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
#ifdef _OPENMP
// `false` asks the engine for the thread count without pinning behavior.
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
if (omp_threads < 2) {
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
}
#else
for (int64_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
return true;
}
/*!
* \brief Launch CPU kernel which has OMP tuning data available.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam PRIMITIVE_OP The primitive operation to use for tuning
* \tparam DType Data type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param dest Destination pointer (used to infer DType)
* \param args Varargs to eventually pass to the OP::Map() function
*/
// Note: tuning data (UseOMP) comes from PRIMITIVE_OP, but the work performed
// is still OP::Map -- PRIMITIVE_OP is only a cost proxy.
template<typename PRIMITIVE_OP, typename DType, typename ...Args>
static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
N, static_cast<size_t>(omp_threads))) {
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
OP::Map(i, args...);
}
}
#else
for (size_t i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
}
/*!
* \brief Launch custom-tuned kernel where each thread is set to
* operate on a contiguous partition
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
*/
// Unlike Launch, OP::Map here receives (start, count) and processes a whole
// contiguous chunk per invocation.
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2) {
OP::Map(0, N, args...);
} else {
const auto length = (N + omp_threads - 1) / omp_threads;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
// The final chunk may be shorter than `length`.
OP::Map(i, i + length > N ? N - i : length, args...);
}
}
#else
OP::Map(0, N, args...);
#endif
}
/*!
* \brief Launch a tunable OP with implicitly-supplied data type
* \tparam DType Data type
* \tparam T OP type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param s Stream (usually null for CPU)
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
* \return Always true
*/
// SFINAE: selected when OP itself derives from `tunable`.
template<typename DType, typename T = OP, typename ...Args>
static MSHADOW_CINLINE
typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
LaunchTuned<T, DType>(s, N, dest, args...);
return true;
}
/*!
* \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
* \tparam DType Data type
* \tparam T Wrapper type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param s Stream (usually null for CPU)
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
* \return Always true
*/
// SFINAE: selected when OP is a wrapper (e.g. op_with_req) whose nested
// Operation derives from `tunable`.
template<typename DType, typename T = OP, typename ...Args>
static MSHADOW_CINLINE
typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
return true;
}
};
#ifdef __CUDACC__
// Grid-stride kernel: every thread handles indices i, i+T, i+2T, ... where
// T is the total launched thread count, so any N is covered.
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, args...);
}
}
// LaunchEx variant: OP::Map takes (start, count); on GPU each invocation
// processes exactly one element (count = 1).
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, 1, args...);
}
}
// GPU launcher counterpart of Kernel<OP, cpu>.
template<typename OP>
struct Kernel<OP, gpu> {
/*! \brief Launch GPU kernel */
template<typename ...Args>
inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
using namespace mshadow::cuda;
// Ceiling-divide N by the block size, clamped to the maximum grid size.
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
}
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel_ex<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
}
};
#endif  // __CUDACC__
/*!
 * \brief Fill kernel writing the compile-time constant `val`.
 * \tparam val Scalar immediate
 */
template<int val>
struct set_to_int : public tunable {
  // Kernel<>::Launch form: out[i] = val, converted to the output type.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    const DType fill = DType(val);
    out[i] = fill;
  }
  // mshadow_op form (used via op_with_req<>): just yields the constant.
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};
/*!
* \brief Special-case kernel shortcut for setting to zero and one
*/
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
} // namespace mxnet_op
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
|
GeneralizedIsolationTree.h | #ifndef GENIF_GENERALIZEDISOLATIONTREE_H
#define GENIF_GENERALIZEDISOLATIONTREE_H
#include "GIFExitCondition.h"
#include "GIFModel.h"
#include "Tree.h"
#include <chrono>
#include <genif/Learner.h>
#include <genif/OutlierDetectionResult.h>
#include <nanoflann.hpp>
#include <random>
#include <set>
namespace genif {
class GeneralizedIsolationTree : public Learner<GIFModel, OutlierDetectionResult> {
public:
/**
* Constructs an instance of GeneralizedIsolationTree.
* @param k The number of representatives to find for each node.
* @param exitCondition An exit condition, which controls, when tree induction is stopped.
* @param workerCount Number of workers to consider.
* @param seed Seed to use for random number generation (-1 defaults to sysclock seed). Pass an integer for constant result across multiple runs.
*/
GeneralizedIsolationTree(unsigned int k, const GIFExitCondition& exitCondition, unsigned int workerCount, int seed = -1) :
_k(k), _workerCount(workerCount), _exitCondition(exitCondition), _seed(seed) {
if (_k <= 1)
throw std::runtime_error("GeneralizedIsolationTree::GeneralizedIsolationTree: k needs to be at least two.");
if (_workerCount < 1)
throw std::runtime_error("GeneralizedIsolationTree::GeneralizedIsolationTree: workerCount needs to be at least one.");
}
/**
* Fits the tree using a given dataset.
* @param dataset The dataset to use for fitting.
* @return A reference to this object.
*/
Learner<GIFModel, OutlierDetectionResult>& fit(const MatrixX& dataset) override {
// Check, whether we have enough observations.
if (dataset.rows() < _k)
throw std::runtime_error("GeneralizedIsolationTree::fit: The dataset should have at least k = " + std::to_string(_k) + " observations but has "
+ std::to_string(dataset.rows()) + " observations.");
// Assign the tree to this object.
Tree* treeRoot = findTree(dataset);
// Find leafs.
std::vector<unsigned int> leafVectorIndices;
std::function<void(const Tree&)> findRepresentatives = [&findRepresentatives, &leafVectorIndices](const Tree& node) {
if (!node.nodes.empty()) {
for (auto& childNode : node.nodes)
findRepresentatives(*childNode);
} else
leafVectorIndices.push_back(node.representativeIndex);
};
findRepresentatives(*treeRoot);
// Delete the tree since we do not need it anymore.
delete treeRoot;
// Create a GIFModel instance.
GIFModel resultModel;
// Build matrix from leaf nodes.
resultModel.dataMatrix = std::make_shared<MatrixX>(leafVectorIndices.size(), dataset.cols());
for (unsigned int i = 0; i < leafVectorIndices.size(); i++)
resultModel.dataMatrix->row(i) = dataset.row(leafVectorIndices[i]);
// Build KDTree on summary.
auto kdTree = std::make_shared<nanoflann::KDTreeEigenMatrixAdaptor<MatrixX>>(resultModel.dataMatrix->cols(), std::cref(*resultModel.dataMatrix), 10);
kdTree->index->buildIndex();
// Iterate through the dataset and determine for each vector the nearest vectors in the summary.
resultModel.countsPerRegion = std::vector<unsigned long>(resultModel.dataMatrix->rows(), 0);
#pragma omp parallel for num_threads(_workerCount)
for (unsigned long i = 0; i < dataset.rows(); i++) {
// Make KNN query for nearest summary vector.
size_t nearestSummaryIndex;
data_t sqDistance;
nanoflann::KNNResultSet<data_t> resultSet(1);
resultSet.init(&nearestSummaryIndex, &sqDistance);
VectorX datasetVector = dataset.row(i);
kdTree->index->findNeighbors(resultSet, datasetVector.data(), nanoflann::SearchParams(10));
// Increase count for nearest summary point.
#pragma omp critical
resultModel.countsPerRegion[nearestSummaryIndex] += 1;
}
// Calculate estimated probabilities for every region.
resultModel.probabilitiesPerRegion = std::vector<data_t>(resultModel.dataMatrix->rows(), 0.0);
for (unsigned long i = 0; i < resultModel.dataMatrix->rows(); i++)
resultModel.probabilitiesPerRegion[i] = static_cast<data_t>(resultModel.countsPerRegion[i]) / static_cast<data_t>(dataset.size());
// Assign properties.
resultModel.dataKDTree = kdTree;
_model = resultModel;
return *this;
}
/**
* Finds a tree using a given dataset.
* @param dataset The dataset to create the tree from.
* @return A raw pointer to the induced tree.
*/
Tree* findTree(const MatrixX& dataset) {
// Create PRNG.
std::default_random_engine generator(_seed >= 0 ? _seed : std::chrono::system_clock::now().time_since_epoch().count());
// Initialize a tree.
Tree* treeRoot = new Tree(dataset);
for (unsigned int i = 0; i < dataset.rows(); i++)
treeRoot->vectorIndices.emplace_back(i);
// Choose a (somewhat) random representative.
treeRoot->representativeIndex = 0;
// Create a worker data structure.
std::vector<std::pair<unsigned int, Tree*>> treeTasks;
treeTasks.emplace_back(0, treeRoot);
while (!treeTasks.empty()) {
// Choose a root node to work on.
auto task = treeTasks.back();
treeTasks.pop_back();
unsigned int treeHeight = task.first;
Tree* root = task.second;
// Check, whether the exit condition already applies.
bool shouldExit = _exitCondition.shouldExitRecursion(*root);
if (!shouldExit) {
// Randomly sample representatives from node.
std::set<unsigned int> repIndices;
std::uniform_int_distribution<unsigned int> distribution(0, root->vectorIndices.size() - 1);
for (unsigned int j = 0; j < _k; j++) {
unsigned int nextIndex = root->vectorIndices[distribution(generator)];
while (repIndices.find(nextIndex) != repIndices.end())
nextIndex = root->vectorIndices[distribution(generator)];
repIndices.insert(nextIndex);
}
std::vector<unsigned int> clusterRepIndices(repIndices.begin(), repIndices.end());
// Generate clustering.
std::vector<std::vector<unsigned int>> clusters(clusterRepIndices.size());
#pragma omp parallel for num_threads(_workerCount)
for (unsigned int i = 0; i < root->vectorIndices.size(); i++) {
unsigned int fvIndex = root->vectorIndices[i];
unsigned int nearestIdx;
data_t nearestDist = std::numeric_limits<data_t>::max();
for (unsigned int j = 0; j < clusterRepIndices.size(); j++) {
data_t repDist = (dataset.row(fvIndex) - dataset.row(clusterRepIndices.at(j))).squaredNorm();
if (repDist < nearestDist) {
nearestDist = repDist;
nearestIdx = j;
}
}
// Put vector in bucket.
#pragma omp critical
clusters[nearestIdx].push_back(fvIndex);
}
// Every partition becomes a new node.
// Check, whether we have found exactly K clusters.
if (clusters.size() == _k) {
// Iterate all clusters and create new nodes from it.
for (unsigned int i = 0; i < clusters.size(); i++) {
// Create a new node.
Tree* node = new Tree(dataset);
node->vectorIndices = clusters[i];
node->representativeIndex = clusterRepIndices[i];
node->parent = root;
// Assign node to root.
root->nodes.push_back(node);
// If we have more than k observations in that node, we may create new tasks, which then are subject to further partitioning.
if (node->vectorIndices.size() > _k)
treeTasks.push_back(std::make_pair<unsigned int, Tree*>(treeHeight + 1, &*node));
}
} else
throw std::runtime_error("GeneralizedIsolationTree::fit: Clusterer did not return k = " + std::to_string(_k) + " clusters from "
+ std::to_string(root->vectorIndices.size()) + " observations.");
}
}
return treeRoot;
}
/**
* Returns a previously fitted model.
* @return As stated above.
*/
GIFModel getModel() const override {
return _model;
}
/**
* Predicts the outlierness for a given dataset using a previously fitted model.
* @param dataset The dataset to inspect for outliers.
* @return An instance of OutlierDetectionResult which contains the probabilities for individual observations to be inliers.
*/
OutlierDetectionResult predict(const MatrixX& dataset) const override {
return predict(dataset, _model);
}
/**
 * Predicts the outlierness for a given dataset using the supplied model.
 * Each observation is assigned the inlier probability of the region whose
 * summary vector is its nearest neighbour in the model's k-d tree.
 * @param dataset The dataset to inspect for outliers (one observation per row).
 * @param model The model to use for prediction.
 * @return An instance of OutlierDetectionResult which contains the probabilities for individual observations to be inliers.
 * @throws std::runtime_error If the model holds no per-region probabilities (i.e. was never fitted).
 */
OutlierDetectionResult predict(const MatrixX& dataset, const GIFModel& model) const override {
    // Guard clause; message fixed to use "::" like the fit() error above.
    if (model.probabilitiesPerRegion.empty())
        throw std::runtime_error("GeneralizedIsolationTree::predict: No model has been learnt yet. Please call `fit` or `fitPredict` first.");
    // Create a result model.
    OutlierDetectionResult result;
    result.probabilities = VectorX::Zero(dataset.rows());
    // Make the anomaly decision for every data point.
    // Use a signed loop index: Eigen's rows() returns a signed type, and the
    // previous `unsigned long` counter caused a signed/unsigned comparison.
    #pragma omp parallel for num_threads(_workerCount)
    for (long i = 0; i < dataset.rows(); i++) {
        // 1-nearest-neighbour query for the closest summary vector.
        size_t nearestSummaryIndex;
        data_t sqDistance;
        nanoflann::KNNResultSet<data_t> resultSet(1);
        resultSet.init(&nearestSummaryIndex, &sqDistance);
        VectorX datasetVector = dataset.row(i);
        model.dataKDTree->index->findNeighbors(resultSet, datasetVector.data(), nanoflann::SearchParams(10));
        // Assign probability values.
        result.probabilities[i] = model.probabilitiesPerRegion[nearestSummaryIndex];
    }
    return result;
}
/**
 * Takes a copy of this object.
 * NOTE(review): only the construction parameters (_k, _exitCondition,
 * _workerCount, _seed) are forwarded — the fitted `_model` is NOT carried
 * over, so the copy starts unfitted. Confirm this is intended.
 * @return An unique_ptr pointing to a copy of this instance.
 */
std::unique_ptr<Learner<GIFModel, OutlierDetectionResult>> copy() const override {
    return std::make_unique<GeneralizedIsolationTree>(_k, _exitCondition, _workerCount, _seed);
}
private:
    unsigned int _k = 10;          // Number of clusters / children per tree node.
    unsigned int _workerCount = 1; // OpenMP threads used by predict().
    int _seed;                     // RNG seed, forwarded to copies via copy().
    // NOTE(review): stored by reference — the caller-owned exit condition must
    // outlive this object or this member dangles. Verify at construction sites.
    const GIFExitCondition& _exitCondition;
    GIFModel _model;               // Model produced by fit(); empty until then.
};
}
#endif // GENIF_GENERALIZEDISOLATIONTREE_H
|
GB_unop__identity_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: (none)
// op(A') function: GB_unop_tran__identity_fc32_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = aij
// Entry type of the input matrix A.
#define GB_ATYPE \
    GxB_FC32_t

// Entry type of the output matrix C.
#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    GxB_FC32_t aij = Ax [pA]

// Access the p-th entry of the output array Cx.
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// NOTE(review): this op(A) kernel is compiled out (#if 0) and the generated
// name is "(none)", matching the "op(A) function: (none)" note in the file
// header — presumably the identity apply is handled elsewhere; confirm in
// GB_unop__include.h before re-enabling.
#if 0
GrB_Info (none)
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // Element-wise identity copy, statically scheduled across threads.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        GxB_FC32_t z = aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the shared template GB_unop_transpose.c, which
// expands using the GB_* macros defined above for this type combination.
GrB_Info GB_unop_tran__identity_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Phase 2 of the two-phase transpose template (phase 1 counts, phase 2 fills).
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
|
evolution.h | //
// Created by mattw on 11/01/2022.
//
#ifndef BECPP_EVOLUTION_H
#define BECPP_EVOLUTION_H
#include <complex>
#include "wavefunction.h"
#include "data.h"
constexpr std::complex<double> I{0, 1};
// Imprints a Thomas-Fermi density profile onto all three spin components of a
// 2D wavefunction, then refreshes the per-component atom numbers.
// NOTE(review): components are multiplied by the density itself, not its
// square root — confirm this matches the intended initial state preparation.
void apply_TF_density(Wavefunction2D &psi, const Parameters &params)
{
    double g = params.c0 + 4 * params.c2;     // effective interaction strength
    double r_tf = std::pow(8 * g / PI, 0.25); // Thomas-Fermi radius
    for (int i = 0; i < psi.grid.nx; ++i)
    {
        for (int j = 0; j < psi.grid.ny; ++j)
        {
            double r2 = psi.grid.X[i][j] * psi.grid.X[i][j] + psi.grid.Y[i][j] * psi.grid.Y[i][j];
            // Bug fix: compare the squared radius against r_tf^2. The original
            // tested `r2 < r_tf` (radius^2 vs radius), which is dimensionally
            // inconsistent with the 1 - r2/r_tf^2 profile below and can
            // produce negative densities whenever r_tf < 1.
            double tf_density = 0.;
            if (r2 < r_tf * r_tf)
            {
                tf_density = 15 / (8 * PI * r_tf) * (1 - r2 / (r_tf * r_tf));
            }
            psi.plus[j + i * psi.grid.ny] *= tf_density;
            psi.zero[j + i * psi.grid.ny] *= tf_density;
            psi.minus[j + i * psi.grid.ny] *= tf_density;
        }
    }
    psi.update_component_atom_num();
}
// Applies the kinetic (Fourier-space) phase factor exp(-0.25*i*dt*(K + shift))
// to each spin component of a 2D wavefunction; the +/-1 components carry an
// extra 2*q quadratic Zeeman shift.
void fourier_step(Wavefunction2D &psi, const Parameters &params)
{
    const std::complex<double> coef = -0.25 * I * params.dt; // loop-invariant prefactor
#pragma omp parallel for collapse(2) shared(psi, params, coef) default(none)
    for (int ix = 0; ix < psi.grid.nx; ++ix)
    {
        for (int iy = 0; iy < psi.grid.ny; ++iy)
        {
            const int idx = iy + ix * psi.grid.ny;
            psi.plus_k[idx] *= exp(coef * (psi.grid.K[ix][iy] + 2 * params.q));
            psi.zero_k[idx] *= exp(coef * psi.grid.K[ix][iy]);
            psi.minus_k[idx] *= exp(coef * (psi.grid.K[ix][iy] + 2 * params.q));
        }
    }
}
// 1D counterpart of fourier_step: kinetic phase factor per grid point, with
// the +/-1 components shifted by 2*q.
void fourier_step(Wavefunction1D &psi, const Parameters &params)
{
    const std::complex<double> coef = -0.25 * I * params.dt; // loop-invariant prefactor
#pragma omp parallel for shared(psi, params, coef) default(none)
    for (int idx = 0; idx < psi.grid.nx; ++idx)
    {
        psi.plus_k[idx] *= exp(coef * (psi.grid.K[idx] + 2 * params.q));
        psi.zero_k[idx] *= exp(coef * psi.grid.K[idx]);
        psi.minus_k[idx] *= exp(coef * (psi.grid.K[idx] + 2 * params.q));
    }
}
// Kinetic step with a time-dependent quadratic Zeeman shift, as used in a
// Kibble-Zurek style quench parameterized by tau_q.
// NOTE(review): tau_q == 0 divides by zero — confirm callers pass tau_q > 0.
void fourier_step_KZ(Wavefunction1D &psi, const Parameters &params, int tau_q)
{
    // Loop-invariant effective Zeeman term, hoisted out of the sweep.
    // Fix: use std::abs explicitly — the unqualified `abs` on a double gets
    // no ADL and may bind to the C `int abs(int)`, silently truncating c2.
    const double q_eff = 2 * std::abs(params.c2) * (params.q - params.dt.real() / (2 * tau_q));
    const std::complex<double> coef = -0.25 * I * params.dt;
#pragma omp parallel for shared(psi, coef, q_eff) default(none)
    for (int i = 0; i < psi.grid.nx; ++i)
    {
        psi.plus_k[i] *= exp(coef * (psi.grid.K[i] + q_eff));
        psi.zero_k[i] *= exp(coef * psi.grid.K[i]);
        psi.minus_k[i] *= exp(coef * (psi.grid.K[i] + q_eff));
    }
}
// Applies the interaction part of the split-step flow (spin mixing, trap
// potential V, linear/quadratic Zeeman and density terms) to every grid point
// of a 2D three-component wavefunction.
void interaction_step(Wavefunction2D &psi, const Parameters &params, const doubleArray_t &V)
{
#pragma omp parallel for collapse(2) shared(psi, params, I, V) default(none)
    for (int i = 0; i < psi.grid.nx; ++i)
    {
        for (int j = 0; j < psi.grid.ny; ++j)
        {
            const int idx = j + i * psi.grid.ny;
            // Snapshot the old component values so all three updates below
            // see a consistent state.
            const std::complex<double> p_plus = psi.plus[idx];
            const std::complex<double> p_zero = psi.zero[idx];
            const std::complex<double> p_minus = psi.minus[idx];
            // Spin vector elements (transverse and longitudinal).
            std::complex<double> f_perp =
                    sqrt(2.) * (std::conj(p_plus) * p_zero + std::conj(p_zero) * p_minus);
            std::complex<double> f_z = std::pow(abs(p_plus), 2) - std::pow(abs(p_minus), 2);
            double fNorm = sqrt(std::pow(abs(f_z), 2) + std::pow(abs(f_perp), 2));
            // cos / sinc-like factors of the spin rotation; guard fNorm -> 0.
            std::complex<double> cosTerm = std::cos(params.c2 * fNorm * params.dt);
            std::complex<double> sinTerm{};
            if (fNorm > 1e-8)
            {
                sinTerm = I * std::sin(params.c2 * fNorm * params.dt) / fNorm;
            }
            // Total density at this grid point.
            double density = std::pow(abs(p_plus), 2) +
                             std::pow(abs(p_zero), 2) +
                             std::pow(abs(p_minus), 2);
            // Interaction flow for each component (uses only the snapshot).
            psi.plus[idx] = (cosTerm * p_plus -
                             sinTerm * (f_z * p_plus + std::conj(f_perp) / sqrt(2.) * p_zero))
                            * exp(-I * params.dt * (V[i][j] - params.p + params.c0 * density));
            psi.zero[idx] = (cosTerm * p_zero -
                             sinTerm / sqrt(2.) * (f_perp * p_plus + std::conj(f_perp) * p_minus))
                            * exp(-I * params.dt * (V[i][j] + params.c0 * density));
            psi.minus[idx] = (cosTerm * p_minus -
                              sinTerm * (f_perp / sqrt(2.) * p_zero - f_z * p_minus))
                             * exp(-I * params.dt * (V[i][j] + params.p + params.c0 * density));
        }
    }
}
// 1D counterpart of interaction_step: spin mixing plus potential, Zeeman and
// density phase factors for a three-component wavefunction.
void interaction_step(Wavefunction1D &psi, const Parameters &params, const std::vector<double> &V)
{
#pragma omp parallel for shared(psi, params, I, V) default(none)
    for (int i = 0; i < psi.grid.nx; ++i)
    {
        // Snapshot the old component values so all three updates see a
        // consistent state.
        const std::complex<double> p_plus = psi.plus[i];
        const std::complex<double> p_zero = psi.zero[i];
        const std::complex<double> p_minus = psi.minus[i];
        // Spin vector elements.
        std::complex<double> f_perp =
                sqrt(2.) * (std::conj(p_plus) * p_zero + std::conj(p_zero) * p_minus);
        std::complex<double> f_z = std::pow(abs(p_plus), 2) - std::pow(abs(p_minus), 2);
        double fNorm = sqrt(std::pow(abs(f_z), 2) + std::pow(abs(f_perp), 2));
        // cos / sinc-like spin-rotation factors; guard the fNorm -> 0 limit.
        std::complex<double> cosTerm = std::cos(params.c2 * fNorm * params.dt);
        std::complex<double> sinTerm{};
        if (fNorm > 1e-8)
        {
            sinTerm = I * std::sin(params.c2 * fNorm * params.dt) / fNorm;
        }
        // Total density.
        double density = std::pow(abs(p_plus), 2) +
                         std::pow(abs(p_zero), 2) +
                         std::pow(abs(p_minus), 2);
        // Interaction flow for each component (uses only the snapshot).
        psi.plus[i] = (cosTerm * p_plus -
                       sinTerm * (f_z * p_plus + std::conj(f_perp) / sqrt(2.) * p_zero))
                      * exp(-I * params.dt * (V[i] - params.p + params.c0 * density));
        psi.zero[i] = (cosTerm * p_zero -
                       sinTerm / sqrt(2.) * (f_perp * p_plus + std::conj(f_perp) * p_minus))
                      * exp(-I * params.dt * (V[i] + params.c0 * density));
        psi.minus[i] = (cosTerm * p_minus -
                        sinTerm * (f_perp / sqrt(2.) * p_zero - f_z * p_minus))
                       * exp(-I * params.dt * (V[i] + params.p + params.c0 * density));
    }
}
// Rescales each spin component of a 2D wavefunction so its atom number
// returns to the stored targets psi.N_plus / N_zero / N_minus (e.g. after a
// non-norm-preserving imaginary-time step).
// NOTE(review): a component whose current atom number is zero divides by
// zero here (inf/NaN scale) — confirm callers never renormalise an empty one.
void renormalise_atom_num(Wavefunction2D &psi)
{
    double current_N_plus = psi.component_atom_number("plus");
    double current_N_zero = psi.component_atom_number("zero");
    double current_N_minus = psi.component_atom_number("minus");
    // Loop-invariant scale factors, hoisted out of the grid sweep
    // (identical value to computing them per element, just cheaper).
    const double scale_plus = sqrt(psi.N_plus) / sqrt(current_N_plus);
    const double scale_zero = sqrt(psi.N_zero) / sqrt(current_N_zero);
    const double scale_minus = sqrt(psi.N_minus) / sqrt(current_N_minus);
    for (int i = 0; i < psi.grid.nx; ++i)
    {
        for (int j = 0; j < psi.grid.ny; ++j)
        {
            psi.plus[j + i * psi.grid.ny] *= scale_plus;
            psi.zero[j + i * psi.grid.ny] *= scale_zero;
            psi.minus[j + i * psi.grid.ny] *= scale_minus;
        }
    }
}
// 1D counterpart of renormalise_atom_num: rescale each component back to its
// target atom number.
// NOTE(review): divides by zero if a component currently holds no atoms.
void renormalise_atom_num(Wavefunction1D &psi)
{
    double current_N_plus = psi.component_atom_number("plus");
    double current_N_zero = psi.component_atom_number("zero");
    double current_N_minus = psi.component_atom_number("minus");
    // Loop-invariant scale factors, hoisted out of the sweep.
    const double scale_plus = sqrt(psi.N_plus) / sqrt(current_N_plus);
    const double scale_zero = sqrt(psi.N_zero) / sqrt(current_N_zero);
    const double scale_minus = sqrt(psi.N_minus) / sqrt(current_N_minus);
    for (int i = 0; i < psi.grid.nx; ++i)
    {
        psi.plus[i] *= scale_plus;
        psi.zero[i] *= scale_zero;
        psi.minus[i] *= scale_minus;
    }
}
#endif //BECPP_EVOLUTION_H
|
main.c | #include "common.h"
/* Prints the full usage line and terminates via the END macro.
   The string continuation lines are kept exactly as-is: leading whitespace
   on a continued string literal would become part of the output. */
static void print_help(char *argv)
{
  END("%s [-f edge_file] [-W width] [-H height] [-D degree] [-R length] [-o output_file] [-s random_seed]\
[-n calculations] [-w max_temperature] [-c min_temperature] [-g groups] [-C cooling_cycle] [-B] [-d]\
[-F fixed_temperature] [-Y] [-M] [-h]\n", argv);
}
/* Parses the command line with getopt() and writes each option's value into
   the corresponding caller-provided output parameter. Shows usage (and
   terminates) when fewer than three arguments are given, on -h, or on an
   unknown option. Numeric options are range-checked and abort via ERROR. */
static void set_args(const int argc, char **argv, char *infname, int *low_length, char *outfname,
                     int *random_seed, long long *ncalcs, double *max_temp, double *min_temp, int *groups,
                     int *cooling_cycle, bool *enable_hill_climbing, bool *enable_detect_temp, bool *enable_bfs,
                     bool *enable_halfway, double *fixed_temp, int *width, int *height, int *max_degree)
{
  if(argc < 3)
    print_help(argv[0]);

  int result;
  while((result = getopt(argc,argv,"f:W:H:D:R:o:s:n:w:c:g:C:BdF:YMh"))!=-1){
    switch(result){
    case 'f': /* input edge file */
      if(strlen(optarg) > MAX_FILENAME_LENGTH)
        ERROR("Input filename is long (%s). Please change MAX_FILENAME_LENGTH.\n", optarg);
      strcpy(infname, optarg);
      break;
    case 'W': /* lattice width */
      *width = atoi(optarg);
      if(*width <= 0)
        ERROR("-W value > 0\n");
      break;
    case 'H': /* lattice height */
      *height = atoi(optarg);
      if(*height <= 0)
        ERROR("-H value > 0\n");
      break;
    case 'D': /* maximum vertex degree */
      *max_degree = atoi(optarg);
      if(*max_degree <= 0)
        ERROR("-D value > 0\n");
      break;
    case 'R': /* maximum edge length */
      *low_length = atoi(optarg);
      if(*low_length <= 0)
        ERROR("-R value > 0\n");
      break;
    case 'o': /* output edge file */
      if(strlen(optarg) > MAX_FILENAME_LENGTH)
        ERROR("Output filename is long (%s). Please change MAX_FILENAME_LENGTH.\n", optarg);
      strcpy(outfname, optarg);
      break;
    case 's': /* random seed */
      *random_seed = atoi(optarg);
      if(*random_seed < 0)
        ERROR("-s value >= 0\n");
      break;
    case 'n': /* number of annealing calculations */
      *ncalcs = atoll(optarg);
      if(*ncalcs < 0)
        ERROR("-n value >= 0\n");
      break;
    case 'w': /* maximum (start) temperature */
      *max_temp = atof(optarg);
      if(*max_temp <= 0)
        ERROR("-w value > 0\n");
      break;
    case 'c': /* minimum (end) temperature */
      *min_temp = atof(optarg);
      if(*min_temp <= 0)
        ERROR("-c value > 0\n");
      break;
    case 'g': /* symmetry groups: only 1, 2 or 4 supported */
      *groups = atoi(optarg);
      if(*groups != 1 && *groups != 2 && *groups != 4)
        ERROR("-g value == 1 or 2 or 4\n");
      break;
    case 'C': /* cooling cycle length */
      *cooling_cycle = atoi(optarg);
      if(*cooling_cycle <= 0)
        ERROR("-C value > 0\n");
      break;
    case 'B': /* use BFS-based APSP */
      *enable_bfs = true;
      break;
    case 'd': /* detect suitable temperatures instead of optimizing */
      *enable_detect_temp = true;
      break;
    case 'F': /* fixed-temperature annealing */
      *fixed_temp = atof(optarg);
      if(*fixed_temp <= 0)
        ERROR("-F value > 0\n");
      break;
    case 'Y': /* hill climbing (zero temperature) */
      *enable_hill_climbing = true;
      break;
    case 'M': /* input file holds only the base (halfway) graph */
      *enable_halfway = true;
      break;
    case 'h':
    default:
      print_help(argv[0]);
    }
  }
}
/* Counts self-loop edges (endpoints equal). The edge list must not contain
   NO_EDGE entries. */
static int count_loop(const int lines, const int *edge)
{
  int loops = 0;
  for(int i=0;i<lines;i++){
    if(edge[2*i] == edge[2*i+1])
      loops++;
  }
  return loops;
}
/* True when the lattice distance between vertices v and w (DISTANCE macro
   from common.h) does not exceed the allowed link length. */
static bool confirm_dist(const int v, const int w, const int height, const int low_length)
{
  return (DISTANCE(v, w, height) <= low_length);
}
/* Picks two distinct random edges and swaps their endpoints (2-opt style),
   retrying until a swap exists whose new edges both satisfy the length
   constraint. Degrees are preserved by construction. */
static void simple_exchange_edge(const int height, const int low_length, const int lines, int* edge)
{
  for(;;){
    int a, b;
    do{
      a = getRandom(lines);
      b = getRandom(lines);
    } while(a == b);
    const int av = edge[a*2], aw = edge[a*2+1];
    const int bv = edge[b*2], bw = edge[b*2+1];
    int na_v, na_w, nb_v, nb_w;
    if(confirm_dist(av, bv, height, low_length) && confirm_dist(aw, bw, height, low_length)){
      /* Pair (av,bv) and (aw,bw). */
      na_v = av; na_w = bv;
      nb_v = aw; nb_w = bw;
    }
    else if(confirm_dist(av, bw, height, low_length) && confirm_dist(aw, bv, height, low_length)){
      /* Pair (av,bw) and (aw,bv). */
      na_v = av; na_w = bw;
      nb_v = aw; nb_w = bv;
    }
    else{
      continue; /* neither pairing is legal; draw a new pair of edges */
    }
    edge[a*2] = na_v; edge[a*2+1] = na_w;
    edge[b*2] = nb_v; edge[b*2+1] = nb_w;
    return;
  }
}
#ifdef _OPENMP
/* One BFS expansion step (parallel version): visits the neighbours of every
   vertex in `frontier`, marks unvisited ones in `bitmap`, collects them in
   `next`, and returns the size of the next frontier.
   NOTE(review): each thread keeps a `local_frontier[nodes]` VLA on its own
   stack; for large graphs this can overflow the per-thread stack limit. */
static int top_down_step(const int nodes, const int num_frontier, const int max_degree,
                         const int* degree, const int* restrict adjacency, int* restrict frontier,
                         int* restrict next, char* restrict bitmap)
{
  int count = 0;
  int local_frontier[nodes];
#pragma omp parallel private(local_frontier)
  {
    int local_count = 0;
#pragma omp for nowait
    for(int i=0;i<num_frontier;i++){
      int v = frontier[i];
      for(int j=0;j<degree[v];j++){
        int n = *(adjacency + v * max_degree + j); // adjacency[v][j];
        if(bitmap[n] == NOT_VISITED){
          bitmap[n] = VISITED;
          local_frontier[local_count++] = n;
        }
      }
    } // end for i
    /* Serialize merging each thread's discoveries into the shared `next`. */
#pragma omp critical
    {
      memcpy(&next[count], local_frontier, local_count*sizeof(int));
      count += local_count;
    }
  }
  return count;
}
#else
/* One BFS expansion step (serial fallback): same contract as above. */
static int top_down_step(const int nodes, const int num_frontier, const int max_degree,
                         const int *degree, const int* restrict adjacency, int* restrict frontier,
                         int* restrict next, char* restrict bitmap)
{
  int count = 0;
  for(int i=0;i<num_frontier;i++){
    int v = frontier[i];
    for(int j=0;j<degree[v];j++){
      int n = *(adjacency + v * max_degree + j); // int n = adjacency[v][j];
      if(bitmap[n] == NOT_VISITED){
        bitmap[n] = VISITED;
        next[count++] = n;
      }
    }
  }
  return count;
}
#endif
/* Runs a BFS from vertex 0 over the flattened adjacency table and returns
   the number of vertices never reached (0 means the graph is connected). */
static int simple_bfs(const int nodes, const int max_degree, const int *degree, int *adjacency)
{
  char *visited = malloc(sizeof(char) * nodes);
  int *current = malloc(sizeof(int) * nodes);
  int *next_level = malloc(sizeof(int) * nodes);
  for(int i=0;i<nodes;i++)
    visited[i] = NOT_VISITED;

  const int root = 0;
  current[0] = root;
  visited[root] = VISITED;
  int frontier_size = 1;

  /* Expand level by level; swap the frontier buffers each round. */
  do{
    frontier_size = top_down_step(nodes, frontier_size, max_degree, degree,
                                  adjacency, current, next_level, visited);
    int *swap = current;
    current = next_level;
    next_level = swap;
  } while(frontier_size != 0);

  int unreachable = 0;
  for(int i=0;i<nodes;i++)
    if(visited[i] == NOT_VISITED)
      unreachable++;

  free(visited);
  free(current);
  free(next_level);
  return unreachable;
}
// Inherited from http://research.nii.ac.jp/graphgolf/c/create-lattice.c
/* Builds an initial regular graph on a width x height lattice: first lays
   down max_degree parallel edges between paired columns (plus paired rows and
   self-loops when dimensions are odd), then randomizes with edge exchanges,
   and finally repairs the graph to be loop-free and connected. */
static void create_lattice(const int nodes, const int lines, const int width, const int height,
                           const int max_degree, int *degree, const int low_length, int edge[lines*2])
{
  int i = 0;
  /* Pair adjacent columns (2x, 2x+1): max_degree parallel edges per vertex. */
  for(int x=0;x<width/2;x++){
    for(int y=0;y<height;y++){
      for(int k=0;k<max_degree;k++){
        edge[i*2] = y + 2 * x * height;
        edge[i*2+1] = edge[2*i] + height;
        i++;
      }
    }
  }
  /* Odd width: pair adjacent rows inside the last, unmatched column. */
  if(width%2 == 1){
    for(int y=0;y<height/2;y++){
      for(int k=0;k<max_degree;k++){
        edge[i*2] = (width - 1) * height + 2 * y;
        edge[i*2+1] = edge[i*2] + 1;
        i++;
      }
    }
    /* add self-loop */
    if(height%2 == 1){
      for(int k=0;k<max_degree/2;k++){
        edge[i*2] = edge[i*2+1] = nodes - 1;
        i++;
      }
    }
  }

  for(int i=0;i<lines;i++) // Give randomness
    simple_exchange_edge(height, low_length, lines, edge);

  // Remove loops
  /* Hill-climb on a scratch copy: keep any exchange that does not increase
     the self-loop count, stop when none remain. */
  int *tmp_edge = malloc(lines*2*sizeof(int));
  int min_num = count_loop(lines, edge);
  while(1){
    memcpy(tmp_edge, edge, sizeof(int)*lines*2);
    simple_exchange_edge(height, low_length, lines, tmp_edge);
    int tmp_num = count_loop(lines, tmp_edge);
    if(tmp_num == 0){
      memcpy(edge, tmp_edge, sizeof(int)*lines*2);
      break;
    }
    else{
      if(tmp_num <= min_num){
        min_num = tmp_num;
        memcpy(edge, tmp_edge, sizeof(int)*lines*2);
      }
    }
  }

  // Make an unconnected graph a connected graph
  // Note that the connected graph after this operation may have loops.
  int (*adjacency)[max_degree] = malloc(sizeof(int)*nodes*max_degree); // int adjacency[nodes][max_degree];
  create_adjacency(nodes, lines, max_degree, degree, (const int (*)[2])edge, adjacency);
  min_num = simple_bfs(nodes, max_degree, degree, (int *)adjacency);
  while(1){
    memcpy(tmp_edge, edge, sizeof(int)*lines*2);
    simple_exchange_edge(height, low_length, lines, tmp_edge);
    create_adjacency(nodes, lines, max_degree, degree, (const int (*)[2])tmp_edge, adjacency);
    int tmp_num = simple_bfs(nodes, max_degree, degree, (int *)adjacency);
    if(tmp_num == 0){
      memcpy(edge, tmp_edge, sizeof(int)*lines*2);
      break;
    }
    else{
      if(tmp_num <= min_num){
        min_num = tmp_num;
        memcpy(edge, tmp_edge, sizeof(int)*lines*2);
      }
    }
  }

  // Remove loops again
  if(count_loop(lines, edge) != 0){
    while(1){
      memcpy(tmp_edge, edge, sizeof(int)*lines*2);
      simple_exchange_edge(height, low_length, lines, tmp_edge);
      int tmp_num = count_loop(lines, tmp_edge);
      if(tmp_num == 0){
        memcpy(edge, tmp_edge, sizeof(int)*lines*2);
        break;
      }
      else{
        if(tmp_num <= min_num){
          min_num = tmp_num;
          memcpy(edge, tmp_edge, sizeof(int)*lines*2);
        }
      }
    }
  }
  free(tmp_edge);
  free(adjacency);

  // for(int i=0;i<lines;i++)
  //   printf("%d,%d %d,%d\n", WIDTH(edge[i*2], height), HEIGHT(edge[i*2], height),
  //          WIDTH(edge[i*2+1], height), HEIGHT(edge[i*2+1], height));
  //EXIT(0);
}
/* Returns the number of '\n' characters in the file (the line count when the
   file ends with a newline). Aborts via ERROR if the file cannot be opened. */
static int count_lines(const char *fname)
{
  FILE *fp = fopen(fname, "r");
  if(fp == NULL)
    ERROR("File not found\n");

  int count = 0;
  for(int ch; (ch = fgetc(fp)) != EOF;)
    count += (ch == '\n');

  fclose(fp);
  return count;
}
/* Reads a lattice edge list ("x1,y1 x2,y2" per line): first pass determines
   the grid extent (*w, *h), second pass stores each vertex as x*height + y.
   The caller must size `edge` via count_lines(). */
static void read_file_lattice(int *edge, int *w, int *h, const char *fname)
{
  FILE *fp;
  if((fp = fopen(fname, "r")) == NULL){
    PRINT_R0("File not found\n");
    EXIT(1);
  }

  int n[4];
  *w = 0;
  *h = 0;
  /* Stop on EOF or a malformed line. The previous `!= EOF` test spun forever
     on a matching failure, because fscanf() then returns 0 without consuming
     any input. */
  while(fscanf(fp, "%d,%d %d,%d", &n[0], &n[1], &n[2], &n[3]) == 4){
    *w = MAX(*w, n[0]);
    *h = MAX(*h, n[1]);
    *w = MAX(*w, n[2]);
    *h = MAX(*h, n[3]);
  }
  *w += 1;  /* coordinates are 0-based, so extent = max + 1 */
  *h += 1;
  rewind(fp);

  int i = 0;
  while(fscanf(fp, "%d,%d %d,%d", &n[0], &n[1], &n[2], &n[3]) == 4){
    edge[i*2  ] = n[0] * (*h) + n[1];
    edge[i*2+1] = n[2] * (*h) + n[3];
    i++;
  }
  fclose(fp);
}
/* Returns the largest vertex id appearing anywhere in the edge list. */
static int max_node_num(const int lines, const int edge[lines*2])
{
  int largest = edge[0];
  for(int i=1;i<lines*2;i++){
    if(edge[i] > largest)
      largest = edge[i];
  }
  return largest;
}
/* Validates the graph: every non-deleted edge must respect the length limit
   and no vertex may exceed max_degree. Aborts via ERROR on the first
   violation. NOTE(review): the name keeps its original spelling ("verfy")
   because it is called from main(). */
static void verfy_graph(const int nodes, const int lines, const int edge[lines*2], const int height,
                        const int low_length, const int max_degree)
{
  PRINT_R0("Verifing a regular graph... ");

  for(int i=0;i<lines;i++){
    if(edge[i*2] != NO_EDGE)
      if(DISTANCE(edge[i*2], edge[i*2+1], height) > low_length)
        ERROR("Over length in line %d: length = %d, distance = %d\n",
              i+1, low_length, DISTANCE(edge[i*2], edge[i*2+1], height));
  }

  /* Recompute degrees from scratch; each edge contributes to both ends.
     NOTE(review): `degree[nodes]` is a VLA — large graphs may stress the stack. */
  int degree[nodes];
  for(int i=0;i<nodes;i++)
    degree[i] = 0;

  for(int i=0;i<lines;i++){
    int n1 = edge[i*2  ];
    int n2 = edge[i*2+1];
    if(n1 != NO_EDGE){
      degree[n1]++; if(degree[n1] > max_degree) ERROR("Degree is over %d\n", degree[n1]);
      degree[n2]++; if(degree[n2] > max_degree) ERROR("Degree is over %d\n", degree[n2]);
    }
  }

  PRINT_R0("OK\n");
}
/* Expands the base edge list into a rotationally symmetric graph: the base
   edges are re-indexed into the full lattice, then replicated under 180
   degree (groups == 2) or 90/180/270 degree (groups == 4) rotations.
   Afterwards the graph is repaired to be connected via repeated symmetric
   edge exchanges, keeping the candidate with the fewest unreachable nodes. */
static void create_symmetric_edge(int *edge, const int based_nodes, const int based_lines,
                                  const int groups, const int max_degree, int *degree, const int nodes,
                                  const int lines, const int height, const int width, const int based_height,
                                  const int low_length)
{
  /* Map base-lattice vertex ids (based_height tall) into the full lattice. */
  for(int i=0;i<based_lines;i++)
    for(int j=0;j<2;j++)
      edge[i*2+j] = WIDTH(edge[i*2+j], based_height) * height + HEIGHT(edge[i*2+j], based_height);

  if(groups == 2){
    for(int i=0;i<based_lines;i++)
      for(int j=0;j<2;j++)
        edge[(based_lines+i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 180);
  }
  else if(groups == 4){
    for(int i=0;i<based_lines;i++){
      for(int j=0;j<2;j++){
        edge[(based_lines  +i)*2+j] = ROTATE(edge[i*2+j], height, width, groups,  90);
        edge[(based_lines*2+i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 180);
        edge[(based_lines*3+i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 270);
      }
    }
  }

  /* Hill-climb to connectivity on scratch copies of the edges and degrees. */
  int *tmp_edge = malloc(lines*2*sizeof(int));
  int *tmp_degree = malloc(nodes*sizeof(int));
  int (*adjacency)[max_degree] = malloc(sizeof(int)*nodes*max_degree); // int adjacency[nodes][max_degree];
  create_adjacency(nodes, lines, max_degree, degree, (const int (*)[2])edge, adjacency);
  int min_num = simple_bfs(nodes, max_degree, degree, (int *)adjacency);
  while(1){
    memcpy(tmp_edge, edge, sizeof(int)*lines*2);
    memcpy(tmp_degree, degree, sizeof(int)*nodes);
    exchange_edge(nodes, lines, max_degree, tmp_degree, (int (*)[2])tmp_edge, height, width, groups, low_length, 0);
    create_adjacency(nodes, lines, max_degree, tmp_degree, (const int (*)[2])tmp_edge, adjacency);
    int tmp_num = simple_bfs(nodes, max_degree, tmp_degree, (int *)adjacency);
    if(tmp_num == 0){
      memcpy(edge, tmp_edge, sizeof(int)*lines*2);
      break;
    }
    else{
      if(tmp_num <= min_num){
        min_num = tmp_num;
        memcpy(edge, tmp_edge, sizeof(int)*lines*2);
        memcpy(degree, tmp_degree, sizeof(int)*nodes);
      }
    }
  }

  free(tmp_edge);
  free(tmp_degree);
  free(adjacency);
}
/* Manhattan (L1) distance between grid points (x1,y1) and (x2,y2). */
static int dist(const int x1, const int y1, const int x2, const int y2)
{
  int dx = x1 - x2;
  int dy = y1 - y2;
  if(dx < 0) dx = -dx;
  if(dy < 0) dy = -dy;
  return dx + dy;
}
/* Computes theoretical lower bounds on the diameter and average shortest
   path length (ASPL) of an m x n grid graph with the given degree and
   maximum link length, by combining a Moore-bound reachability limit with
   the grid's Manhattan-distance histogram.
   NOTE(review): moore/hist/mh are m*n-sized VLAs — large grids stress the stack. */
static void lower_bound_of_diam_aspl(int *low_diam, double *low_ASPL, const int m, const int n,
                                     const int max_degree, const int length)
{
  int moore[m*n], hist[m*n], mh[m*n];
  int mn = m * n, current = max_degree, ii;
  double sum = 0;

  /* moore[k]: maximum number of vertices reachable within k hops, capped at mn. */
  moore[0] = 1;
  moore[1] = max_degree + 1;
  for(ii=2;;ii++){
    current = current * (max_degree - 1);
    moore[ii] = moore[ii-1] + current;
    if(moore[ii] >= mn){
      moore[ii] = mn;
      break;
    }
  }

  int maxhop = MAX((m+n-2+(length-1))/length, ii);
  for(int i=ii+1;i<=maxhop;i++)
    moore[i] = mn;

  for(int i=0;i<m;i++){
    for(int j=0;j<n;j++){
      for(int k=0;k<=maxhop;k++)
        hist[k] = 0;
      /* Histogram of hop counts (distance rounded up by link length)
         from (i,j) to every grid point. */
      for (int i2=0;i2<m;i2++)
        for(int j2=0;j2<n;j2++)
          hist[(dist(i,j,i2,j2)+length-1)/length]++;
      for(int k=1;k<=maxhop;k++)
        hist[k] += hist[k-1];
      /* Reachable count can exceed neither the grid nor the Moore bound. */
      for(int k=0;k<=maxhop;k++)
        mh[k] = MIN(hist[k], moore[k]);
      for(int k=1;k<=maxhop;k++)
        sum += (double)(mh[k] - mh[k-1]) * k;
    }
  }

  /* Smallest hop count at which all mn vertices are reachable = diameter bound. */
  int dboth = 0;
  for(dboth=0;;dboth++)
    if(mh[dboth] == mn)
      break;

  *low_diam = dboth;
  *low_ASPL = sum/((double)mn*(mn-1));
}
/* Prints the run configuration (rank 0 only, via PRINT_R0): seed, process
   and thread counts, algorithm selection, temperature schedule, and file
   names. Output strings are kept byte-for-byte, including existing typos,
   since downstream tooling may parse them. */
static void output_params(const int max_degree, const int groups, const int low_length, const int random_seed,
                          const double max_temp, const double min_temp, const long long ncalcs,
                          const int cooling_cycle, const double cooling_rate, const char *infname,
                          const char *outfname, const double average_time, const bool enable_hill_climbing,
                          const int width, const int height, const bool enable_bfs, const bool enable_fixed_temp,
                          const double fixed_temp)
{
#ifdef NDEBUG
  PRINT_R0("NO DEBUG MODE\n");
#else
  PRINT_R0("DEBUG MODE\n");
#endif
  PRINT_R0("Seed     : %d\n", random_seed);
  PRINT_R0("Processes: %d\n", procs);
#ifdef _OPENMP
  PRINT_R0("Threads  : %d\n", omp_get_max_threads());
#endif
  if(enable_bfs) PRINT_R0("APSP     : BFS\n");
  else           PRINT_R0("APSP     : MATRIX Opetation\n");

  if(enable_hill_climbing)
    PRINT_R0("Algorithm: Hill climbing Method\n");
  else{
    if(enable_fixed_temp)
      PRINT_R0("Algorithm: Fixed Temperature Simulated Annealing : %f\n", fixed_temp);
    else
      PRINT_R0("Algorithm: Simulated Annealing\n");
    PRINT_R0("   MAX Temperature: %f\n", max_temp);
    PRINT_R0("   MIN Temperature: %f\n", min_temp);
    PRINT_R0("   Cooling Cycle: %d\n", cooling_cycle);
    PRINT_R0("   Cooling Rate : %f\n", cooling_rate);
  }
  if(groups != 1)
    PRINT_R0("   Groups     : %d\n", groups);

  PRINT_R0("Num. of Calulations: %lld\n", ncalcs);
  PRINT_R0("   Average APSP time    : %f sec.\n", average_time);
  PRINT_R0("   Estimated elapse time: %f sec.\n", average_time * ncalcs);
  if(infname[0] != NOT_C_DEFINED)
    PRINT_R0("Input filename: %s\n", infname);
  PRINT_R0("   (w x h, d, r) = (%d x %d, %d, %d)\n", width, height, max_degree, low_length);
  if(outfname[0] != NOT_C_DEFINED)
    PRINT_R0("Output filename: %s\n", outfname);
  PRINT_R0("---\n");
}
/* Writes the surviving edges (those not marked NO_EDGE) to fp, one per line
   in "x1,y1 x2,y2" lattice-coordinate format (WIDTH/HEIGHT macros). */
static void output_file(FILE *fp, const int lines, const int height, const int edge[lines*2])
{
  for(int i=0;i<lines;i++)
    if(edge[i*2] != NO_EDGE)
      fprintf(fp, "%d,%d %d,%d\n", WIDTH(edge[i*2], height), HEIGHT(edge[i*2], height),
              WIDTH(edge[i*2+1], height), HEIGHT(edge[i*2+1], height));
}
/* Entry point: builds (or loads) an initial lattice graph, optimizes it with
   simulated annealing / hill climbing under MPI, and reports diameter and
   ASPL against their theoretical lower bounds. */
int main(int argc, char *argv[])
{
  bool enable_hill_climbing = false, enable_detect_temp = false, enable_bfs = false, enable_halfway = false;
  char hostname[MPI_MAX_PROCESSOR_NAME];
  char infname[MAX_FILENAME_LENGTH] = {NOT_C_DEFINED}, outfname[MAX_FILENAME_LENGTH] = {NOT_C_DEFINED};
  int random_seed = 0, cooling_cycle = 1, groups = 1;
  int namelen, based_lines, lines, based_width, based_height, based_nodes, nodes;
  int diam = NOT_N_DEFINED, max_degree = NOT_N_DEFINED, low_diam = NOT_N_DEFINED;
  int width = NOT_N_DEFINED, height = NOT_N_DEFINED, low_length = NOT_N_DEFINED;
  long long ncalcs = DEFAULT_NCALCS, num_accepts = 0;
  double ASPL = NOT_N_DEFINED, low_ASPL = NOT_N_DEFINED, cooling_rate = NOT_N_DEFINED, max_diff_energy = NOT_N_DEFINED;
  double max_temp = NOT_N_DEFINED, min_temp = NOT_N_DEFINED, fixed_temp = NOT_N_DEFINED;
  int *edge = NULL, *degree = NULL;
  FILE *fp = NULL;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &procs);
  MPI_Get_processor_name(hostname, &namelen);
  PRINT_R0("Run on %s\n", hostname);
  time_t t = time(NULL);
  PRINT_R0("%s---\n", ctime(&t));

  // Set arguments
  set_args(argc, argv, infname, &low_length, outfname, &random_seed, &ncalcs, &max_temp,
           &min_temp, &groups, &cooling_cycle, &enable_hill_climbing, &enable_detect_temp,
           &enable_bfs, &enable_halfway, &fixed_temp, &width, &height, &max_degree);

  // Set other arguments
  /* Options are "enabled" when they differ from the NOT-DEFINED sentinels. */
  bool enable_max_temp = (max_temp != NOT_N_DEFINED);
  bool enable_min_temp = (min_temp != NOT_N_DEFINED);
  bool enable_fixed_temp = (fixed_temp != NOT_N_DEFINED);
  bool enable_infname = (infname[0] != NOT_C_DEFINED);
  bool enable_outfname = (outfname[0] != NOT_C_DEFINED);
  bool enable_whd = (width != NOT_N_DEFINED && height != NOT_N_DEFINED && max_degree != NOT_N_DEFINED);

  // Check arguments
  if(low_length == NOT_N_DEFINED) ERROR("Must need -R\n");
  else if(enable_hill_climbing && enable_max_temp) ERROR("Both -Y and -w cannot be used.\n");
  else if(enable_hill_climbing && enable_min_temp) ERROR("Both -Y and -c cannot be used.\n");
  else if(enable_hill_climbing && enable_detect_temp) ERROR("Both -Y and -d cannot be used.\n");
  else if(!enable_infname && !enable_whd) ERROR("Must set -f or \"-W and -H and -D\"\n");
  else if(enable_halfway && !enable_infname) ERROR("Must set both -M and -f\n");

  /* Default temperature schedule when not given on the command line. */
  if(!enable_max_temp) max_temp = 100.0;
  if(!enable_min_temp) min_temp = 0.217147;
  if(max_temp == min_temp) ERROR("The same values in -w and -c.\n");
  if(enable_detect_temp) ncalcs = DEFAULT_DETECT_NCALS;
  srandom(random_seed);

  if(enable_infname){
    // NOTE(review): this branch aborts immediately via ERROR; the code below
    // it (which, among other things, never allocates `degree`) is currently
    // unreachable.
    ERROR("NOT implement yet\n");
    based_lines = count_lines(infname);
    lines = (enable_halfway)? based_lines : based_lines * groups;
    edge = malloc(sizeof(int)*lines*2); // int edge[lines][2];
    read_file_lattice(edge, &based_width, &based_height, infname);
    based_nodes = max_node_num(based_lines, (int *)edge) + 1;
    if(enable_halfway){
      based_nodes /= groups;
      based_lines /= groups;
      if(groups == 2){
        based_height /= 2;
      }
      else if(groups == 4){
        based_width  /= 2;
        based_height /= 2;
      }
    }
    if(groups == 1){
      height = based_height;
      width = based_width;
    }
    else if(groups == 2){
      height = based_height * 2;
      width = based_width;
    }
    else{ // groups == 4
      height = based_height * 2;
      width = based_width * 2;
    }
    nodes = based_nodes * groups;
    max_degree = 2 * lines / nodes;
  }
  else{
    /* Sizes derived from -W/-H/-D: each vertex contributes max_degree/2 edges. */
    nodes = width * height;
    based_nodes = nodes / groups;
    lines = nodes * max_degree / 2;
    based_lines = lines / groups;
    edge = malloc(sizeof(int)*lines*2); // int edge[lines][2];
    degree = malloc(sizeof(int)*nodes); // int degree[nodes];
    if(groups == 1){
      based_width = width;
      based_height = height;
    }
    else if(groups == 2){
      based_width = width;
      based_height = height/2;
    }
    else{ // groups == 4
      based_width = width/2;
      based_height = height/2;
    }
  }

  /* Consistency checks between grid shape and symmetry groups. */
  if(groups == 4 && (based_width != based_height))
    ERROR("When g = 4, width(%d) must be equal to height(%d).\n", based_width, based_height);
  else if(groups == 4 && width%2 != 0 && height%2 != 0)
    ERROR("When g = 4, width(%d) and height(%d) must be divisible by 2.\n", width, height);
  else if(groups == 2 && height%2 != 0)
    ERROR("When g = 2, height(%d) must be divisible by 2.\n", height);
  else if(nodes%groups != 0)
    ERROR("nodes(%d) must be divisible by groups(%d)\n", nodes, groups);
  else if(lines%groups != 0)
    ERROR("(nodes*max_degree/2) must be divisible by groups(%d)\n", groups);
  else if(based_width*based_height != based_nodes)
    ERROR("Not grid graph (width %d x height %d != nodes %d).\n", based_width, based_height, based_nodes);

  if(!enable_infname)
    create_lattice(based_nodes, based_lines, based_width, based_height, max_degree, degree, low_length, edge);

  int *rotate_hash = malloc(nodes * sizeof(int));
  create_rotate_hash(nodes, height, width, groups, rotate_hash);

  if(!enable_halfway && groups != 1)
    create_symmetric_edge(edge, based_nodes, based_lines, groups, max_degree, degree, nodes,
                          lines, height, width, based_height, low_length);

  verfy_graph(nodes, lines, edge, height, low_length, max_degree);
  lower_bound_of_diam_aspl(&low_diam, &low_ASPL, width, height, max_degree, low_length);
  check_current_edge(nodes, lines, max_degree, degree, edge, low_ASPL, low_diam, groups, height, based_height,
                     enable_bfs, rotate_hash);
  double average_time = estimated_elapse_time(nodes, lines, max_degree, degree, edge, height, width, based_height, groups,
                                              low_length, enable_bfs, rotate_hash);

  if(enable_hill_climbing){
    /* Hill climbing is annealing at temperature zero. */
    fixed_temp = max_temp = min_temp = 0.0;
    cooling_rate = 1.0;
  }
  else{
    /* Geometric cooling: decays max_temp to min_temp over ncalcs steps. */
    cooling_rate = pow(min_temp/max_temp, (double)cooling_cycle/ncalcs);
  }

  if(enable_outfname && rank == 0){
    struct stat stat_buf;
    if(stat(outfname, &stat_buf) == 0)
      ERROR("Output file %s exsits. \n", outfname);
    if((fp = fopen(outfname, "w")) == NULL)
      ERROR("Cannot open %s\n", outfname);
  }

  output_params(max_degree, groups, low_length, random_seed, max_temp, min_temp, ncalcs,
                cooling_cycle, cooling_rate, infname, outfname, average_time,
                enable_hill_climbing, width, height, enable_bfs, enable_fixed_temp, fixed_temp);

  // Optimization
  timer_clear_all();
  timer_start(TIMER_SA);
  long long step = sa(nodes, lines, max_degree, degree, based_nodes, ncalcs, cooling_rate, low_diam, low_ASPL, enable_bfs,
                      enable_hill_climbing, enable_detect_temp, &max_diff_energy, max_temp,
                      min_temp, fixed_temp, edge, &diam, &ASPL, cooling_cycle, &num_accepts, width,
                      based_width, height, based_height, low_length, groups, rotate_hash, enable_fixed_temp);
  timer_stop(TIMER_SA);

  if(enable_detect_temp){
    // Set max temperature to accept it 50% in maximum diff energy.
    PRINT_R0("Proposed max temperature is %f\n", (-1.0 * max_diff_energy) / log(0.5));
    // Set min temperature to accept it 0.01% in minimum diff energy.
    END("Proposed min temperature is %f\n", (-2.0) / log(0.0001));
  }

  // Output results
  PRINT_R0("---\n");
  PRINT_R0("Diam. k = %d ASPL l = %f Diam. gap = %d ASPL gap = %f\n",
           diam, ASPL, diam-low_diam, ASPL-low_ASPL);

  double time_sa    = timer_read(TIMER_SA);
  double time_apsp  = timer_read(TIMER_APSP);
  double time_check = timer_read(TIMER_CHECK);
  PRINT_R0("Steps: %lld Elapse time: %f sec. (APSP: %f sec. Check: %f sec. Other: %f sec.)\n",
           step, time_sa, time_apsp, time_check, time_sa-(time_apsp+time_check));

  if(ncalcs > SKIP_ACCEPTS)
    PRINT_R0("Accept rate: %f (= %lld/%lld)\n",
             (double)num_accepts/(ncalcs-SKIP_ACCEPTS), num_accepts, ncalcs-SKIP_ACCEPTS);
  if(rank == 0 && enable_outfname){
    output_file(fp, lines, height, edge);
    fclose(fp);
  }
  verfy_graph(nodes, lines, edge, height, low_length, max_degree);

  MPI_Finalize();
  free(edge);
  free(degree);
  free(rotate_hash);
  return 0;
}
|
shared_private.c | #include<stdio.h>
#include<omp.h>
#include<stdlib.h>
#include<sys/time.h>
#include<unistd.h>
#define ARRAY_SIZE 1024768
#ifndef ARRAY_SIZE
#define ARRAY_SIZE 1024768
#endif
/*
 * Time a parallel element-wise vector addition c = a + b.
 * Fixes vs. original: calloc instead of malloc (the original summed
 * uninitialized heap memory, which is undefined behavior), allocation
 * checks, %ld for the long-valued elapsed time (the original passed a
 * long expression to %d), and the arrays are freed before exit.
 */
int main(int argc, char *argv[]) {
    int i;
    int *a = (int *) calloc(ARRAY_SIZE, sizeof(int));
    int *b = (int *) calloc(ARRAY_SIZE, sizeof(int));
    int *c = (int *) calloc(ARRAY_SIZE, sizeof(int));
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "allocation failed\n");
        free(a);
        free(b);
        free(c);
        return 1;
    }
    struct timeval tstart, tend;
    gettimeofday(&tstart, NULL);
    /* a, b, c are shared; each thread gets its own copy of the index i. */
    #pragma omp parallel for shared(a,b,c) private(i)
    for (i = 0; i < ARRAY_SIZE; ++i) {
        c[i] = a[i] + b[i];
    }
    gettimeofday(&tend, NULL);
    /* tv_sec/tv_usec arithmetic is long-valued on LP64: keep it long. */
    long elapsed_us = (tend.tv_usec - tstart.tv_usec)
                      + (tend.tv_sec - tstart.tv_sec) * 1000000L;
    printf("Time taken is:%ld\n", elapsed_us);
    free(a);
    free(b);
    free(c);
    return 0;
}
|
avx2-vect-aggressive.c | /* { dg-do run } */
/* { dg-require-effective-target avx2 } */
/* { dg-options "-mavx2 -O3 -fopenmp-simd -fdump-tree-vect-details" } */
#include "avx2-check.h"
#define N 64
float a[N];
int c[N];
/* Count elements of a[] strictly inside (0, 100) whose companion c[]
   entry is nonzero.  noinline keeps the loop in a separate function so
   the vectorizer dump scan below can find exactly one vectorized loop.  */
__attribute__ ((noinline)) int
foo ()
{
  int i, res = 0;
#pragma omp simd safelen(8)
  for (i=0; i<N; i++)
    {
      float t = a[i];
      /* Deliberate bitwise '&' instead of '&&': keeps the range test
         branch-free, which this testcase relies on for vectorization --
         do not "fix" to a short-circuit operator.  */
      if (t > 0.0f & t < 1.0e+2f)
	if (c[i] != 0)
	  res += 1;
    }
  return res;
}
/* Returns 100.0f; noinline keeps the constant opaque to the optimizer
   so the initialization loop in avx2_test() is not folded away.  */
__attribute__ ((noinline)) float
hundred ()
{
  float value = 100.0f;
  return value;
}
/* Driver: build inputs so exactly 24 elements satisfy foo()'s predicate.
   First half of a[] holds small positives (in range), second half holds
   values >= 100 (out of range); c[i] = i % 4 is nonzero 3 times in 4.  */
static void
avx2_test (void)
{
  int i;
  for (i = 0; i < N; i++)
    {
      c[i] = i % 4;
      a[i] = (i < N / 2) ? (float) (i + 1) : (float) i + hundred ();
    }
  /* 32 in-range elements x 3/4 nonzero-c = 24 expected hits.  */
  if (foo () != 24)
    abort ();
}
/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
|
grid.c | /* Copyright 2014-2015 The Regents of the University of California.
* Copyright 2015-2019 Martin Uecker.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* 2011-2019 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* 2014 Frank Ong <frankong@berkeley.edu>
*/
#include <math.h>
#include <complex.h>
#include <assert.h>
#include <string.h>
#include "num/multind.h"
#include "num/flpmath.h"
#include "num/specfun.h"
#include "misc/nested.h"
#include "misc/misc.h"
#include "grid.h"
/* Kaiser-Bessel window sample: supported on |x| < 0.5 and normalized
   so that kb(beta, 0.) == 1.  Zero outside the support. */
static double kb(double beta, double x)
{
	if (fabs(x) >= 0.5)
		return 0.;

	double inner = beta * sqrt(1. - pow(2. * x, 2.));
	return bessel_i0(inner) / bessel_i0(beta);
}
/* Tabulate the window for linear-interpolation lookup.
   NOTE(review): samples are spaced with (n - 1), so the last entries fall
   at/past x = 0.5 and evaluate to 0 -- presumably intentional padding for
   intlookup()'s table[index + 1] access; confirm against intlookup(). */
static void kb_precompute(double beta, int n, float table[n + 1])
{
	int i = 0;

	while (i <= n) {

		table[i] = kb(beta, (double)(i) / (double)(n - 1) / 2.);
		i++;
	}
}
static double ftkb(double beta, double x)
{
double a = sqrt(pow(beta, 2.) - pow(M_PI * x, 2.));
return ((0. == a) ? 1. : (a / sinh(a))); // * bessel_i0(beta);
}
/* Normalized kernel transform, used to undo (roll off) the gridding
   kernel's apodization in image space. */
static double rolloff(double x, double beta, double width)
{
	double num = ftkb(beta, x * width);
	double den = ftkb(beta, 0.);
	return num / den;
}
/* Linear interpolation: c == 0 yields a, c == 1 yields b. */
static float lerp(float a, float b, float c)
{
	float scaled_b = c * b;
	double scaled_a = (1. - c) * a;
	return scaled_a + scaled_b;
}
// Linear interpolation look up: sample the precomputed window table at
// normalized position x in [0, 1], interpolating between the two
// neighboring entries.  Fix: the second post-condition was the tautology
// "assert(0 >= 0.)" -- clearly intended to check the result l.
static float intlookup(int n, const float table[n + 1], float x)
{
	float fpart;

//	fpart = modff(x * n, &ipart);
//	int index = ipart;

	int index = (int)(x * (n - 1));
	fpart = x * (n - 1) - (float)index;
	
#if 1
	assert(index >= 0);
	assert(index <= n);
	assert(fpart >= 0.);
	assert(fpart <= 1.);
#endif
	float l = lerp(table[index], table[index + 1], fpart);

#if 1
	assert(l <= 1.);
	assert(l >= 0.);	// was assert(0 >= 0.) -- tautology
#endif
	return l;
}
enum { kb_size = 100 };
static float kb_table[kb_size + 1];
static float kb_beta = -1.;
// Adjoint gridding (Cartesian grid -> k-space samples): for every
// trajectory sample, interpolate from the grid and accumulate into dst.
// dst is laid out as [channel][sample].
void gridH(const struct grid_conf_s* conf, const complex float* traj, const long ksp_dims[4], complex float* dst, const long grid_dims[4], const complex float* grid)
{
	long C = ksp_dims[3];	// number of channels/coils

	// precompute kaiser bessel table (lazily, once per process; the
	// critical section guards concurrent first calls)
	#pragma omp critical
	if (-1 == kb_beta) {

		kb_precompute(conf->beta, kb_size, kb_table);
		kb_beta = conf->beta;
	}

	// the cached table is only valid for a single beta per process
	assert(fabs(kb_beta - conf->beta) < 1.E-6);
	assert(1 == ksp_dims[0]);

	long samples = ksp_dims[1] * ksp_dims[2];

	#pragma omp parallel for
	for(int i = 0; i < samples; i++) {

		float pos[3];
		// oversampled trajectory coordinates ...
		pos[0] = conf->os * (creal(traj[i * 3 + 0]));
		pos[1] = conf->os * (creal(traj[i * 3 + 1]));
		pos[2] = conf->os * (creal(traj[i * 3 + 2]));

		// ... shifted so k-space center is mid-grid for non-singleton dims
		pos[0] += (grid_dims[0] > 1) ? ((float)grid_dims[0] / 2.) : 0.;
		pos[1] += (grid_dims[1] > 1) ? ((float)grid_dims[1] / 2.) : 0.;
		pos[2] += (grid_dims[2] > 1) ? ((float)grid_dims[2] / 2.) : 0.;

		// per-sample accumulator, one value per channel
		complex float val[C];
		for (int j = 0; j < C; j++)
			val[j] = 0.0;

		grid_pointH(C, 3, grid_dims, pos, val, grid, conf->periodic, conf->width, kb_size, kb_table);

		for (int j = 0; j < C; j++)
			dst[j * samples + i] += val[j];
	}
}
// Forward gridding (k-space samples -> Cartesian grid): spread each
// non-Cartesian sample onto the grid, weighted by the Kaiser-Bessel kernel.
// src is laid out as [channel][sample].
void grid(const struct grid_conf_s* conf, const complex float* traj, const long grid_dims[4], complex float* grid, const long ksp_dims[4], const complex float* src)
{
	long C = ksp_dims[3];	// number of channels/coils

	// precompute kaiser bessel table (lazily, once per process)
	#pragma omp critical
	if (-1 == kb_beta) {

		kb_precompute(conf->beta, kb_size, kb_table);
		kb_beta = conf->beta;
	}

	// the cached table is only valid for a single beta per process
	assert(fabs(kb_beta - conf->beta) < 1.E-6);
	assert(1 == ksp_dims[0]);

	long samples = ksp_dims[1] * ksp_dims[2];

	// grid
	#pragma omp parallel for
	for(int i = 0; i < samples; i++) {

		float pos[3];
		// oversampled trajectory coordinates, shifted so k-space
		// center is mid-grid for non-singleton dimensions
		pos[0] = conf->os * (creal(traj[i * 3 + 0]));
		pos[1] = conf->os * (creal(traj[i * 3 + 1]));
		pos[2] = conf->os * (creal(traj[i * 3 + 2]));

		pos[0] += (grid_dims[0] > 1) ? ((float) grid_dims[0] / 2.) : 0.;
		pos[1] += (grid_dims[1] > 1) ? ((float) grid_dims[1] / 2.) : 0.;
		pos[2] += (grid_dims[2] > 1) ? ((float) grid_dims[2] / 2.) : 0.;

		// gather this sample's value for every channel
		complex float val[C];
		for (int j = 0; j < C; j++)
			val[j] = src[j * samples + i];

		grid_point(C, 3, grid_dims, pos, grid, val, conf->periodic, conf->width, kb_size, kb_table);
	}
}
// Sanity-check dimension compatibility between trajectory, k-space and
// grid arrays for the batched gridding operations below.
static void grid2_dims(unsigned int D, const long trj_dims[D], const long ksp_dims[D], const long grid_dims[D])
{
	assert(D >= 4);
	assert(md_check_compat(D - 3, ~0, grid_dims + 3, ksp_dims + 3));
//	assert(md_check_compat(D - 3, ~(MD_BIT(0) | MD_BIT(1)), trj_dims + 3, ksp_dims + 3));
	assert(md_check_bounds(D - 3, ~0, trj_dims + 3, ksp_dims + 3));

	assert(3 == trj_dims[0]);	// x, y, z coordinate per sample
	assert(1 == trj_dims[3]);
	assert(1 == ksp_dims[0]);
}
// Batched forward gridding: iterate over all dimensions above the first
// four and call grid() on each slice.
void grid2(const struct grid_conf_s* conf, unsigned int D, const long trj_dims[D], const complex float* traj, const long grid_dims[D], complex float* dst, const long ksp_dims[D], const complex float* src)
{
	grid2_dims(D, trj_dims, ksp_dims, grid_dims);

	long ksp_strs[D];
	md_calc_strides(D, ksp_strs, ksp_dims, CFL_SIZE);

	long trj_strs[D];
	md_calc_strides(D, trj_strs, trj_dims, CFL_SIZE);

	long grid_strs[D];
	md_calc_strides(D, grid_strs, grid_dims, CFL_SIZE);

	long pos[D];
	for (unsigned int i = 0; i < D; i++)
		pos[i] = 0;

	do {
		grid(conf, &MD_ACCESS(D, trj_strs, pos, traj),
			grid_dims, &MD_ACCESS(D, grid_strs, pos, dst),
			ksp_dims, &MD_ACCESS(D, ksp_strs, pos, src));

	// (~0 ^ 15) masks out the first four dims, which grid() handles itself
	} while(md_next(D, ksp_dims, (~0 ^ 15), pos));
}
// Batched adjoint gridding: iterate over all dimensions above the first
// four and call gridH() on each slice.
void grid2H(const struct grid_conf_s* conf, unsigned int D, const long trj_dims[D], const complex float* traj, const long ksp_dims[D], complex float* dst, const long grid_dims[D], const complex float* src)
{
	grid2_dims(D, trj_dims, ksp_dims, grid_dims);

	long ksp_strs[D];
	md_calc_strides(D, ksp_strs, ksp_dims, CFL_SIZE);

	long trj_strs[D];
	md_calc_strides(D, trj_strs, trj_dims, CFL_SIZE);

	long grid_strs[D];
	md_calc_strides(D, grid_strs, grid_dims, CFL_SIZE);

	long pos[D];
	for (unsigned int i = 0; i < D; i++)
		pos[i] = 0;

	do {
		gridH(conf, &MD_ACCESS(D, trj_strs, pos, traj),
			ksp_dims, &MD_ACCESS(D, ksp_strs, pos, dst),
			grid_dims, &MD_ACCESS(D, grid_strs, pos, src));

	// (~0 ^ 15) masks out the first four dims, which gridH() handles itself
	} while(md_next(D, ksp_dims, (~0 ^ 15), pos));
}
typedef void CLOSURE_TYPE(grid_update_t)(long ind, float d);
#ifndef __clang__
#define VLA(x) x
#else
// blocks extension does not play well even with arguments which
// just look like variably-modified types
#define VLA(x)
#endif
// Visit every grid point within `width` of `pos` in all N dimensions and
// invoke `update` with its linear index and the separable Kaiser-Bessel
// weight.  Periodic mode wraps indices instead of clamping.
static void grid_point_gen(int N, const long dims[VLA(N)], const float pos[VLA(N)], bool periodic, float width, int kb_size, const float kb_table[VLA(kb_size + 1)], grid_update_t update)
{
#ifndef __clang__
	int sti[N];	// start index per dimension
	int eni[N];	// end index per dimension
	int off[N];	// wrap-around offset for periodic dimensions
#else
	// blocks extension does not play well with variably-modified types
	int* sti = alloca(sizeof(int[N]));
	int* eni = alloca(sizeof(int[N]));
	int* off = alloca(sizeof(int[N]));
#endif

	for (int j = 0; j < N; j++) {

		sti[j] = (int)ceil(pos[j] - width);
		eni[j] = (int)floor(pos[j] + width);
		off[j] = 0;

		// empty support in this dimension: nothing to update
		if (sti[j] > eni[j])
			return;

		if (!periodic) {
			// clamp the support to the grid bounds
			sti[j] = MAX(sti[j], 0);
			eni[j] = MIN(eni[j], dims[j] - 1);

		} else {
			// shift so (w + off) is nonnegative before the % below
			while (sti[j] + off[j] < 0)
				off[j] += dims[j];
		}

		if (1 == dims[j]) {

			assert(0. == pos[j]); // ==0. fails nondeterministically for test_nufft_forward bbdec08cb
			sti[j] = 0;
			eni[j] = 0;
		}
	}

	// Recurse over dimensions, accumulating the product of 1D weights d.
	__block NESTED(void, grid_point_r, (int N, long ind, float d))	// __block for recursion
	{
		if (0 == N) {

			NESTED_CALL(update, (ind, d));

		} else {

			N--;

			for (int w = sti[N]; w <= eni[N]; w++) {

				// distance to the sample, normalized to [0, 1] for lookup
				float frac = fabs(((float)w - pos[N]));
				float d2 = d * intlookup(kb_size, kb_table, frac / width);
				long ind2 = (ind * dims[N] + ((w + off[N]) % dims[N]));

				grid_point_r(N, ind2, d2);
			}
		}
	};

	grid_point_r(N, 0, 1.);
}
// Spread one multi-channel sample `val` onto the grid `dst` at fractional
// position `pos`, weighted by the Kaiser-Bessel kernel.
void grid_point(unsigned int ch, int N, const long dims[VLA(N)], const float pos[VLA(N)], complex float* dst, const complex float val[VLA(ch)], bool periodic, float width, int kb_size, const float kb_table[kb_size + 1])
{
	NESTED(void, update, (long ind, float d))
	{
		for (unsigned int c = 0; c < ch; c++) {

			// we are allowed to update real and imaginary part independently which works atomically
			#pragma omp atomic
			__real(dst[ind + c * dims[0] * dims[1] * dims[2]]) += __real(val[c]) * d;
			#pragma omp atomic
			__imag(dst[ind + c * dims[0] * dims[1] * dims[2]]) += __imag(val[c]) * d;
		}
	};

	grid_point_gen(N, dims, pos, periodic, width, kb_size, kb_table, update);
}
// Adjoint of grid_point(): gather kernel-weighted grid values from `src`
// around position `pos` and accumulate into the per-channel array `val`.
void grid_pointH(unsigned int ch, int N, const long dims[VLA(N)], const float pos[VLA(N)], complex float val[VLA(ch)], const complex float* src, bool periodic, float width, int kb_size, const float kb_table[kb_size + 1])
{
	NESTED(void, update, (long ind, float d))
	{
		for (unsigned int c = 0; c < ch; c++) {

			// we are allowed to update real and imaginary part independently which works atomically
			#pragma omp atomic
			__real(val[c]) += __real(src[ind + c * dims[0] * dims[1] * dims[2]]) * d;
			#pragma omp atomic
			__imag(val[c]) += __imag(src[ind + c * dims[0] * dims[1] * dims[2]]) * d;
		}
	};

	grid_point_gen(N, dims, pos, periodic, width, kb_size, kb_table, update);
}
double calc_beta(float os, float width)
{
return M_PI * sqrt(pow((width * 2. / os) * (os - 0.5), 2.) - 0.8);
}
/* Normalized coordinate of index i in a dimension of size d, centered
   so index d/2 maps to 0; singleton dimensions map to 0. */
static float pos(int d, int i)
{
	if (1 == d)
		return 0.;

	return ((float)i - (float)d / 2.) / (float)d;
}
// Fill dst with separable rolloff-correction weights, i.e. the inverse
// apodization of the Kaiser-Bessel kernel, evaluated per voxel.
// (os is currently unused; the kernel transform depends on beta/width.)
void rolloff_correction(float os, float width, float beta, const long dimensions[3], complex float* dst)
{
	UNUSED(os);

	#pragma omp parallel for collapse(3)
	for (int z = 0; z < dimensions[2]; z++)
		for (int y = 0; y < dimensions[1]; y++)
			for (int x = 0; x < dimensions[0]; x++)
				dst[x + dimensions[0] * (y + z * dimensions[1])]
					= rolloff(pos(dimensions[0], x), beta, width)
					* rolloff(pos(dimensions[1], y), beta, width)
					* rolloff(pos(dimensions[2], z), beta, width);
}
|
utils.c | // @brief : Implementations of some helper functions I use here and there
// @author : Hua Huang <huangh223@gatech.edu>
// @modified : 2020-12-03
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <complex.h>
#include <sys/time.h>
#include <math.h>
#include "utils.h"
// Wall-clock timestamp in seconds (microsecond resolution).
double get_wtime_sec()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double) now.tv_sec + (double) now.tv_usec / 1000000.0;
}
// Partition an array of `len` elements into `nblk` near-equal blocks and
// return block iblk's start position and length.  The first (len % nblk)
// blocks get one extra element.  Invalid iblk yields (-1, 0).
// NOTE(review): the guard accepts iblk == nblk, which yields spos == len
// with a nonzero length -- confirm callers never pass iblk == nblk.
void calc_block_spos_len(
    const int len, const int nblk, const int iblk,
    int *blk_spos, int *blk_len
)
{
    if (iblk < 0 || iblk > nblk)
    {
        *blk_spos = -1;
        *blk_len  = 0;
        return;
    }
    const int base  = len / nblk;
    const int extra = len % nblk;
    if (iblk < extra)
    {
        *blk_spos = (base + 1) * iblk;
        *blk_len  = base + 1;
    }
    else
    {
        *blk_spos = base * iblk + extra;
        *blk_len  = base;
    }
}
// Allocate `size` bytes aligned to `alignment` (a power of two that is a
// multiple of sizeof(void*)); returns NULL on failure.
// Fix: check posix_memalign's return value -- on failure the contents of
// the output pointer are unspecified, so it must not be returned blindly.
void *malloc_aligned(size_t size, size_t alignment)
{
    void *ptr = NULL;
    if (posix_memalign(&ptr, alignment, size) != 0)
        return NULL;
    return ptr;
}
// Release a block obtained from malloc_aligned(); posix_memalign memory
// is released with plain free().  Safe to call with NULL.
void free_aligned(void *ptr)
{
    free(ptr);
}
// Calculate the 2-norm of a vector
// Warning: this is a naive implementation, not numerically stable
double calc_2norm(const int len, const double *x)
{
double res = 0.0;
for (int i = 0; i < len; i++) res += x[i] * x[i];
return sqrt(res);
}
// Calculate the 2-norm of the difference between two vectors
// and the 2-norm of the reference vector
void calc_err_2norm(
const int len, const double *x0, const double *x1,
double *x0_2norm_, double *err_2norm_
)
{
double x0_2norm = 0.0, err_2norm = 0.0, diff;
for (int i = 0; i < len; i++)
{
diff = x0[i] - x1[i];
x0_2norm += x0[i] * x0[i];
err_2norm += diff * diff;
}
*x0_2norm_ = sqrt(x0_2norm);
*err_2norm_ = sqrt(err_2norm);
}
// Copy an nrow x ncol block between row-major matrices, one memcpy per
// row; lds/ldd are the leading dimensions (in elements) of src/dst.
void copy_matrix_block(
    const size_t dt_size, const int nrow, const int ncol,
    const void *src, const int lds, void *dst, const int ldd
)
{
    const size_t src_stride = dt_size * (size_t) lds;
    const size_t dst_stride = dt_size * (size_t) ldd;
    const size_t row_bytes  = dt_size * (size_t) ncol;
    const char *src_row = (const char *) src;
    char *dst_row = (char *) dst;
    for (int r = 0; r < nrow; r++)
    {
        memcpy(dst_row, src_row, row_bytes);
        src_row += src_stride;
        dst_row += dst_stride;
    }
}
// Gather: dst[i] = src[idx[i]] for element widths of 4, 8, or 16 bytes
// (float / double / double complex).  Other widths are silently ignored,
// matching the original contract.
void gather_vector_elements(const size_t dt_size, const int nelem, const int *idx, const void *src, void *dst)
{
    if (dt_size == 4)
    {
        const float *s = (const float *) src;
        float *d = (float *) dst;
        for (int i = 0; i < nelem; i++) d[i] = s[idx[i]];
    }
    else if (dt_size == 8)
    {
        const double *s = (const double *) src;
        double *d = (double *) dst;
        for (int i = 0; i < nelem; i++) d[i] = s[idx[i]];
    }
    else if (dt_size == 16)
    {
        const double _Complex *s = (const double _Complex *) src;
        double _Complex *d = (double _Complex *) dst;
        for (int i = 0; i < nelem; i++) d[i] = s[idx[i]];
    }
}
// Gather rows: dst row r = src row idx[r].  Rows are independent, so the
// loop is safe to parallelize.
void gather_matrix_rows(
    const size_t dt_size, const int nrow, const int ncol, const int *idx,
    const void *src, const int lds, void *dst, const int ldd
)
{
    const char *src_base = (const char *) src;
    char *dst_base = (char *) dst;
    const size_t src_stride = dt_size * (size_t) lds;
    const size_t dst_stride = dt_size * (size_t) ldd;
    const size_t row_bytes  = dt_size * (size_t) ncol;
    #if defined(_OPENMP)
    #pragma omp parallel for schedule(static)
    #endif
    for (int r = 0; r < nrow; r++)
        memcpy(dst_base + (size_t) r * dst_stride,
               src_base + (size_t) idx[r] * src_stride, row_bytes);
}
// Gather columns: for each row, pick the idx-selected columns using
// gather_vector_elements().  Rows are independent -> parallel loop.
void gather_matrix_cols(
    const size_t dt_size, const int nrow, const int ncol, const int *idx,
    const void *src, const int lds, void *dst, const int ldd
)
{
    const char *src_base = (const char *) src;
    char *dst_base = (char *) dst;
    const size_t src_stride = dt_size * (size_t) lds;
    const size_t dst_stride = dt_size * (size_t) ldd;
    #if defined(_OPENMP)
    #pragma omp parallel for schedule(static)
    #endif
    for (int r = 0; r < nrow; r++)
        gather_vector_elements(dt_size, ncol, idx,
                               src_base + (size_t) r * src_stride,
                               dst_base + (size_t) r * dst_stride);
}
// Pretty-print an nrow x ncol int block to stdout.  `fmt` is a
// caller-supplied printf format for one int -- non-literal format, so
// only trusted callers should supply it.
void print_int_mat_blk(
    const int *mat, const int ldm, const int nrow, const int ncol,
    const char *fmt, const char *mat_name
)
{
    printf("%s:\n", mat_name);
    for (int i = 0; i < nrow; i++)
    {
        const int *row = mat + i * ldm;
        for (int j = 0; j < ncol; j++)
        {
            printf(fmt, row[j]);
            putchar(' ');
        }
        putchar('\n');
    }
    putchar('\n');
}
// Pretty-print an nrow x ncol double block to stdout.  `fmt` is a
// caller-supplied printf format for one double -- non-literal format, so
// only trusted callers should supply it.
void print_dbl_mat_blk(
    const double *mat, const int ldm, const int nrow, const int ncol,
    const char *fmt, const char *mat_name
)
{
    printf("%s:\n", mat_name);
    for (int i = 0; i < nrow; i++)
    {
        const double *row = mat + i * ldm;
        for (int j = 0; j < ncol; j++)
        {
            printf(fmt, row[j]);
            putchar(' ');
        }
        putchar('\n');
    }
    putchar('\n');
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Compute *result = *x - *y with tv_usec normalized into [0, 1000000).
 * NOTE: *y is modified during carry normalization, exactly as in the
 * classic glibc manual example this follows.
 * Returns 1 if the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow seconds into microseconds so x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
  {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec  += carry;
  }
  /* Opposite carry when the microsecond gap exceeds one second. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec  -= carry;
  }
  /* tv_usec is now certainly positive. */
  result->tv_sec  = x->tv_sec  - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
// Benchmark driver for a time-tiled (PLUTO/CLooG-generated) order-1 3D
// 7-point stencil with variable coefficients.  Runs TESTS repetitions and
// reports the per-test and (via PRINT_RESULTS) best wall-clock time.
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  // NOTE(review): Nx/Ny/Nz (and Nt) stay uninitialized when too few
  // command-line arguments are given -- callers must pass all four.
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays: A holds two time planes, coef the 7 stencil
  // coefficient fields, each as Nz x Ny x Nx pointer-based 3D arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 32;
  tile_size[3] = 32;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables with reproducible pseudo-random data
  //
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    The GNU C Library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
    License as published by the Free Software Foundation; either
    version 2.1 of the License, or (at your option) any later version.
    The GNU C Library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
    Lesser General Public License for more details.
    You should have received a copy of the GNU Lesser General Public
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>. */
    /* This header is separate from features.h so that the compiler can
    include it implicitly at the start of every compilation. It must
    not itself include <features.h> or any other header that includes
    <features.h> because the implicit include comes before any feature
    test macros that may be defined in a source file before it first
    explicitly includes a system header. GCC knows the name of this
    header in order to preinclude it. */
    /* glibc's intent is to support the IEC 559 math functionality, real
    and complex. If the GCC (4.9 and later) predefined macros
    specifying compiler intent are available, use them to determine
    whether the overall intent is to support these features; otherwise,
    presume an older compiler has intent to support these features and
    define these macros by default. */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
    Unicode 6.0. */
    /* We do not support C11 <threads.h>. */
    // t1..t8 are the CLooG tile/point loop iterators; the time loop is
    // tiled by 4 (t1) and space by 8/32/32 per the tile_size list above.
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,4);t1++) {
        lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
        ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
        // parallelize over the t2 (wavefront) tile dimension
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-7,8)),ceild(8*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(4*t1+Ny+5,32)),floord(8*t2+Ny+4,32)),floord(8*t1-8*t2+Nz+Ny+3,32));t3++) {
            for (t4=max(max(max(0,ceild(t1-7,8)),ceild(8*t2-Nz-28,32)),ceild(32*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(4*t1+Nx+5,32)),floord(8*t2+Nx+4,32)),floord(32*t3+Nx+28,32)),floord(8*t1-8*t2+Nz+Nx+3,32));t4++) {
              for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),32*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),32*t3+30),32*t4+30),8*t1-8*t2+Nz+5);t5++) {
                for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
                    lbv=max(32*t4,t5+1);
                    ubv=min(32*t4+31,t5+Nx-2);
                    // innermost (x) loop: vectorization hints for icc
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      // 7-point stencil, double-buffered in time via (t5 % 2)
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
|
RecordTable.h | /*
* Souffle - A Datalog Compiler
* Copyright (c) 2020, The Souffle Developers. All rights reserved.
* Licensed under the Universal Permissive License v 1.0 as shown at:
* - https://opensource.org/licenses/UPL
* - <souffle root>/licenses/SOUFFLE-UPL.txt
*/
/************************************************************************
*
* @file RecordTable.h
*
* Data container to store Records of the Datalog program.
*
***********************************************************************/
#pragma once
#include "CompiledTuple.h"
#include "ParallelUtils.h"
#include "RamTypes.h"
#include <algorithm>
#include <cassert>
#include <iostream>
#include <limits>
#include <map>
#include <unordered_map>
#include <vector>
namespace souffle {
/**
* A bidirectional mapping between tuples and reference indices.
*/
class RecordMap {
    /** The arity of the stored tuples */
    const size_t arity;

    /** The mapping from tuples to references/indices */
    std::map<std::vector<RamDomain>, RamDomain> recordToIndex;

    /** The mapping from indices to tuples */
    std::vector<std::vector<RamDomain>> indexToRecord;

public:
    explicit RecordMap(size_t arity) : arity(arity), indexToRecord(1) {}  // note: index 0 element left free

    /**
     * Pack the given vector -- create a new reference if necessary.
     * Thread-safe via the record_pack critical section; insertion also
     * takes record_unpack so readers in unpack() are excluded while
     * indexToRecord may reallocate.
     */
    RamDomain pack(const std::vector<RamDomain>& vector) {
        RamDomain index;
#pragma omp critical(record_pack)
        {
            auto pos = recordToIndex.find(vector);
            if (pos != recordToIndex.end()) {
                index = pos->second;
            } else {
#pragma omp critical(record_unpack)
                {
                    indexToRecord.push_back(vector);
                    index = indexToRecord.size() - 1;
                    recordToIndex[vector] = index;

                    // assert that new index is smaller than the range
                    assert(index != std::numeric_limits<RamDomain>::max());
                }
            }
        }
        return index;
    }

    /**
     * Packs the given tuple -- and may create a new reference if necessary.
     * Copies arity elements from the raw tuple into a vector first.
     */
    RamDomain pack(const RamDomain* tuple) {
        std::vector<RamDomain> tmp(arity);
        for (size_t i = 0; i < arity; i++) {
            tmp[i] = tuple[i];
        }
        return pack(tmp);
    }

    /**
     * Obtains a pointer to the tuple addressed by the given index.
     * NOTE(review): the returned pointer aliases indexToRecord's storage;
     * a later pack() that grows the vector may reallocate and dangle it --
     * confirm callers do not hold it across pack() calls.
     */
    RamDomain* unpack(RamDomain index) {
        RamDomain* res;

#pragma omp critical(record_unpack)
        res = indexToRecord[index].data();

        return res;
    }

    // Const overload of unpack(); same aliasing caveat as above.
    const RamDomain* unpack(RamDomain index) const {
        const RamDomain* res;

#pragma omp critical(record_unpack)
        res = indexToRecord[index].data();

        return res;
    }
};
// Stores all records of the program, one RecordMap per arity; references
// (RamDomain indices) are only meaningful together with their arity.
class RecordTable {
public:
    RecordTable() = default;
    virtual ~RecordTable() = default;

    /**
     * A function packing a tuple of the given arity into a reference.
     */
    RamDomain pack(RamDomain* tuple, size_t arity) {
        return getForArity(arity).pack(tuple);
    }

    /**
     * A function packing a vector into a reference.
     */
    RamDomain pack(const std::vector<RamDomain>& vector) {
        return getForArity(vector.size()).pack(vector);
    }

    /**
     * A function packing a tuple of the given arity into a reference.
     * NOTE(review): the cast of tuple.data assumes Domain == RamDomain;
     * confirm for other Domain instantiations.
     */
    template <typename Domain, std::size_t Arity>
    RamDomain pack(ram::Tuple<Domain, Arity> tuple) {
        return getForArity(Arity).pack(static_cast<RamDomain*>(tuple.data));
    }

    /**
     * A function obtaining a pointer to the tuple addressed by the given reference.
     * Asserts that a map for this arity exists (i.e. something was packed).
     */
    RamDomain* unpack(RamDomain ref, size_t arity) {
        auto iter = maps.find(arity);
        assert(iter != maps.end() && "Attempting to unpack non-existing record");
        return (iter->second).unpack(ref);
    }

    /**
     * A function obtaining a pointer to the tuple addressed by the given reference.
     */
    const RamDomain* unpack(RamDomain ref, size_t arity) const {
        auto iter = maps.find(arity);
        assert(iter != maps.end() && "Attempting to unpack non-existing record");
        return (iter->second).unpack(ref);
    }

    /**
     * A function obtaining a pointer to the tuple addressed by the given reference,
     * copied element-wise into a ram::Tuple.
     * NOTE(review): the local is declared ram::Tuple<RamDomain, Arity> but the
     * return type uses Domain -- only valid when Domain == RamDomain; confirm.
     */
    template <typename Domain, std::size_t Arity>
    ram::Tuple<Domain, Arity> unpackTuple(RamDomain ref) {
        ram::Tuple<RamDomain, Arity> tuple;
        RamDomain* data = getForArity(Arity).unpack(ref);
        for (size_t i = 0; i < Arity; ++i) {
            tuple.data[i] = data[i];
        }
        return tuple;
    }

    /**
     * Determines whether the given reference is the nil reference encoding
     * the absence of any nested record.
     */
    bool isNil(RamDomain ref) const {
        return ref == getNil();
    }

    // Reference value reserved for "no record" (index 0 is left free).
    static constexpr RamDomain getNil() {
        return 0;
    }

private:
    /** One RecordMap per arity, created on demand */
    std::unordered_map<size_t, RecordMap> maps;

    // Returns (creating if needed) the map for the given arity; creation
    // is guarded by a critical section so concurrent callers do not race.
    RecordMap& getForArity(size_t arity) {
        std::unordered_map<size_t, RecordMap>::iterator mapsIterator;
#pragma omp critical(RecordTableGetForArity)
        {
            // This will create a new map if it doesn't exist yet.
            mapsIterator = maps.emplace(arity, arity).first;
        }
        return mapsIterator->second;
    }
};
} // namespace souffle
|
inputOmpfor2.c | /*************************************************
omp for with decremental loop iteration control
**************************************************/
#include <stdio.h>
#ifdef _OPENMP
#include "omp.h"
#endif
static long num_steps=10000000;
double step;
int k_3=100;
// int k_4=100;
/*
 * Compute pi by midpoint integration of 4/(1+x^2) over [0, 1] using an
 * OpenMP `for` with a decrementing loop control.
 * Fixes vs. original: k_3 is shared and was incremented by all threads
 * without synchronization (a data race) -- now updated atomically; the
 * omp_get_num_threads() call is guarded by _OPENMP to match the
 * conditional include of omp.h above.
 */
int main()
{
  double x, pi, sum = 0.0;
  int i;
  step = 1.0 / (double) num_steps;
#pragma omp parallel private (x)
  {
#ifdef _OPENMP
#pragma omp single
    printf("Running using %d threads..\n", omp_get_num_threads());
#endif
#pragma omp for reduction(+:sum) schedule(static)
    for (i = num_steps; i >= 1; i = i - 1)
    //for(i=1;i<=num_steps;i++)
    {
      /* k_3 is shared across threads: update atomically. */
#pragma omp atomic
      k_3++;
      x = (i - 0.5) * step;
      sum = sum + 4.0 / (1.0 + x * x);
    }
  }
  pi = step * sum;
  printf("step:%e sum:%f PI=%.20f\n", step, sum, pi);
  return 0;
}
|
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 3x3 stride-1 convolution, SSE path: for each output channel, initialize
// with the bias and accumulate contributions from every input channel.
// Two output rows are produced per inner iteration where possible.
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;
            float* outptr2 = outptr + outw;     // second output row

            const float* img0 = bottom_blob.channel(q);
            // 9 kernel taps for (output p, input q), as three rows k0/k1/k2
            const float* kernel0 = kernel + p*inch*9 + q*9;

            // four consecutive input rows feed two output rows
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

#if __AVX__ || __SSE__
            // NOTE(review): each load reads 4 floats although only 3 taps
            // are used (lane 3 is ignored below) -- reads one element past
            // the 3 needed; confirm row padding makes this safe.
            __m128 k0_data = _mm_loadu_ps(k0);
            __m128 k1_data = _mm_loadu_ps(k1);
            __m128 k2_data = _mm_loadu_ps(k2);

            int i = 0;
            // two output rows per iteration
            for (; i + 1 < outh; i += 2)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    __m128 r0_data = _mm_loadu_ps(r0);
                    __m128 r1_data = _mm_loadu_ps(r1);
                    __m128 r2_data = _mm_loadu_ps(r2);
                    __m128 r3_data = _mm_loadu_ps(r3);

                    __m128 sum = _mm_setzero_ps();
                    __m128 sum2 = _mm_setzero_ps();
                    float sum_sum = 0, sum_sum2 = 0;

                    // row 0 of output: rows r0..r2 against k0..k2
                    sum = _mm_add_ps(_mm_mul_ps(r0_data, k0_data), sum);
                    sum = _mm_add_ps(_mm_mul_ps(r1_data, k1_data), sum);
                    sum = _mm_add_ps(_mm_mul_ps(r2_data, k2_data), sum);
                    // NOTE(review): .m128_f32 is an MSVC-specific member;
                    // only the first 3 lanes are summed (3-wide taps).
                    sum_sum += sum.m128_f32[0] + sum.m128_f32[1] + sum.m128_f32[2];

                    // row 1 of output: rows r1..r3 against k0..k2
                    sum2 = _mm_add_ps(_mm_mul_ps(r1_data, k0_data), sum2);
                    sum2 = _mm_add_ps(_mm_mul_ps(r2_data, k1_data), sum2);
                    sum2 = _mm_add_ps(_mm_mul_ps(r3_data, k2_data), sum2);
                    sum_sum2 += sum2.m128_f32[0] + sum2.m128_f32[1] + sum2.m128_f32[2];

                    *outptr += sum_sum;
                    *outptr2 += sum_sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr++;
                    outptr2++;
                }

                // skip the 2-pixel border plus one full row (we advanced 2 rows)
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;

                outptr += outw;
                outptr2 += outw;
            }

            // remaining single output row
            for (; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    __m128 r0_data = _mm_loadu_ps(r0);
                    __m128 r1_data = _mm_loadu_ps(r1);
                    __m128 r2_data = _mm_loadu_ps(r2);

                    __m128 sum = _mm_setzero_ps();
                    float sum_sum = 0;
#if USE_FMADD128
                    sum = _mm_fmadd_ps(r0_data, k0_data, sum);
                    sum = _mm_fmadd_ps(r1_data, k1_data, sum);
                    sum = _mm_fmadd_ps(r2_data, k2_data, sum);
                    sum_sum += sum.m128_f32[0] + sum.m128_f32[1] + sum.m128_f32[2];
#else
                    sum = _mm_add_ps(_mm_mul_ps(r0_data, k0_data), sum);
                    sum = _mm_add_ps(_mm_mul_ps(r1_data, k1_data), sum);
                    sum = _mm_add_ps(_mm_mul_ps(r2_data, k2_data), sum);
                    sum_sum += sum.m128_f32[0] + sum.m128_f32[1] + sum.m128_f32[2];
#endif
                    *outptr += sum_sum;

                    r0++;
                    r1++;
                    r2++;
                    outptr++;
                }

                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
#else
            // scalar fallback: same two-rows-at-a-time structure
            int i = 0;

            for (; i+1 < outh; i+=2)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = 0;
                    float sum2 = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];

                    *outptr += sum;
                    *outptr2 += sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr++;
                    outptr2++;
                }

                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;

                outptr += outw;
                outptr2 += outw;
            }

            for (; i < outh; i++)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    outptr++;
                }

                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
#endif
        }
    }
}
// Transform each 3x3 kernel into the Winograd F(2,3) domain: U = G * g * G^T,
// where G is the 4x3 filter-transform matrix. The result is a 4x4 (=16 float)
// tile per (output channel, input channel) pair, stored in kernel_tm as
// channel(p).row(q).
static void conv3x3s1_winograd23_transform_kernel_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(4*4, inch, outch);

    // Filter-transform matrix G for F(2,3).
    const float ktm[4][3] = {
        { 1.0f, 0.0f, 0.0f},
        { 1.0f/2, 1.0f/2, 1.0f/2},
        { 1.0f/2, -1.0f/2, 1.0f/2},
        { 0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            // 3x3 filter g for (output p, input q); rows are g[0..2], g[3..5], g[6..8].
            const float* g = (const float*)kernel + (p * inch + q) * 9;
            float* u = kernel_tm.channel(p).row(q);

            // Left multiply: Gg = G * g  (4x3 intermediate). Gg[a][b] applies
            // row a of G down column-of-rows b of the filter.
            float Gg[4][3];
            for (int a = 0; a < 4; a++)
            {
                for (int b = 0; b < 3; b++)
                {
                    const float* grow = g + b * 3;
                    Gg[a][b] = grow[0] * ktm[a][0] + grow[1] * ktm[a][1] + grow[2] * ktm[a][2];
                }
            }

            // Right multiply: U = Gg * G^T, written out row-major (4x4).
            for (int a = 0; a < 4; a++)
            {
                for (int b = 0; b < 4; b++)
                {
                    u[a*4 + b] = Gg[a][0] * ktm[b][0] + Gg[a][1] * ktm[b][1] + Gg[a][2] * ktm[b][2];
                }
            }
        }
    }
}
// Winograd F(2,3) 3x3 stride-1 convolution (x86), three stages:
//   1. pad input to a multiple of 2 (+2 border) and transform each 4x4 input
//      tile with B^T d B into bottom_blob_tm;
//   2. per-tile elementwise dot with the pre-transformed kernels (kernel_tm,
//      produced by conv3x3s1_winograd23_transform_kernel_sse) into top_blob_tm;
//   3. transform each 16-float tile back with A^T w A (adding bias) and crop
//      the padding off into top_blob.
static void conv3x3s1_winograd23_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 2n+2, winograd F(2,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 1) / 2 * 2;
outh = (outh + 1) / 2 * 2;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm/4; // may be the block num in Feathercnn
int nRowBlocks = w_tm/4;
const int tiles = nColBlocks * nRowBlocks;
// One 16-float transformed tile per (tile, input channel).
bottom_blob_tm.create(4*4, tiles, inch, 4u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 0.00f, 1.0f}
// };
#pragma omp parallel for num_threads(opt.num_threads)
for (int q=0; q<inch; q++)
{
const float* img = bottom_blob_bordered.channel(q);
float* out_tm0 = bottom_blob_tm.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
// Tiles overlap by 2: each consumes a 4x4 window, stepping 2 px.
const float* r0 = img + w * j * 2;
const float* r1 = r0 + w;
const float* r2 = r1 + w;
const float* r3 = r2 + w;
for (int i = 0; i < nRowBlocks; i++)
{
// NOTE(review): SIMD path is gated on __AVX__ only, so SSE-only builds
// take the scalar branch here even though only SSE intrinsics are used.
#if __AVX__
__m128 _d0, _d1, _d2, _d3;
__m128 _w0, _w1, _w2, _w3;
// load
_d0 = _mm_loadu_ps(r0);
_d1 = _mm_loadu_ps(r1);
_d2 = _mm_loadu_ps(r2);
_d3 = _mm_loadu_ps(r3);
// w = B_t * d
_w0 = _mm_sub_ps(_d0, _d2);
_w1 = _mm_add_ps(_d1, _d2);
_w2 = _mm_sub_ps(_d2, _d1);
_w3 = _mm_sub_ps(_d3, _d1);
// transpose d to d_t
_MM_TRANSPOSE4_PS(_w0, _w1, _w2, _w3);
// d = B_t * d_t
_d0 = _mm_sub_ps(_w0, _w2);
_d1 = _mm_add_ps(_w1, _w2);
_d2 = _mm_sub_ps(_w2, _w1);
_d3 = _mm_sub_ps(_w3, _w1);
// save to out_tm
_mm_storeu_ps(out_tm0, _d0);
_mm_storeu_ps(out_tm0+4, _d1);
_mm_storeu_ps(out_tm0+8, _d2);
_mm_storeu_ps(out_tm0+12, _d3);
#else
// Scalar equivalent of the same B^T d B transform.
float d0[4],d1[4],d2[4],d3[4];
float w0[4],w1[4],w2[4],w3[4];
float t0[4],t1[4],t2[4],t3[4];
// load
for (int n = 0; n < 4; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
}
// w = B_t * d
for (int n = 0; n < 4; n++)
{
w0[n] = d0[n] - d2[n];
w1[n] = d1[n] + d2[n];
w2[n] = d2[n] - d1[n];
w3[n] = d3[n] - d1[n];
}
// transpose d to d_t
{
t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3];
t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3];
t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3];
t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3];
}
// d = B_t * d_t
for (int n = 0; n < 4; n++)
{
d0[n] = t0[n] - t2[n];
d1[n] = t1[n] + t2[n];
d2[n] = t2[n] - t1[n];
d3[n] = t3[n] - t1[n];
}
// save to out_tm
for (int n = 0; n < 4; n++)
{
out_tm0[n ] = d0[n];
out_tm0[n+ 4] = d1[n];
out_tm0[n+ 8] = d2[n];
out_tm0[n+12] = d3[n];
}
#endif
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
out_tm0 += 16;
}
}
}
}
// Release the padded input early; only the transformed copy is needed now.
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm/4; // may be the block num in Feathercnn
int nRowBlocks = w_tm/4;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);
// Process output channels 4 at a time, then the remainder one at a time.
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
Mat out2_tm = top_blob_tm.channel(p+2);
Mat out3_tm = top_blob_tm.channel(p+3);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p+1);
const Mat kernel2_tm = kernel_tm.channel(p+2);
const Mat kernel3_tm = kernel_tm.channel(p+3);
for (int i=0; i<tiles; i++)
{
float* output0_tm = out0_tm.row(i);
float* output1_tm = out1_tm.row(i);
float* output2_tm = out2_tm.row(i);
float* output3_tm = out3_tm.row(i);
#if __AVX__
// 16-float tiles held as two 256-bit halves (_sumX / _sumXn),
// accumulated over input channels 4 at a time with FMA.
float zero_val = 0.f;
__m256 _sum0 = _mm256_broadcast_ss(&zero_val);
__m256 _sum0n = _mm256_broadcast_ss(&zero_val);
__m256 _sum1 = _mm256_broadcast_ss(&zero_val);
__m256 _sum1n = _mm256_broadcast_ss(&zero_val);
__m256 _sum2 = _mm256_broadcast_ss(&zero_val);
__m256 _sum2n = _mm256_broadcast_ss(&zero_val);
__m256 _sum3 = _mm256_broadcast_ss(&zero_val);
__m256 _sum3n = _mm256_broadcast_ss(&zero_val);
int q = 0;
for (; q+3<inch; q+=4)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* r1 = bottom_blob_tm.channel(q+1).row(i);
const float* r2 = bottom_blob_tm.channel(q+2).row(i);
const float* r3 = bottom_blob_tm.channel(q+3).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
__m256 _r0 = _mm256_loadu_ps(r0);
__m256 _r0n = _mm256_loadu_ps(r0+8);
// k0
__m256 _k0 = _mm256_loadu_ps(k0);
__m256 _k0n = _mm256_loadu_ps(k0+8);
__m256 _k1 = _mm256_loadu_ps(k1);
__m256 _k1n = _mm256_loadu_ps(k1+8);
__m256 _k2 = _mm256_loadu_ps(k2);
__m256 _k2n = _mm256_loadu_ps(k2+8);
__m256 _k3 = _mm256_loadu_ps(k3);
__m256 _k3n = _mm256_loadu_ps(k3+8);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
// k1
_r0 = _mm256_loadu_ps(r1);
_r0n = _mm256_loadu_ps(r1+8);
_k0 = _mm256_loadu_ps(k0+16);
_k0n = _mm256_loadu_ps(k0+24);
_k1 = _mm256_loadu_ps(k1+16);
_k1n = _mm256_loadu_ps(k1+24);
_k2 = _mm256_loadu_ps(k2+16);
_k2n = _mm256_loadu_ps(k2+24);
_k3 = _mm256_loadu_ps(k3+16);
_k3n = _mm256_loadu_ps(k3+24);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
// k2
_r0 = _mm256_loadu_ps(r2);
_r0n = _mm256_loadu_ps(r2+8);
_k0 = _mm256_loadu_ps(k0+32);
_k0n = _mm256_loadu_ps(k0+40);
_k1 = _mm256_loadu_ps(k1+32);
_k1n = _mm256_loadu_ps(k1+40);
_k2 = _mm256_loadu_ps(k2+32);
_k2n = _mm256_loadu_ps(k2+40);
_k3 = _mm256_loadu_ps(k3+32);
_k3n = _mm256_loadu_ps(k3+40);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
// k3
_r0 = _mm256_loadu_ps(r3);
_r0n = _mm256_loadu_ps(r3+8);
_k0 = _mm256_loadu_ps(k0+48);
_k0n = _mm256_loadu_ps(k0+56);
_k1 = _mm256_loadu_ps(k1+48);
_k1n = _mm256_loadu_ps(k1+56);
_k2 = _mm256_loadu_ps(k2+48);
_k2n = _mm256_loadu_ps(k2+56);
_k3 = _mm256_loadu_ps(k3+48);
_k3n = _mm256_loadu_ps(k3+56);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
}
// Remainder input channels, one at a time.
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
__m256 _r0 = _mm256_loadu_ps(r0);
__m256 _r0n = _mm256_loadu_ps(r0+8);
__m256 _k0 = _mm256_loadu_ps(k0);
__m256 _k0n = _mm256_loadu_ps(k0+8);
__m256 _k1 = _mm256_loadu_ps(k1);
__m256 _k1n = _mm256_loadu_ps(k1+8);
__m256 _k2 = _mm256_loadu_ps(k2);
__m256 _k2n = _mm256_loadu_ps(k2+8);
__m256 _k3 = _mm256_loadu_ps(k3);
__m256 _k3n = _mm256_loadu_ps(k3+8);
_sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
_sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
_sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
_sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
_sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
_sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
_sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
_sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
}
_mm256_storeu_ps(output0_tm, _sum0);
_mm256_storeu_ps(output0_tm+8, _sum0n);
_mm256_storeu_ps(output1_tm, _sum1);
_mm256_storeu_ps(output1_tm+8, _sum1n);
_mm256_storeu_ps(output2_tm, _sum2);
_mm256_storeu_ps(output2_tm+8, _sum2n);
_mm256_storeu_ps(output3_tm, _sum3);
_mm256_storeu_ps(output3_tm+8, _sum3n);
#else
// Scalar fallback: the kernel pointer stride of 16 floats per row makes
// kN point at kernelN_tm.row(q+1..q+3) for r1..r3.
float sum0[16] = {0.0f};
float sum1[16] = {0.0f};
float sum2[16] = {0.0f};
float sum3[16] = {0.0f};
int q = 0;
for (; q+3<inch; q+=4)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* r1 = bottom_blob_tm.channel(q+1).row(i);
const float* r2 = bottom_blob_tm.channel(q+2).row(i);
const float* r3 = bottom_blob_tm.channel(q+3).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
for (int n=0; n<16; n++)
{
sum0[n] += r0[n] * k0[n];
k0 += 16;
sum0[n] += r1[n] * k0[n];
k0 += 16;
sum0[n] += r2[n] * k0[n];
k0 += 16;
sum0[n] += r3[n] * k0[n];
k0 -= 16 * 3;
sum1[n] += r0[n] * k1[n];
k1 += 16;
sum1[n] += r1[n] * k1[n];
k1 += 16;
sum1[n] += r2[n] * k1[n];
k1 += 16;
sum1[n] += r3[n] * k1[n];
k1 -= 16 * 3;
sum2[n] += r0[n] * k2[n];
k2 += 16;
sum2[n] += r1[n] * k2[n];
k2 += 16;
sum2[n] += r2[n] * k2[n];
k2 += 16;
sum2[n] += r3[n] * k2[n];
k2 -= 16 * 3;
sum3[n] += r0[n] * k3[n];
k3 += 16;
sum3[n] += r1[n] * k3[n];
k3 += 16;
sum3[n] += r2[n] * k3[n];
k3 += 16;
sum3[n] += r3[n] * k3[n];
k3 -= 16 * 3;
}
}
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
for (int n=0; n<16; n++)
{
sum0[n] += r0[n] * k0[n];
sum1[n] += r0[n] * k1[n];
sum2[n] += r0[n] * k2[n];
sum3[n] += r0[n] * k3[n];
}
}
for (int n=0; n<16; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif
}
}
// Remainder output channels (outch % 4), one channel at a time.
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int i=0; i<tiles; i++)
{
float* output0_tm = out0_tm.row(i);
#if __AVX__ || __SSE__
#if __AVX__
__m256 sum_0 = _mm256_setzero_ps();
__m256 sum_8 = _mm256_setzero_ps();
int q = 0;
for (; q + 3 < inch; q += 4)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
const float* r3 = bottom_blob_tm.channel(q + 3).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel0_tm.row(q + 1);
const float* k2 = kernel0_tm.row(q + 2);
const float* k3 = kernel0_tm.row(q + 3);
__m256 r0_0 = _mm256_loadu_ps(r0 + 0);
__m256 r0_8 = _mm256_loadu_ps(r0 + 8);
__m256 r1_0 = _mm256_loadu_ps(r1 + 0);
__m256 r1_8 = _mm256_loadu_ps(r1 + 8);
__m256 r2_0 = _mm256_loadu_ps(r2 + 0);
__m256 r2_8 = _mm256_loadu_ps(r2 + 8);
__m256 r3_0 = _mm256_loadu_ps(r3 + 0);
__m256 r3_8 = _mm256_loadu_ps(r3 + 8);
__m256 k0_0 = _mm256_loadu_ps(k0 + 0);
__m256 k0_8 = _mm256_loadu_ps(k0 + 8);
__m256 k1_0 = _mm256_loadu_ps(k1 + 0);
__m256 k1_8 = _mm256_loadu_ps(k1 + 8);
__m256 k2_0 = _mm256_loadu_ps(k2 + 0);
__m256 k2_8 = _mm256_loadu_ps(k2 + 8);
__m256 k3_0 = _mm256_loadu_ps(k3 + 0);
__m256 k3_8 = _mm256_loadu_ps(k3 + 8);
sum_0 = _mm256_add_ps(_mm256_mul_ps(r0_0, k0_0), sum_0);
sum_8 = _mm256_add_ps(_mm256_mul_ps(r0_8, k0_8), sum_8);
sum_0 = _mm256_add_ps(_mm256_mul_ps(r1_0, k1_0), sum_0);
sum_8 = _mm256_add_ps(_mm256_mul_ps(r1_8, k1_8), sum_8);
sum_0 = _mm256_add_ps(_mm256_mul_ps(r2_0, k2_0), sum_0);
sum_8 = _mm256_add_ps(_mm256_mul_ps(r2_8, k2_8), sum_8);
sum_0 = _mm256_add_ps(_mm256_mul_ps(r3_0, k3_0), sum_0);
sum_8 = _mm256_add_ps(_mm256_mul_ps(r3_8, k3_8), sum_8);
}
for (; q < inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* k0 = kernel0_tm.row(q);
__m256 r0_0 = _mm256_loadu_ps(r0 + 0);
__m256 r0_8 = _mm256_loadu_ps(r0 + 8);
__m256 k0_0 = _mm256_loadu_ps(k0 + 0);
__m256 k0_8 = _mm256_loadu_ps(k0 + 8);
sum_0 = _mm256_add_ps(_mm256_mul_ps(r0_0, k0_0), sum_0);
sum_8 = _mm256_add_ps(_mm256_mul_ps(r0_8, k0_8), sum_8);
}
_mm256_storeu_ps(output0_tm, sum_0);
_mm256_storeu_ps(output0_tm + 8, sum_8);
#else
// SSE variant: same dot product with the 16-float tile split in four.
__m128 sum_0 = _mm_setzero_ps();
__m128 sum_4 = _mm_setzero_ps();
__m128 sum_8 = _mm_setzero_ps();
__m128 sum_12 = _mm_setzero_ps();
int q = 0;
for (; q + 3 < inch; q += 4)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
const float* r3 = bottom_blob_tm.channel(q + 3).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel0_tm.row(q + 1);
const float* k2 = kernel0_tm.row(q + 2);
const float* k3 = kernel0_tm.row(q + 3);
__m128 r0_0 = _mm_loadu_ps(r0 + 0);
__m128 r0_4 = _mm_loadu_ps(r0 + 4);
__m128 r0_8 = _mm_loadu_ps(r0 + 8);
__m128 r0_12 = _mm_loadu_ps(r0 + 12);
__m128 r1_0 = _mm_loadu_ps(r1 + 0);
__m128 r1_4 = _mm_loadu_ps(r1 + 4);
__m128 r1_8 = _mm_loadu_ps(r1 + 8);
__m128 r1_12 = _mm_loadu_ps(r1 + 12);
__m128 r2_0 = _mm_loadu_ps(r2 + 0);
__m128 r2_4 = _mm_loadu_ps(r2 + 4);
__m128 r2_8 = _mm_loadu_ps(r2 + 8);
__m128 r2_12 = _mm_loadu_ps(r2 + 12);
__m128 r3_0 = _mm_loadu_ps(r3 + 0);
__m128 r3_4 = _mm_loadu_ps(r3 + 4);
__m128 r3_8 = _mm_loadu_ps(r3 + 8);
__m128 r3_12 = _mm_loadu_ps(r3 + 12);
__m128 k0_0 = _mm_loadu_ps(k0 + 0);
__m128 k0_4 = _mm_loadu_ps(k0 + 4);
__m128 k0_8 = _mm_loadu_ps(k0 + 8);
__m128 k0_12 = _mm_loadu_ps(k0 + 12);
__m128 k1_0 = _mm_loadu_ps(k1 + 0);
__m128 k1_4 = _mm_loadu_ps(k1 + 4);
__m128 k1_8 = _mm_loadu_ps(k1 + 8);
__m128 k1_12 = _mm_loadu_ps(k1 + 12);
__m128 k2_0 = _mm_loadu_ps(k2 + 0);
__m128 k2_4 = _mm_loadu_ps(k2 + 4);
__m128 k2_8 = _mm_loadu_ps(k2 + 8);
__m128 k2_12 = _mm_loadu_ps(k2 + 12);
__m128 k3_0 = _mm_loadu_ps(k3 + 0);
__m128 k3_4 = _mm_loadu_ps(k3 + 4);
__m128 k3_8 = _mm_loadu_ps(k3 + 8);
__m128 k3_12 = _mm_loadu_ps(k3 + 12);
sum_0 = _mm_add_ps(_mm_mul_ps(r0_0, k0_0), sum_0);
sum_4 = _mm_add_ps(_mm_mul_ps(r0_4, k0_4), sum_4);
sum_8 = _mm_add_ps(_mm_mul_ps(r0_8, k0_8), sum_8);
sum_12 = _mm_add_ps(_mm_mul_ps(r0_12, k0_12), sum_12);
sum_0 = _mm_add_ps(_mm_mul_ps(r1_0, k1_0), sum_0);
sum_4 = _mm_add_ps(_mm_mul_ps(r1_4, k1_4), sum_4);
sum_8 = _mm_add_ps(_mm_mul_ps(r1_8, k1_8), sum_8);
sum_12 = _mm_add_ps(_mm_mul_ps(r1_12, k1_12), sum_12);
sum_0 = _mm_add_ps(_mm_mul_ps(r2_0, k2_0), sum_0);
sum_4 = _mm_add_ps(_mm_mul_ps(r2_4, k2_4), sum_4);
sum_8 = _mm_add_ps(_mm_mul_ps(r2_8, k2_8), sum_8);
sum_12 = _mm_add_ps(_mm_mul_ps(r2_12, k2_12), sum_12);
sum_0 = _mm_add_ps(_mm_mul_ps(r3_0, k3_0), sum_0);
sum_4 = _mm_add_ps(_mm_mul_ps(r3_4, k3_4), sum_4);
sum_8 = _mm_add_ps(_mm_mul_ps(r3_8, k3_8), sum_8);
sum_12 = _mm_add_ps(_mm_mul_ps(r3_12, k3_12), sum_12);
}
for (; q < inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* k0 = kernel0_tm.row(q);
__m128 r0_0 = _mm_loadu_ps(r0 + 0);
__m128 r0_4 = _mm_loadu_ps(r0 + 4);
__m128 r0_8 = _mm_loadu_ps(r0 + 8);
__m128 r0_12 = _mm_loadu_ps(r0 + 12);
__m128 k0_0 = _mm_loadu_ps(k0 + 0);
__m128 k0_4 = _mm_loadu_ps(k0 + 4);
__m128 k0_8 = _mm_loadu_ps(k0 + 8);
__m128 k0_12 = _mm_loadu_ps(k0 + 12);
sum_0 = _mm_add_ps(_mm_mul_ps(r0_0, k0_0), sum_0);
sum_4 = _mm_add_ps(_mm_mul_ps(r0_4, k0_4), sum_4);
sum_8 = _mm_add_ps(_mm_mul_ps(r0_8, k0_8), sum_8);
sum_12 = _mm_add_ps(_mm_mul_ps(r0_12, k0_12), sum_12);
}
_mm_storeu_ps(output0_tm, sum_0);
_mm_storeu_ps(output0_tm + 4, sum_4);
_mm_storeu_ps(output0_tm + 8, sum_8);
_mm_storeu_ps(output0_tm + 12, sum_12);
#endif
#else
// Pure scalar fallback.
float sum0[16] = {0.0f};
int q = 0;
for (; q+3<inch; q+=4)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* r1 = bottom_blob_tm.channel(q+1).row(i);
const float* r2 = bottom_blob_tm.channel(q+2).row(i);
const float* r3 = bottom_blob_tm.channel(q+3).row(i);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel0_tm.row(q+1);
const float* k2 = kernel0_tm.row(q+2);
const float* k3 = kernel0_tm.row(q+3);
for (int n=0; n<16; n++)
{
sum0[n] += r0[n] * k0[n];
sum0[n] += r1[n] * k1[n];
sum0[n] += r2[n] * k2[n];
sum0[n] += r3[n] * k3[n];
}
}
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q).row(i);
const float* k0 = kernel0_tm.row(q);
for (int n=0; n<16; n++)
{
sum0[n] += r0[n] * k0[n];
}
}
for (int n=0; n<16; n++)
{
output0_tm[n] = sum0[n];
}
#endif
}
}
}
// Transformed input is no longer needed; free the workspace.
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[2][4] = {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 1.0f}
// };
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm/4; // may be the block num in Feathercnn
int nRowBlocks = w_tm/4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out_tm = top_blob_tm.channel(p);
Mat out = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
for (int j=0; j<nColBlocks; j++)
{
// Each 16-float tile becomes a 2x2 output patch (rows j*2, j*2+1).
float* outRow0 = out.row(j*2);
float* outRow1 = out.row(j*2+1);
for(int i=0; i<nRowBlocks; i++)
{
float* out_tile = out_tm.row(j*nRowBlocks + i);
#if __AVX__ || __SSE__
__m128 s_0 = _mm_loadu_ps(out_tile);
__m128 s_4 = _mm_loadu_ps(out_tile + 4);
__m128 s_8 = _mm_loadu_ps(out_tile + 8);
__m128 s_12 = _mm_loadu_ps(out_tile + 12);
// Column transform A^T: w0 = s0+s1+s2, w1 = s1-s2+s3 (per lane).
__m128 w0 = _mm_add_ps(s_0, s_4);
w0 = _mm_add_ps(w0, s_8);
__m128 w1 = _mm_sub_ps(s_4, s_8);
w1 = _mm_add_ps(w1, s_12);
float w0_array[4], w1_array[4];
_mm_storeu_ps(w0_array, w0);
_mm_storeu_ps(w1_array, w1);
// save to top blob tm
// Row transform: combine lanes per A^T, then add the channel bias.
outRow0[0] = w0_array[0] + w0_array[1] + w0_array[2] + bias0;
outRow0[1] = w1_array[0] + w1_array[1] + w1_array[2] + bias0;
outRow1[0] = w0_array[1] - w0_array[2] + w0_array[3] + bias0;
outRow1[1] = w1_array[1] - w1_array[2] + w1_array[3] + bias0;
#else
float s0[4],s1[4],s2[4],s3[4];
float w0[4],w1[4];
float d0[2],d1[2],d2[2],d3[2];
float o0[2],o1[2];
// load
for (int n = 0; n < 4; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n+ 4];
s2[n] = out_tile[n+ 8];
s3[n] = out_tile[n+12];
}
// w = A_T * W
for (int n = 0; n < 4; n++)
{
w0[n] = s0[n] + s1[n] + s2[n];
w1[n] = s1[n] - s2[n] + s3[n];
}
// transpose w to w_t
{
d0[0] = w0[0]; d0[1] = w1[0];
d1[0] = w0[1]; d1[1] = w1[1];
d2[0] = w0[2]; d2[1] = w1[2];
d3[0] = w0[3]; d3[1] = w1[3];
}
// Y = A_T * w_t
for (int n = 0; n < 2; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + bias0;
o1[n] = d1[n] - d2[n] + d3[n] + bias0;
}
// save to top blob tm
outRow0[0] = o0[0];
outRow0[1] = o0[1];
outRow1[0] = o1[0];
outRow1[1] = o1[1];
#endif
outRow0 += 2;
outRow1 += 2;
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Transform 3x3 kernels to the Winograd F(4,3) domain (U = G g G^T, a 6x6
// tile per filter), then repack the 36 coefficients into 9 interleaved
// matrices (4 coefficients each) pushed into kernel_tm2, grouping output
// channels by 8, then 4, then singly, to match the layout the F(4,3) dot
// stage consumes.
static void conv3x3s1_winograd43_transform_kernel_sse(const Mat& kernel, std::vector<Mat> &kernel_tm2, int inch, int outch)
{
Mat kernel_tm(6*6, inch, outch);
// G
const float ktm[6][3] = {
{ 1.0f/4, 0.0f, 0.0f},
{ -1.0f/6, -1.0f/6, -1.0f/6},
{ -1.0f/6, 1.0f/6, -1.0f/6},
{ 1.0f/24, 1.0f/12, 1.0f/6},
{ 1.0f/24, -1.0f/12, 1.0f/6},
{ 0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
for (int q = 0; q<inch; q++)
{
const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#if __AVX__ || __SSE__
// NOTE(review): every load below pulls 4 floats from a 3-float row.
// The 4th lane is never summed, but _mm_loadu_ps(k2) reads one float
// past the 9-element kernel, and _mm_loadu_ps(ktm[5]) reads one float
// past the end of the 6x3 ktm array — out-of-bounds reads to confirm.
__m128 k0_data = _mm_loadu_ps(k0);
__m128 k1_data = _mm_loadu_ps(k1);
__m128 k2_data = _mm_loadu_ps(k2);
__m128 ktm0_data = _mm_loadu_ps(ktm[0]);
__m128 ktm1_data = _mm_loadu_ps(ktm[1]);
__m128 ktm2_data = _mm_loadu_ps(ktm[2]);
__m128 ktm3_data = _mm_loadu_ps(ktm[3]);
__m128 ktm4_data = _mm_loadu_ps(ktm[4]);
__m128 ktm5_data = _mm_loadu_ps(ktm[5]);
// Lane-wise products for the left multiply Gg; lanes 0-2 are summed below.
__m128 tmp00_data = _mm_mul_ps(k0_data, ktm0_data);
__m128 tmp01_data = _mm_mul_ps(k1_data, ktm0_data);
__m128 tmp02_data = _mm_mul_ps(k2_data, ktm0_data);
__m128 tmp10_data = _mm_mul_ps(k0_data, ktm1_data);
__m128 tmp11_data = _mm_mul_ps(k1_data, ktm1_data);
__m128 tmp12_data = _mm_mul_ps(k2_data, ktm1_data);
__m128 tmp20_data = _mm_mul_ps(k0_data, ktm2_data);
__m128 tmp21_data = _mm_mul_ps(k1_data, ktm2_data);
__m128 tmp22_data = _mm_mul_ps(k2_data, ktm2_data);
__m128 tmp30_data = _mm_mul_ps(k0_data, ktm3_data);
__m128 tmp31_data = _mm_mul_ps(k1_data, ktm3_data);
__m128 tmp32_data = _mm_mul_ps(k2_data, ktm3_data);
__m128 tmp40_data = _mm_mul_ps(k0_data, ktm4_data);
__m128 tmp41_data = _mm_mul_ps(k1_data, ktm4_data);
__m128 tmp42_data = _mm_mul_ps(k2_data, ktm4_data);
__m128 tmp50_data = _mm_mul_ps(k0_data, ktm5_data);
__m128 tmp51_data = _mm_mul_ps(k1_data, ktm5_data);
__m128 tmp52_data = _mm_mul_ps(k2_data, ktm5_data);
// h
// NOTE(review): .m128_f32 is MSVC-specific; does not build on GCC/Clang.
float tmp[6][3] = { 0 };
tmp[0][0] += tmp00_data.m128_f32[0] + tmp00_data.m128_f32[1] + tmp00_data.m128_f32[2];
tmp[0][1] += tmp01_data.m128_f32[0] + tmp01_data.m128_f32[1] + tmp01_data.m128_f32[2];
tmp[0][2] += tmp02_data.m128_f32[0] + tmp02_data.m128_f32[1] + tmp02_data.m128_f32[2];
tmp[1][0] += tmp10_data.m128_f32[0] + tmp10_data.m128_f32[1] + tmp10_data.m128_f32[2];
tmp[1][1] += tmp11_data.m128_f32[0] + tmp11_data.m128_f32[1] + tmp11_data.m128_f32[2];
tmp[1][2] += tmp12_data.m128_f32[0] + tmp12_data.m128_f32[1] + tmp12_data.m128_f32[2];
tmp[2][0] += tmp20_data.m128_f32[0] + tmp20_data.m128_f32[1] + tmp20_data.m128_f32[2];
tmp[2][1] += tmp21_data.m128_f32[0] + tmp21_data.m128_f32[1] + tmp21_data.m128_f32[2];
tmp[2][2] += tmp22_data.m128_f32[0] + tmp22_data.m128_f32[1] + tmp22_data.m128_f32[2];
tmp[3][0] += tmp30_data.m128_f32[0] + tmp30_data.m128_f32[1] + tmp30_data.m128_f32[2];
tmp[3][1] += tmp31_data.m128_f32[0] + tmp31_data.m128_f32[1] + tmp31_data.m128_f32[2];
tmp[3][2] += tmp32_data.m128_f32[0] + tmp32_data.m128_f32[1] + tmp32_data.m128_f32[2];
tmp[4][0] += tmp40_data.m128_f32[0] + tmp40_data.m128_f32[1] + tmp40_data.m128_f32[2];
tmp[4][1] += tmp41_data.m128_f32[0] + tmp41_data.m128_f32[1] + tmp41_data.m128_f32[2];
tmp[4][2] += tmp42_data.m128_f32[0] + tmp42_data.m128_f32[1] + tmp42_data.m128_f32[2];
tmp[5][0] += tmp50_data.m128_f32[0] + tmp50_data.m128_f32[1] + tmp50_data.m128_f32[2];
tmp[5][1] += tmp51_data.m128_f32[0] + tmp51_data.m128_f32[1] + tmp51_data.m128_f32[2];
tmp[5][2] += tmp52_data.m128_f32[0] + tmp52_data.m128_f32[1] + tmp52_data.m128_f32[2];
// U
// NOTE(review): _mm_loadu_ps(tmp[5]) likewise reads one float past tmp[6][3].
for (int j = 0; j < 6; j++)
{
__m128 tmpp = _mm_loadu_ps(tmp[j]);
__m128 result = _mm_mul_ps(tmpp, ktm0_data);
kernel_tm0[j * 6 + 0] = result.m128_f32[0] + result.m128_f32[1] + result.m128_f32[2];
result = _mm_mul_ps(tmpp, ktm1_data);
kernel_tm0[j * 6 + 1] = result.m128_f32[0] + result.m128_f32[1] + result.m128_f32[2];
result = _mm_mul_ps(tmpp, ktm2_data);
kernel_tm0[j * 6 + 2] = result.m128_f32[0] + result.m128_f32[1] + result.m128_f32[2];
result = _mm_mul_ps(tmpp, ktm3_data);
kernel_tm0[j * 6 + 3] = result.m128_f32[0] + result.m128_f32[1] + result.m128_f32[2];
result = _mm_mul_ps(tmpp, ktm4_data);
kernel_tm0[j * 6 + 4] = result.m128_f32[0] + result.m128_f32[1] + result.m128_f32[2];
result = _mm_mul_ps(tmpp, ktm5_data);
kernel_tm0[j * 6 + 5] = result.m128_f32[0] + result.m128_f32[1] + result.m128_f32[2];
}
#else
// h
// Scalar path: same U = G g G^T via a 6x3 intermediate.
float tmp[6][3];
for (int i=0; i<6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j=0; j<6; j++)
{
float* tmpp = &tmp[j][0];
for (int i=0; i<6; i++)
{
kernel_tm0[j*6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
#endif
}
}
// Repack: 9 passes (r = 0..8), each extracting coefficients [r*4, r*4+4)
// of every 36-float transformed kernel into one interleaved Mat.
for (int r=0; r<9; r++)
{
Mat kernel_tm_test(4*8, inch, outch/8 + (outch%8)/4 + outch%4);
int p = 0;
// Groups of 8 output channels: 32 floats per input channel.
for (; p+7<outch; p+=8)
{
const float* kernel0 = (const float*)kernel_tm.channel(p);
const float* kernel1 = (const float*)kernel_tm.channel(p+1);
const float* kernel2 = (const float*)kernel_tm.channel(p+2);
const float* kernel3 = (const float*)kernel_tm.channel(p+3);
const float* kernel4 = (const float*)kernel_tm.channel(p+4);
const float* kernel5 = (const float*)kernel_tm.channel(p+5);
const float* kernel6 = (const float*)kernel_tm.channel(p+6);
const float* kernel7 = (const float*)kernel_tm.channel(p+7);
float* ktmp = kernel_tm_test.channel(p/8);
for (int q=0; q<inch; q++)
{
#if __AVX__ || __SSE__
int offset = r * 4;
__m128 temp = _mm_loadu_ps(kernel0 + offset);
_mm_storeu_ps(ktmp, temp);
temp = _mm_loadu_ps(kernel1 + offset);
_mm_storeu_ps(ktmp + 4, temp);
temp = _mm_loadu_ps(kernel2 + offset);
_mm_storeu_ps(ktmp + 8, temp);
temp = _mm_loadu_ps(kernel3 + offset);
_mm_storeu_ps(ktmp + 12, temp);
temp = _mm_loadu_ps(kernel4 + offset);
_mm_storeu_ps(ktmp + 16, temp);
temp = _mm_loadu_ps(kernel5 + offset);
_mm_storeu_ps(ktmp + 20, temp);
temp = _mm_loadu_ps(kernel6 + offset);
_mm_storeu_ps(ktmp + 24, temp);
temp = _mm_loadu_ps(kernel7 + offset);
_mm_storeu_ps(ktmp + 28, temp);
#else
ktmp[0] = kernel0[r*4+0];
ktmp[1] = kernel0[r*4+1];
ktmp[2] = kernel0[r*4+2];
ktmp[3] = kernel0[r*4+3];
ktmp[4] = kernel1[r*4+0];
ktmp[5] = kernel1[r*4+1];
ktmp[6] = kernel1[r*4+2];
ktmp[7] = kernel1[r*4+3];
ktmp[8] = kernel2[r*4+0];
ktmp[9] = kernel2[r*4+1];
ktmp[10] = kernel2[r*4+2];
ktmp[11] = kernel2[r*4+3];
ktmp[12] = kernel3[r*4+0];
ktmp[13] = kernel3[r*4+1];
ktmp[14] = kernel3[r*4+2];
ktmp[15] = kernel3[r*4+3];
ktmp[16] = kernel4[r*4+0];
ktmp[17] = kernel4[r*4+1];
ktmp[18] = kernel4[r*4+2];
ktmp[19] = kernel4[r*4+3];
ktmp[20] = kernel5[r*4+0];
ktmp[21] = kernel5[r*4+1];
ktmp[22] = kernel5[r*4+2];
ktmp[23] = kernel5[r*4+3];
ktmp[24] = kernel6[r*4+0];
ktmp[25] = kernel6[r*4+1];
ktmp[26] = kernel6[r*4+2];
ktmp[27] = kernel6[r*4+3];
ktmp[28] = kernel7[r*4+0];
ktmp[29] = kernel7[r*4+1];
ktmp[30] = kernel7[r*4+2];
ktmp[31] = kernel7[r*4+3];
#endif
ktmp += 32;
// Advance each source by one transformed kernel (36 floats).
kernel0 += 36;
kernel1 += 36;
kernel2 += 36;
kernel3 += 36;
kernel4 += 36;
kernel5 += 36;
kernel6 += 36;
kernel7 += 36;
}
}
// Groups of 4 output channels: 16 floats per input channel.
for (; p+3<outch; p+=4)
{
const float* kernel0 = (const float*)kernel_tm.channel(p);
const float* kernel1 = (const float*)kernel_tm.channel(p+1);
const float* kernel2 = (const float*)kernel_tm.channel(p+2);
const float* kernel3 = (const float*)kernel_tm.channel(p+3);
float* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4);
for (int q=0; q<inch; q++)
{
#if __AVX__ || __SSE__
int offset = r * 4;
__m128 temp = _mm_loadu_ps(kernel0 + offset);
_mm_storeu_ps(ktmp, temp);
temp = _mm_loadu_ps(kernel1 + offset);
_mm_storeu_ps(ktmp + 4, temp);
temp = _mm_loadu_ps(kernel2 + offset);
_mm_storeu_ps(ktmp + 8, temp);
temp = _mm_loadu_ps(kernel3 + offset);
_mm_storeu_ps(ktmp + 12, temp);
#else
ktmp[0] = kernel0[r*4+0];
ktmp[1] = kernel0[r*4+1];
ktmp[2] = kernel0[r*4+2];
ktmp[3] = kernel0[r*4+3];
ktmp[4] = kernel1[r*4+0];
ktmp[5] = kernel1[r*4+1];
ktmp[6] = kernel1[r*4+2];
ktmp[7] = kernel1[r*4+3];
ktmp[8] = kernel2[r*4+0];
ktmp[9] = kernel2[r*4+1];
ktmp[10] = kernel2[r*4+2];
ktmp[11] = kernel2[r*4+3];
ktmp[12] = kernel3[r*4+0];
ktmp[13] = kernel3[r*4+1];
ktmp[14] = kernel3[r*4+2];
ktmp[15] = kernel3[r*4+3];
#endif
ktmp += 16;
kernel0 += 36;
kernel1 += 36;
kernel2 += 36;
kernel3 += 36;
}
}
// Remaining single output channels: 4 floats per input channel.
for (; p<outch; p++)
{
const float* kernel0 = (const float*)kernel_tm.channel(p);
float* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4 + p%4);
for (int q=0; q<inch; q++)
{
#if __AVX__ || __SSE__
int offset = r * 4;
__m128 temp = _mm_loadu_ps(kernel0 + offset);
_mm_storeu_ps(ktmp, temp);
#else
ktmp[0] = kernel0[r*4+0];
ktmp[1] = kernel0[r*4+1];
ktmp[2] = kernel0[r*4+2];
ktmp[3] = kernel0[r*4+3];
#endif
ktmp += 4;
kernel0 += 36;
}
}
kernel_tm2.push_back(kernel_tm_test);
}
}
static void conv3x3s1_winograd43_sse(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
const float* bias = _bias;
// pad to 4n+2, winograd F(4,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm/6; // may be the block num in Feathercnn
int nRowBlocks = w_tm/6;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4, inch, tiles*9, elemsize, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
#if __AVX__ || __SSE__
#if __AVX__
__m256 _1_n = _mm256_set1_ps(-1);
__m256 _2_p = _mm256_set1_ps(2);
__m256 _2_n = _mm256_set1_ps(-2);
__m256 _4_p = _mm256_set1_ps(4);
__m256 _4_n = _mm256_set1_ps(-4);
__m256 _5_n = _mm256_set1_ps(-5);
#else
__m128 _1_n = _mm_set1_ps(-1);
__m128 _2_p = _mm_set1_ps(2);
__m128 _2_n = _mm_set1_ps(-2);
__m128 _4_p = _mm_set1_ps(4);
__m128 _4_n = _mm_set1_ps(-4);
__m128 _5_n = _mm_set1_ps(-5);
#endif
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int q=0; q<inch; q++)
{
const float* img = bottom_blob_bordered.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const float* r0 = img + w * j * 4;
const float* r1 = r0 + w;
const float* r2 = r1 + w;
const float* r3 = r2 + w;
const float* r4 = r3 + w;
const float* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
float* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row(q);
float* out_tm1 = bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row(q);
float* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row(q);
float* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row(q);
float* out_tm4 = bottom_blob_tm.channel(tiles*4+j*nRowBlocks+i).row(q);
float* out_tm5 = bottom_blob_tm.channel(tiles*5+j*nRowBlocks+i).row(q);
float* out_tm6 = bottom_blob_tm.channel(tiles*6+j*nRowBlocks+i).row(q);
float* out_tm7 = bottom_blob_tm.channel(tiles*7+j*nRowBlocks+i).row(q);
float* out_tm8 = bottom_blob_tm.channel(tiles*8+j*nRowBlocks+i).row(q);
#if __AVX__ || __SSE__
#if __AVX__
__m256 _d0, _d1, _d2, _d3, _d4, _d5;
__m256 _w0, _w1, _w2, _w3, _w4, _w5;
__m256 _t0, _t1, _t2, _t3, _t4, _t5;
__m256 _n0, _n1, _n2, _n3, _n4, _n5;
// load
_d0 = _mm256_loadu_ps(r0);
_d1 = _mm256_loadu_ps(r1);
_d2 = _mm256_loadu_ps(r2);
_d3 = _mm256_loadu_ps(r3);
_d4 = _mm256_loadu_ps(r4);
_d5 = _mm256_loadu_ps(r5);
// w = B_t * d
_w0 = _mm256_mul_ps(_d0, _4_p);
_w0 = _mm256_fmadd_ps(_d2, _5_n, _w0);
_w0 = _mm256_add_ps(_w0, _d4);
_w1 = _mm256_mul_ps(_d1, _4_n);
_w1 = _mm256_fmadd_ps(_d2, _4_n, _w1);
_w1 = _mm256_add_ps(_w1, _d3);
_w1 = _mm256_add_ps(_w1, _d4);
_w2 = _mm256_mul_ps(_d1, _4_p);
_w2 = _mm256_fmadd_ps(_d2, _4_n, _w2);
_w2 = _mm256_fmadd_ps(_d3, _1_n, _w2);
_w2 = _mm256_add_ps(_w2, _d4);
_w3 = _mm256_mul_ps(_d1, _2_n);
_w3 = _mm256_fmadd_ps(_d2, _1_n, _w3);
_w3 = _mm256_fmadd_ps(_d3, _2_p, _w3);
_w3 = _mm256_add_ps(_w3, _d4);
_w4 = _mm256_mul_ps(_d1, _2_p);
_w4 = _mm256_fmadd_ps(_d2, _1_n, _w4);
_w4 = _mm256_fmadd_ps(_d3, _2_n, _w4);
_w4 = _mm256_add_ps(_w4, _d4);
_w5 = _mm256_mul_ps(_d1, _4_p);
_w5 = _mm256_fmadd_ps(_d3, _5_n, _w5);
_w5 = _mm256_add_ps(_w5, _d5);
// transpose d to d_t
#ifdef _WIN32
{
_t0.m256_f32[0]=_w0.m256_f32[0]; _t1.m256_f32[0]=_w0.m256_f32[1]; _t2.m256_f32[0]=_w0.m256_f32[2]; _t3.m256_f32[0]=_w0.m256_f32[3]; _t4.m256_f32[0]=_w0.m256_f32[4]; _t5.m256_f32[0]=_w0.m256_f32[5];
_t0.m256_f32[1]=_w1.m256_f32[0]; _t1.m256_f32[1]=_w1.m256_f32[1]; _t2.m256_f32[1]=_w1.m256_f32[2]; _t3.m256_f32[1]=_w1.m256_f32[3]; _t4.m256_f32[1]=_w1.m256_f32[4]; _t5.m256_f32[1]=_w1.m256_f32[5];
_t0.m256_f32[2]=_w2.m256_f32[0]; _t1.m256_f32[2]=_w2.m256_f32[1]; _t2.m256_f32[2]=_w2.m256_f32[2]; _t3.m256_f32[2]=_w2.m256_f32[3]; _t4.m256_f32[2]=_w2.m256_f32[4]; _t5.m256_f32[2]=_w2.m256_f32[5];
_t0.m256_f32[3]=_w3.m256_f32[0]; _t1.m256_f32[3]=_w3.m256_f32[1]; _t2.m256_f32[3]=_w3.m256_f32[2]; _t3.m256_f32[3]=_w3.m256_f32[3]; _t4.m256_f32[3]=_w3.m256_f32[4]; _t5.m256_f32[3]=_w3.m256_f32[5];
_t0.m256_f32[4]=_w4.m256_f32[0]; _t1.m256_f32[4]=_w4.m256_f32[1]; _t2.m256_f32[4]=_w4.m256_f32[2]; _t3.m256_f32[4]=_w4.m256_f32[3]; _t4.m256_f32[4]=_w4.m256_f32[4]; _t5.m256_f32[4]=_w4.m256_f32[5];
_t0.m256_f32[5]=_w5.m256_f32[0]; _t1.m256_f32[5]=_w5.m256_f32[1]; _t2.m256_f32[5]=_w5.m256_f32[2]; _t3.m256_f32[5]=_w5.m256_f32[3]; _t4.m256_f32[5]=_w5.m256_f32[4]; _t5.m256_f32[5]=_w5.m256_f32[5];
}
#else
{
_t0[0]=_w0[0]; _t1[0]=_w0[1]; _t2[0]=_w0[2]; _t3[0]=_w0[3]; _t4[0]=_w0[4]; _t5[0]=_w0[5];
_t0[1]=_w1[0]; _t1[1]=_w1[1]; _t2[1]=_w1[2]; _t3[1]=_w1[3]; _t4[1]=_w1[4]; _t5[1]=_w1[5];
_t0[2]=_w2[0]; _t1[2]=_w2[1]; _t2[2]=_w2[2]; _t3[2]=_w2[3]; _t4[2]=_w2[4]; _t5[2]=_w2[5];
_t0[3]=_w3[0]; _t1[3]=_w3[1]; _t2[3]=_w3[2]; _t3[3]=_w3[3]; _t4[3]=_w3[4]; _t5[3]=_w3[5];
_t0[4]=_w4[0]; _t1[4]=_w4[1]; _t2[4]=_w4[2]; _t3[4]=_w4[3]; _t4[4]=_w4[4]; _t5[4]=_w4[5];
_t0[5]=_w5[0]; _t1[5]=_w5[1]; _t2[5]=_w5[2]; _t3[5]=_w5[3]; _t4[5]=_w5[4]; _t5[5]=_w5[5];
}
#endif //!_WIN32
// d = B_t * d_t
_n0 = _mm256_mul_ps(_t0, _4_p);
_n0 = _mm256_fmadd_ps(_t2, _5_n, _n0);
_n0 = _mm256_add_ps(_n0, _t4);
_n1 = _mm256_mul_ps(_t1, _4_n);
_n1 = _mm256_fmadd_ps(_t2, _4_n, _n1);
_n1 = _mm256_add_ps(_n1, _t3);
_n1 = _mm256_add_ps(_n1, _t4);
_n2 = _mm256_mul_ps(_t1, _4_p);
_n2 = _mm256_fmadd_ps(_t2, _4_n, _n2);
_n2 = _mm256_fmadd_ps(_t3, _1_n, _n2);
_n2 = _mm256_add_ps(_n2, _t4);
_n3 = _mm256_mul_ps(_t1, _2_n);
_n3 = _mm256_fmadd_ps(_t2, _1_n, _n3);
_n3 = _mm256_fmadd_ps(_t3, _2_p, _n3);
_n3 = _mm256_add_ps(_n3, _t4);
_n4 = _mm256_mul_ps(_t1, _2_p);
_n4 = _mm256_fmadd_ps(_t2, _1_n, _n4);
_n4 = _mm256_fmadd_ps(_t3, _2_n, _n4);
_n4 = _mm256_add_ps(_n4, _t4);
_n5 = _mm256_mul_ps(_t1, _4_p);
_n5 = _mm256_fmadd_ps(_t3, _5_n, _n5);
_n5 = _mm256_add_ps(_n5, _t5);
// save to out_tm
float output_n0[8] = {0.f};_mm256_storeu_ps(output_n0, _n0);
float output_n1[8] = {0.f};_mm256_storeu_ps(output_n1, _n1);
float output_n2[8] = {0.f};_mm256_storeu_ps(output_n2, _n2);
float output_n3[8] = {0.f};_mm256_storeu_ps(output_n3, _n3);
float output_n4[8] = {0.f};_mm256_storeu_ps(output_n4, _n4);
float output_n5[8] = {0.f};_mm256_storeu_ps(output_n5, _n5);
out_tm0[0]=output_n0[0];out_tm0[1]=output_n0[1];out_tm0[2]=output_n0[2];out_tm0[3]=output_n0[3];
out_tm1[0]=output_n0[4];out_tm1[1]=output_n0[5];out_tm1[2]=output_n1[0];out_tm1[3]=output_n1[1];
out_tm2[0]=output_n1[2];out_tm2[1]=output_n1[3];out_tm2[2]=output_n1[4];out_tm2[3]=output_n1[5];
out_tm3[0]=output_n2[0];out_tm3[1]=output_n2[1];out_tm3[2]=output_n2[2];out_tm3[3]=output_n2[3];
out_tm4[0]=output_n2[4];out_tm4[1]=output_n2[5];out_tm4[2]=output_n3[0];out_tm4[3]=output_n3[1];
out_tm5[0]=output_n3[2];out_tm5[1]=output_n3[3];out_tm5[2]=output_n3[4];out_tm5[3]=output_n3[5];
out_tm6[0]=output_n4[0];out_tm6[1]=output_n4[1];out_tm6[2]=output_n4[2];out_tm6[3]=output_n4[3];
out_tm7[0]=output_n4[4];out_tm7[1]=output_n4[5];out_tm7[2]=output_n5[0];out_tm7[3]=output_n5[1];
out_tm8[0]=output_n5[2];out_tm8[1]=output_n5[3];out_tm8[2]=output_n5[4];out_tm8[3]=output_n5[5];
#else
__m128 _d0_0, _d1_0, _d2_0, _d3_0, _d4_0, _d5_0;
__m128 _w0_0, _w1_0, _w2_0, _w3_0, _w4_0, _w5_0;
__m128 _t0_0, _t1_0, _t2_0, _t3_0, _t4_0, _t5_0;
__m128 _n0_0, _n1_0, _n2_0, _n3_0, _n4_0, _n5_0;
__m128 _d0_4, _d1_4, _d2_4, _d3_4, _d4_4, _d5_4;
__m128 _w0_4, _w1_4, _w2_4, _w3_4, _w4_4, _w5_4;
__m128 _t0_4, _t1_4, _t2_4, _t3_4, _t4_4, _t5_4;
__m128 _n0_4, _n1_4, _n2_4, _n3_4, _n4_4, _n5_4;
// load
_d0_0 = _mm_loadu_ps(r0);
_d1_0 = _mm_loadu_ps(r1);
_d2_0 = _mm_loadu_ps(r2);
_d3_0 = _mm_loadu_ps(r3);
_d4_0 = _mm_loadu_ps(r4);
_d5_0 = _mm_loadu_ps(r5);
_d0_4 = _mm_loadu_ps(r0 + 4);
_d1_4 = _mm_loadu_ps(r1 + 4);
_d2_4 = _mm_loadu_ps(r2 + 4);
_d3_4 = _mm_loadu_ps(r3 + 4);
_d4_4 = _mm_loadu_ps(r4 + 4);
_d5_4 = _mm_loadu_ps(r5 + 4);
// w = B_t * d
_w0_0 = _mm_mul_ps(_d0_0, _4_p);
_w0_0 = _mm_add_ps(_mm_mul_ps(_d2_0, _5_n), _w0_0);
_w0_0 = _mm_add_ps(_w0_0, _d4_0);
_w0_4 = _mm_mul_ps(_d0_4, _4_p);
_w0_4 = _mm_add_ps(_mm_mul_ps(_d2_4, _5_n), _w0_4);
_w0_4 = _mm_add_ps(_w0_4, _d4_4);
_w1_0 = _mm_mul_ps(_d1_0, _4_n);
_w1_0 = _mm_add_ps(_mm_mul_ps(_d2_0, _4_n), _w1_0);
_w1_0 = _mm_add_ps(_w1_0, _d3_0);
_w1_0 = _mm_add_ps(_w1_0, _d4_0);
_w1_4 = _mm_mul_ps(_d1_4, _4_n);
_w1_4 = _mm_add_ps(_mm_mul_ps(_d2_4, _4_n), _w1_4);
_w1_4 = _mm_add_ps(_w1_4, _d3_4);
_w1_4 = _mm_add_ps(_w1_4, _d4_4);
_w2_0 = _mm_mul_ps(_d1_0, _4_p);
_w2_0 = _mm_add_ps(_mm_mul_ps(_d2_0, _4_n), _w2_0);
_w2_0 = _mm_add_ps(_mm_mul_ps(_d3_0, _1_n), _w2_0);
_w2_0 = _mm_add_ps(_w2_0, _d4_0);
_w2_4 = _mm_mul_ps(_d1_4, _4_p);
_w2_4 = _mm_add_ps(_mm_mul_ps(_d2_4, _4_n), _w2_4);
_w2_4 = _mm_add_ps(_mm_mul_ps(_d3_4, _1_n), _w2_4);
_w2_4 = _mm_add_ps(_w2_4, _d4_4);
_w3_0 = _mm_mul_ps(_d1_0, _2_n);
_w3_0 = _mm_add_ps(_mm_mul_ps(_d2_0, _1_n), _w3_0);
_w3_0 = _mm_add_ps(_mm_mul_ps(_d3_0, _2_p), _w3_0);
_w3_0 = _mm_add_ps(_w3_0, _d4_0);
_w3_4 = _mm_mul_ps(_d1_4, _2_n);
_w3_4 = _mm_add_ps(_mm_mul_ps(_d2_4, _1_n), _w3_4);
_w3_4 = _mm_add_ps(_mm_mul_ps(_d3_4, _2_p), _w3_4);
_w3_4 = _mm_add_ps(_w3_4, _d4_4);
_w4_0 = _mm_mul_ps(_d1_0, _2_p);
_w4_0 = _mm_add_ps(_mm_mul_ps(_d2_0, _1_n), _w4_0);
_w4_0 = _mm_add_ps(_mm_mul_ps(_d3_0, _2_n), _w4_0);
_w4_0 = _mm_add_ps(_w4_0, _d4_0);
_w4_4 = _mm_mul_ps(_d1_4, _2_p);
_w4_4 = _mm_add_ps(_mm_mul_ps(_d2_4, _1_n), _w4_4);
_w4_4 = _mm_add_ps(_mm_mul_ps(_d3_4, _2_n), _w4_4);
_w4_4 = _mm_add_ps(_w4_4, _d4_4);
_w5_0 = _mm_mul_ps(_d1_0, _4_p);
_w5_0 = _mm_add_ps(_mm_mul_ps(_d3_0, _5_n), _w5_0);
_w5_0 = _mm_add_ps(_w5_0, _d5_0);
_w5_4 = _mm_mul_ps(_d1_4, _4_p);
_w5_4 = _mm_add_ps(_mm_mul_ps(_d3_4, _5_n), _w5_4);
_w5_4 = _mm_add_ps(_w5_4, _d5_4);
// transpose d to d_t
{
_t0_0.m128_f32[0] = _w0_0.m128_f32[0]; _t1_0.m128_f32[0] = _w0_0.m128_f32[1]; _t2_0.m128_f32[0] = _w0_0.m128_f32[2]; _t3_0.m128_f32[0] = _w0_0.m128_f32[3]; _t4_0.m128_f32[0] = _w0_4.m128_f32[0]; _t5_0.m128_f32[0] = _w0_4.m128_f32[1];
_t0_0.m128_f32[1] = _w1_0.m128_f32[0]; _t1_0.m128_f32[1] = _w1_0.m128_f32[1]; _t2_0.m128_f32[1] = _w1_0.m128_f32[2]; _t3_0.m128_f32[1] = _w1_0.m128_f32[3]; _t4_0.m128_f32[1] = _w1_4.m128_f32[0]; _t5_0.m128_f32[1] = _w1_4.m128_f32[1];
_t0_0.m128_f32[2] = _w2_0.m128_f32[0]; _t1_0.m128_f32[2] = _w2_0.m128_f32[1]; _t2_0.m128_f32[2] = _w2_0.m128_f32[2]; _t3_0.m128_f32[2] = _w2_0.m128_f32[3]; _t4_0.m128_f32[2] = _w2_4.m128_f32[0]; _t5_0.m128_f32[2] = _w2_4.m128_f32[1];
_t0_0.m128_f32[3] = _w3_0.m128_f32[0]; _t1_0.m128_f32[3] = _w3_0.m128_f32[1]; _t2_0.m128_f32[3] = _w3_0.m128_f32[2]; _t3_0.m128_f32[3] = _w3_0.m128_f32[3]; _t4_0.m128_f32[3] = _w3_4.m128_f32[0]; _t5_0.m128_f32[3] = _w3_4.m128_f32[1];
_t0_4.m128_f32[0] = _w4_0.m128_f32[0]; _t1_4.m128_f32[0] = _w4_0.m128_f32[1]; _t2_4.m128_f32[0] = _w4_0.m128_f32[2]; _t3_4.m128_f32[0] = _w4_0.m128_f32[3]; _t4_4.m128_f32[0] = _w4_4.m128_f32[0]; _t5_4.m128_f32[0] = _w4_4.m128_f32[1];
_t0_4.m128_f32[1] = _w5_0.m128_f32[0]; _t1_4.m128_f32[1] = _w5_0.m128_f32[1]; _t2_4.m128_f32[1] = _w5_0.m128_f32[2]; _t3_4.m128_f32[1] = _w5_0.m128_f32[3]; _t4_4.m128_f32[1] = _w5_4.m128_f32[0]; _t5_4.m128_f32[1] = _w5_4.m128_f32[1];
}
// d = B_t * d_t
_n0_0 = _mm_mul_ps(_t0_0, _4_p);
_n0_0 = _mm_add_ps(_mm_mul_ps(_t2_0, _5_n), _n0_0);
_n0_0 = _mm_add_ps(_n0_0, _t4_0);
_n0_4 = _mm_mul_ps(_t0_4, _4_p);
_n0_4 = _mm_add_ps(_mm_mul_ps(_t2_4, _5_n), _n0_4);
_n0_4 = _mm_add_ps(_n0_4, _t4_4);
_n1_0 = _mm_mul_ps(_t1_0, _4_n);
_n1_0 = _mm_add_ps(_mm_mul_ps(_t2_0, _4_n), _n1_0);
_n1_0 = _mm_add_ps(_n1_0, _t3_0);
_n1_0 = _mm_add_ps(_n1_0, _t4_0);
_n1_4 = _mm_mul_ps(_t1_4, _4_n);
_n1_4 = _mm_add_ps(_mm_mul_ps(_t2_4, _4_n), _n1_4);
_n1_4 = _mm_add_ps(_n1_4, _t3_4);
_n1_4 = _mm_add_ps(_n1_4, _t4_4);
_n2_0 = _mm_mul_ps(_t1_0, _4_p);
_n2_0 = _mm_add_ps(_mm_mul_ps(_t2_0, _4_n), _n2_0);
_n2_0 = _mm_add_ps(_mm_mul_ps(_t3_0, _1_n), _n2_0);
_n2_0 = _mm_add_ps(_n2_0, _t4_0);
_n2_4 = _mm_mul_ps(_t1_4, _4_p);
_n2_4 = _mm_add_ps(_mm_mul_ps(_t2_4, _4_n), _n2_4);
_n2_4 = _mm_add_ps(_mm_mul_ps(_t3_4, _1_n), _n2_4);
_n2_4 = _mm_add_ps(_n2_4, _t4_4);
_n3_0 = _mm_mul_ps(_t1_0, _2_n);
_n3_0 = _mm_add_ps(_mm_mul_ps(_t2_0, _1_n), _n3_0);
_n3_0 = _mm_add_ps(_mm_mul_ps(_t3_0, _2_p), _n3_0);
_n3_0 = _mm_add_ps(_n3_0, _t4_0);
_n3_4 = _mm_mul_ps(_t1_4, _2_n);
_n3_4 = _mm_add_ps(_mm_mul_ps(_t2_4, _1_n), _n3_4);
_n3_4 = _mm_add_ps(_mm_mul_ps(_t3_4, _2_p), _n3_4);
_n3_4 = _mm_add_ps(_n3_4, _t4_4);
_n4_0 = _mm_mul_ps(_t1_0, _2_p);
_n4_0 = _mm_add_ps(_mm_mul_ps(_t2_0, _1_n), _n4_0);
_n4_0 = _mm_add_ps(_mm_mul_ps(_t3_0, _2_n), _n4_0);
_n4_0 = _mm_add_ps(_n4_0, _t4_0);
_n4_4 = _mm_mul_ps(_t1_4, _2_p);
_n4_4 = _mm_add_ps(_mm_mul_ps(_t2_4, _1_n), _n4_4);
_n4_4 = _mm_add_ps(_mm_mul_ps(_t3_4, _2_n), _n4_4);
_n4_4 = _mm_add_ps(_n4_4, _t4_4);
_n5_0 = _mm_mul_ps(_t1_0, _4_p);
_n5_0 = _mm_add_ps(_mm_mul_ps(_t3_0, _5_n), _n5_0);
_n5_0 = _mm_add_ps(_n5_0, _t5_0);
_n5_4 = _mm_mul_ps(_t1_4, _4_p);
_n5_4 = _mm_add_ps(_mm_mul_ps(_t3_4, _5_n), _n5_4);
_n5_4 = _mm_add_ps(_n5_4, _t5_4);
// save to out_tm
float output_n0[8] = { 0.f }; _mm_storeu_ps(output_n0, _n0_0); _mm_storeu_ps(output_n0 + 4, _n0_4);
float output_n1[8] = { 0.f }; _mm_storeu_ps(output_n1, _n1_0); _mm_storeu_ps(output_n1 + 4, _n1_4);
float output_n2[8] = { 0.f }; _mm_storeu_ps(output_n2, _n2_0); _mm_storeu_ps(output_n2 + 4, _n2_4);
float output_n3[8] = { 0.f }; _mm_storeu_ps(output_n3, _n3_0); _mm_storeu_ps(output_n3 + 4, _n3_4);
float output_n4[8] = { 0.f }; _mm_storeu_ps(output_n4, _n4_0); _mm_storeu_ps(output_n4 + 4, _n4_4);
float output_n5[8] = { 0.f }; _mm_storeu_ps(output_n5, _n5_0); _mm_storeu_ps(output_n5 + 4, _n5_4);
out_tm0[0] = output_n0[0]; out_tm0[1] = output_n0[1]; out_tm0[2] = output_n0[2]; out_tm0[3] = output_n0[3];
out_tm1[0] = output_n0[4]; out_tm1[1] = output_n0[5]; out_tm1[2] = output_n1[0]; out_tm1[3] = output_n1[1];
out_tm2[0] = output_n1[2]; out_tm2[1] = output_n1[3]; out_tm2[2] = output_n1[4]; out_tm2[3] = output_n1[5];
out_tm3[0] = output_n2[0]; out_tm3[1] = output_n2[1]; out_tm3[2] = output_n2[2]; out_tm3[3] = output_n2[3];
out_tm4[0] = output_n2[4]; out_tm4[1] = output_n2[5]; out_tm4[2] = output_n3[0]; out_tm4[3] = output_n3[1];
out_tm5[0] = output_n3[2]; out_tm5[1] = output_n3[3]; out_tm5[2] = output_n3[4]; out_tm5[3] = output_n3[5];
out_tm6[0] = output_n4[0]; out_tm6[1] = output_n4[1]; out_tm6[2] = output_n4[2]; out_tm6[3] = output_n4[3];
out_tm7[0] = output_n4[4]; out_tm7[1] = output_n4[5]; out_tm7[2] = output_n5[0]; out_tm7[3] = output_n5[1];
out_tm8[0] = output_n5[2]; out_tm8[1] = output_n5[3]; out_tm8[2] = output_n5[4]; out_tm8[3] = output_n5[5];
#endif
float d0[6],d1[6],d2[6],d3[6],d4[6],d5[6];
float w0[6],w1[6],w2[6],w3[6],w4[6],w5[6];
float t0[6],t1[6],t2[6],t3[6],t4[6],t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4*d0[n] - 5*d2[n] + d4[n];
w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n];
w2[n] = 4*d1[n] - 4*d2[n] - d3[n] + d4[n];
w3[n] = -2*d1[n] - d2[n] + 2*d3[n] + d4[n];
w4[n] = 2*d1[n] - d2[n] - 2*d3[n] + d4[n];
w5[n] = 4*d1[n] - 5*d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5];
t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5];
t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5];
t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5];
t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5];
t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5];
}
// d = B_t * d_t
for (int n = 0; n < 6; n++)
{
d0[n] = 4*t0[n] - 5*t2[n] + t4[n];
d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n];
d2[n] = 4*t1[n] - 4*t2[n] - t3[n] + t4[n];
d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n];
d4[n] = 2*t1[n] - t2[n] - 2*t3[n] + t4[n];
d5[n] = 4*t1[n] - 5*t3[n] + t5[n];
}
// save to out_tm
{
out_tm0[0]=d0[0];out_tm0[1]=d0[1];out_tm0[2]=d0[2];out_tm0[3]=d0[3];
out_tm1[0]=d0[4];out_tm1[1]=d0[5];out_tm1[2]=d1[0];out_tm1[3]=d1[1];
out_tm2[0]=d1[2];out_tm2[1]=d1[3];out_tm2[2]=d1[4];out_tm2[3]=d1[5];
out_tm3[0]=d2[0];out_tm3[1]=d2[1];out_tm3[2]=d2[2];out_tm3[3]=d2[3];
out_tm4[0]=d2[4];out_tm4[1]=d2[5];out_tm4[2]=d3[0];out_tm4[3]=d3[1];
out_tm5[0]=d3[2];out_tm5[1]=d3[3];out_tm5[2]=d3[4];out_tm5[3]=d3[5];
out_tm6[0]=d4[0];out_tm6[1]=d4[1];out_tm6[2]=d4[2];out_tm6[3]=d4[3];
out_tm7[0]=d4[4];out_tm7[1]=d4[5];out_tm7[2]=d5[0];out_tm7[3]=d5[1];
out_tm8[0]=d5[2];out_tm8[1]=d5[3];out_tm8[2]=d5[4];out_tm8[3]=d5[5];
}
#endif // __AVX__ || __SSE__
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm/6; // may be the block num in Feathercnn
int nRowBlocks = w_tm/6;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(36, tiles, outch, elemsize, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r=0; r<9; r++)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 8;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p+1);
float* output2_tm = top_blob_tm.channel(p+2);
float* output3_tm = top_blob_tm.channel(p+3);
float* output4_tm = top_blob_tm.channel(p+4);
float* output5_tm = top_blob_tm.channel(p+5);
float* output6_tm = top_blob_tm.channel(p+6);
float* output7_tm = top_blob_tm.channel(p+7);
output0_tm = output0_tm + r*4;
output1_tm = output1_tm + r*4;
output2_tm = output2_tm + r*4;
output3_tm = output3_tm + r*4;
output4_tm = output4_tm + r*4;
output5_tm = output5_tm + r*4;
output6_tm = output6_tm + r*4;
output7_tm = output7_tm + r*4;
for (int i=0; i<tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p/8);
const float* r0 = bottom_blob_tm.channel(tiles*r+i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
__m128 _sum1 = _mm_broadcast_ss(&zero_val);
__m128 _sum2 = _mm_broadcast_ss(&zero_val);
__m128 _sum3 = _mm_broadcast_ss(&zero_val);
__m128 _sum4 = _mm_broadcast_ss(&zero_val);
__m128 _sum5 = _mm_broadcast_ss(&zero_val);
__m128 _sum6 = _mm_broadcast_ss(&zero_val);
__m128 _sum7 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
__m128 _sum4 = _mm_set1_ps(0.f);
__m128 _sum5 = _mm_set1_ps(0.f);
__m128 _sum6 = _mm_set1_ps(0.f);
__m128 _sum7 = _mm_set1_ps(0.f);
#endif
int q=0;
for (; q+3<inch; q=q+4)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _r1 = _mm_loadu_ps(r0+4);
__m128 _r2 = _mm_loadu_ps(r0+8);
__m128 _r3 = _mm_loadu_ps(r0+12);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr+4);
__m128 _k2 = _mm_loadu_ps(kptr+8);
__m128 _k3 = _mm_loadu_ps(kptr+12);
__m128 _k4 = _mm_loadu_ps(kptr+16);
__m128 _k5 = _mm_loadu_ps(kptr+20);
__m128 _k6 = _mm_loadu_ps(kptr+24);
__m128 _k7 = _mm_loadu_ps(kptr+28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr+4);
_k2 = _mm_loadu_ps(kptr+8);
_k3 = _mm_loadu_ps(kptr+12);
_k4 = _mm_loadu_ps(kptr+16);
_k5 = _mm_loadu_ps(kptr+20);
_k6 = _mm_loadu_ps(kptr+24);
_k7 = _mm_loadu_ps(kptr+28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r1, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r1, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r1, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r1, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r1, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r1, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r1, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r1, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr+4);
_k2 = _mm_loadu_ps(kptr+8);
_k3 = _mm_loadu_ps(kptr+12);
_k4 = _mm_loadu_ps(kptr+16);
_k5 = _mm_loadu_ps(kptr+20);
_k6 = _mm_loadu_ps(kptr+24);
_k7 = _mm_loadu_ps(kptr+28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r2, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r2, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r2, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r2, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r2, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r2, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r2, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r2, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr+4);
_k2 = _mm_loadu_ps(kptr+8);
_k3 = _mm_loadu_ps(kptr+12);
_k4 = _mm_loadu_ps(kptr+16);
_k5 = _mm_loadu_ps(kptr+20);
_k6 = _mm_loadu_ps(kptr+24);
_k7 = _mm_loadu_ps(kptr+28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r3, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r3, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r3, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r3, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r3, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r3, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r3, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r3, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7));
#endif
kptr += 32;
r0 += 16;
}
for (; q<inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr+4);
__m128 _k2 = _mm_loadu_ps(kptr+8);
__m128 _k3 = _mm_loadu_ps(kptr+12);
__m128 _k4 = _mm_loadu_ps(kptr+16);
__m128 _k5 = _mm_loadu_ps(kptr+20);
__m128 _k6 = _mm_loadu_ps(kptr+24);
__m128 _k7 = _mm_loadu_ps(kptr+28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
kptr += 32;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
_mm_storeu_ps(output4_tm, _sum4);
_mm_storeu_ps(output5_tm, _sum5);
_mm_storeu_ps(output6_tm, _sum6);
_mm_storeu_ps(output7_tm, _sum7);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
float sum4[4] = {0};
float sum5[4] = {0};
float sum6[4] = {0};
float sum7[4] = {0};
for (int q=0; q<inch; q++)
{
for (int n=0; n<4; n++)
{
sum0[n] += r0[n] * kptr[n];
sum1[n] += r0[n] * kptr[n+4];
sum2[n] += r0[n] * kptr[n+8];
sum3[n] += r0[n] * kptr[n+12];
sum4[n] += r0[n] * kptr[n+16];
sum5[n] += r0[n] * kptr[n+20];
sum6[n] += r0[n] * kptr[n+24];
sum7[n] += r0[n] * kptr[n+28];
}
kptr += 32;
r0 += 4;
}
for (int n=0; n<4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
output4_tm[n] = sum4[n];
output5_tm[n] = sum5[n];
output6_tm[n] = sum6[n];
output7_tm[n] = sum7[n];
}
#endif // __AVX__
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
output4_tm += 36;
output5_tm += 36;
output6_tm += 36;
output7_tm += 36;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
for (int pp=0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p+1);
float* output2_tm = top_blob_tm.channel(p+2);
float* output3_tm = top_blob_tm.channel(p+3);
output0_tm = output0_tm + r*4;
output1_tm = output1_tm + r*4;
output2_tm = output2_tm + r*4;
output3_tm = output3_tm + r*4;
for (int i=0; i<tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4);
const float* r0 = bottom_blob_tm.channel(tiles*r+i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
__m128 _sum1 = _mm_broadcast_ss(&zero_val);
__m128 _sum2 = _mm_broadcast_ss(&zero_val);
__m128 _sum3 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
#endif
for (int q=0; q<inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr+4);
__m128 _k2 = _mm_loadu_ps(kptr+8);
__m128 _k3 = _mm_loadu_ps(kptr+12);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
#endif
kptr += 16;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
for (int q=0; q<inch; q++)
{
for (int n=0; n<4; n++)
{
sum0[n] += r0[n] * kptr[n];
sum1[n] += r0[n] * kptr[n+4];
sum2[n] += r0[n] * kptr[n+8];
sum3[n] += r0[n] * kptr[n+12];
}
kptr += 16;
r0 += 4;
}
for (int n=0; n<4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif // __AVX__
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
}
}
remain_outch_start += nn_outch << 2;
for (int p=remain_outch_start; p<outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
output0_tm = output0_tm + r*4;
for (int i=0; i<tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4);
const float* r0 = bottom_blob_tm.channel(tiles*r+i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
#endif
for (int q=0; q<inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
#endif
kptr += 16;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
#else
float sum0[4] = {0};
for (int q=0; q<inch; q++)
{
for (int n=0; n<4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
}
kptr += 4;
r0 += 4;
}
for (int n=0; n<4; n++)
{
output0_tm[n] = sum0[n];
}
#endif // __AVX__ || __SSE__
output0_tm += 36;
}
}
// for (int p=0; p<outch; p++)
// {
// Mat out0_tm = top_blob_tm.channel(p);
// const Mat kernel0_tm = kernel_tm.channel(p);
// for (int i=0; i<tiles; i++)
// {
// float* output0_tm = out0_tm.row<int>(i);
// int sum0[36] = {0};
// for (int q=0; q<inch; q++)
// {
// const float* r0 = bottom_blob_tm.channel(q).row<float>(i);
// const float* k0 = kernel0_tm.row<float>(q);
// for (int n=0; n<36; n++)
// {
// sum0[n] += (int)r0[n] * k0[n];
// }
// }
// for (int n=0; n<36; n++)
// {
// output0_tm[n] = sum0[n];
// }
// }
// }
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, elemsize, opt.workspace_allocator);
{
// AT
// const float itm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm/6; // may be the block num in Feathercnn
int nRowBlocks = w_tm/6;
#if __AVX__ || __SSE__
#if __AVX__
__m256 mul_2 = _mm256_set1_ps(2);
__m256 mul_4 = _mm256_set1_ps(4);
__m256 mul_8 = _mm256_set1_ps(8);
__m128 mul_2_s = _mm_set1_ps(2);
__m128 mul_4_s = _mm_set1_ps(4);
__m128 mul_8_s = _mm_set1_ps(8);
#else
__m128 mul_2 = _mm_set1_ps(2);
__m128 mul_4 = _mm_set1_ps(4);
__m128 mul_8 = _mm_set1_ps(8);
#endif
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
float* out_tile = top_blob_tm.channel(p);
float* outRow0 = top_blob_bordered.channel(p);
float* outRow1 = outRow0 + outw;
float* outRow2 = outRow0 + outw * 2;
float* outRow3 = outRow0 + outw * 3;
const float bias0 = bias ? bias[p] : 0.f;
for (int j=0; j<nColBlocks; j++)
{
for(int i=0; i<nRowBlocks; i++)
{
#if __AVX__ || __SSE__
#if __AVX__
// load
__m256 s0 = _mm256_loadu_ps(out_tile);
__m256 s1 = _mm256_loadu_ps(out_tile + 6);
__m256 s2 = _mm256_loadu_ps(out_tile + 12);
__m256 s3 = _mm256_loadu_ps(out_tile + 18);
__m256 s4 = _mm256_loadu_ps(out_tile + 24);
__m256 s5 = _mm256_loadu_ps(out_tile + 30);
// w = A_T * W
__m256 w0 = _mm256_add_ps(s0, s1);
w0 = _mm256_add_ps(w0, s2);
w0 = _mm256_add_ps(w0, s3);
w0 = _mm256_add_ps(w0, s4);
__m256 w1 = _mm256_sub_ps(s1, s2);
__m256 temp = _mm256_mul_ps(s3, mul_2);
w1 = _mm256_add_ps(w1, temp);
temp = _mm256_mul_ps(s4, mul_2);
w1 = _mm256_sub_ps(w1, temp);
__m256 w2 = _mm256_add_ps(s1, s2);
temp = _mm256_mul_ps(s3, mul_4);
w2 = _mm256_add_ps(w2, temp);
temp = _mm256_mul_ps(s4, mul_4);
w2 = _mm256_add_ps(w2, temp);
__m256 w3 = _mm256_sub_ps(s1, s2);
temp = _mm256_mul_ps(s3, mul_8);
w3 = _mm256_add_ps(w3, temp);
temp = _mm256_mul_ps(s4, mul_8);
w3 = _mm256_sub_ps(w3, temp);
w3 = _mm256_add_ps(w3, s5);
// transpose w to w_t
__m128 d0, d1, d2, d3, d4, d5;
{
d0.m128_f32[0] = w0.m256_f32[0]; d0.m128_f32[1] = w1.m256_f32[0]; d0.m128_f32[2] = w2.m256_f32[0]; d0.m128_f32[3] = w3.m256_f32[0];
d1.m128_f32[0] = w0.m256_f32[1]; d1.m128_f32[1] = w1.m256_f32[1]; d1.m128_f32[2] = w2.m256_f32[1]; d1.m128_f32[3] = w3.m256_f32[1];
d2.m128_f32[0] = w0.m256_f32[2]; d2.m128_f32[1] = w1.m256_f32[2]; d2.m128_f32[2] = w2.m256_f32[2]; d2.m128_f32[3] = w3.m256_f32[2];
d3.m128_f32[0] = w0.m256_f32[3]; d3.m128_f32[1] = w1.m256_f32[3]; d3.m128_f32[2] = w2.m256_f32[3]; d3.m128_f32[3] = w3.m256_f32[3];
d4.m128_f32[0] = w0.m256_f32[4]; d4.m128_f32[1] = w1.m256_f32[4]; d4.m128_f32[2] = w2.m256_f32[4]; d4.m128_f32[3] = w3.m256_f32[4];
d5.m128_f32[0] = w0.m256_f32[5]; d5.m128_f32[1] = w1.m256_f32[5]; d5.m128_f32[2] = w2.m256_f32[5]; d5.m128_f32[3] = w3.m256_f32[5];
}
// Y = A_T * w_t
__m128 o0 = _mm_add_ps(d0, d1);
o0 = _mm_add_ps(o0, d2);
o0 = _mm_add_ps(o0, d3);
o0 = _mm_add_ps(o0, d4);
__m128 o1 = _mm_sub_ps(d1, d2);
__m128 temp_s = _mm_mul_ps(d3, mul_2_s);
o1 = _mm_add_ps(o1, temp_s);
temp_s = _mm_mul_ps(d4, mul_2_s);
o1 = _mm_sub_ps(o1, temp_s);
__m128 o2 = _mm_add_ps(d1, d2);
temp_s = _mm_mul_ps(d3, mul_4_s);
o2 = _mm_add_ps(o2, temp_s);
temp_s = _mm_mul_ps(d4, mul_4_s);
o2 = _mm_add_ps(o2, temp_s);
__m128 o3 = _mm_sub_ps(d1, d2);
temp_s = _mm_mul_ps(d3, mul_8_s);
o3 = _mm_add_ps(o3, temp_s);
temp_s = _mm_mul_ps(d4, mul_8_s);
o3 = _mm_sub_ps(o3, temp_s);
o3 = _mm_add_ps(o3, d5);
// save to top blob tm
__m128 bias00 = _mm_set1_ps(bias0);
o0 = _mm_add_ps(o0, bias00);
o1 = _mm_add_ps(o1, bias00);
o2 = _mm_add_ps(o2, bias00);
o3 = _mm_add_ps(o3, bias00);
_mm_storeu_ps(outRow0, o0);
_mm_storeu_ps(outRow1, o1);
_mm_storeu_ps(outRow2, o2);
_mm_storeu_ps(outRow3, o3);
#else
// load
__m128 s0_0 = _mm_loadu_ps(out_tile);
__m128 s0_4 = _mm_loadu_ps(out_tile + 4);
__m128 s1_0 = _mm_loadu_ps(out_tile + 6);
__m128 s1_4 = _mm_loadu_ps(out_tile + 10);
__m128 s2_0 = _mm_loadu_ps(out_tile + 12);
__m128 s2_4 = _mm_loadu_ps(out_tile + 16);
__m128 s3_0 = _mm_loadu_ps(out_tile + 18);
__m128 s3_4 = _mm_loadu_ps(out_tile + 22);
__m128 s4_0 = _mm_loadu_ps(out_tile + 24);
__m128 s4_4 = _mm_loadu_ps(out_tile + 28);
__m128 s5_0 = _mm_loadu_ps(out_tile + 30);
__m128 s5_4 = _mm_loadu_ps(out_tile + 34);
// w = A_T * W
__m128 w0_0 = _mm_add_ps(s0_0, s1_0);
w0_0 = _mm_add_ps(w0_0, s2_0);
w0_0 = _mm_add_ps(w0_0, s3_0);
w0_0 = _mm_add_ps(w0_0, s4_0);
__m128 w0_4 = _mm_add_ps(s0_4, s1_4);
w0_4 = _mm_add_ps(w0_4, s2_4);
w0_4 = _mm_add_ps(w0_4, s3_4);
w0_4 = _mm_add_ps(w0_4, s4_4);
__m128 w1_0 = _mm_sub_ps(s1_0, s2_0);
__m128 temp = _mm_mul_ps(s3_0, mul_2);
w1_0 = _mm_add_ps(w1_0, temp);
temp = _mm_mul_ps(s4_0, mul_2);
w1_0 = _mm_sub_ps(w1_0, temp);
__m128 w1_4 = _mm_sub_ps(s1_4, s2_4);
temp = _mm_mul_ps(s3_4, mul_2);
w1_4 = _mm_add_ps(w1_4, temp);
temp = _mm_mul_ps(s4_4, mul_2);
w1_4 = _mm_sub_ps(w1_4, temp);
__m128 w2_0 = _mm_add_ps(s1_0, s2_0);
temp = _mm_mul_ps(s3_0, mul_4);
w2_0 = _mm_add_ps(w2_0, temp);
temp = _mm_mul_ps(s4_0, mul_4);
w2_0 = _mm_add_ps(w2_0, temp);
__m128 w2_4 = _mm_add_ps(s1_4, s2_4);
temp = _mm_mul_ps(s3_4, mul_4);
w2_4 = _mm_add_ps(w2_4, temp);
temp = _mm_mul_ps(s4_4, mul_4);
w2_4 = _mm_add_ps(w2_4, temp);
__m128 w3_0 = _mm_sub_ps(s1_0, s2_0);
temp = _mm_mul_ps(s3_0, mul_8);
w3_0 = _mm_add_ps(w3_0, temp);
temp = _mm_mul_ps(s4_0, mul_8);
w3_0 = _mm_sub_ps(w3_0, temp);
w3_0 = _mm_add_ps(w3_0, s5_0);
__m128 w3_4 = _mm_sub_ps(s1_4, s2_4);
temp = _mm_mul_ps(s3_4, mul_8);
w3_4 = _mm_add_ps(w3_4, temp);
temp = _mm_mul_ps(s4_4, mul_8);
w3_4 = _mm_sub_ps(w3_4, temp);
w3_4 = _mm_add_ps(w3_4, s5_4);
// transpose w to w_t
__m128 d0, d1, d2, d3, d4, d5;
{
d0.m128_f32[0] = w0_0.m128_f32[0]; d0.m128_f32[1] = w1_0.m128_f32[0]; d0.m128_f32[2] = w2_0.m128_f32[0]; d0.m128_f32[3] = w3_0.m128_f32[0];
d1.m128_f32[0] = w0_0.m128_f32[1]; d1.m128_f32[1] = w1_0.m128_f32[1]; d1.m128_f32[2] = w2_0.m128_f32[1]; d1.m128_f32[3] = w3_0.m128_f32[1];
d2.m128_f32[0] = w0_0.m128_f32[2]; d2.m128_f32[1] = w1_0.m128_f32[2]; d2.m128_f32[2] = w2_0.m128_f32[2]; d2.m128_f32[3] = w3_0.m128_f32[2];
d3.m128_f32[0] = w0_0.m128_f32[3]; d3.m128_f32[1] = w1_0.m128_f32[3]; d3.m128_f32[2] = w2_0.m128_f32[3]; d3.m128_f32[3] = w3_0.m128_f32[3];
d4.m128_f32[0] = w0_4.m128_f32[0]; d4.m128_f32[1] = w1_4.m128_f32[0]; d4.m128_f32[2] = w2_4.m128_f32[0]; d4.m128_f32[3] = w3_4.m128_f32[0];
d5.m128_f32[0] = w0_4.m128_f32[1]; d5.m128_f32[1] = w1_4.m128_f32[1]; d5.m128_f32[2] = w2_4.m128_f32[1]; d5.m128_f32[3] = w3_4.m128_f32[1];
}
// Y = A_T * w_t
__m128 o0 = _mm_add_ps(d0, d1);
o0 = _mm_add_ps(o0, d2);
o0 = _mm_add_ps(o0, d3);
o0 = _mm_add_ps(o0, d4);
__m128 o1 = _mm_sub_ps(d1, d2);
__m128 temp_s = _mm_mul_ps(d3, mul_2);
o1 = _mm_add_ps(o1, temp_s);
temp_s = _mm_mul_ps(d4, mul_2);
o1 = _mm_sub_ps(o1, temp_s);
__m128 o2 = _mm_add_ps(d1, d2);
temp_s = _mm_mul_ps(d3, mul_4);
o2 = _mm_add_ps(o2, temp_s);
temp_s = _mm_mul_ps(d4, mul_4);
o2 = _mm_add_ps(o2, temp_s);
__m128 o3 = _mm_sub_ps(d1, d2);
temp_s = _mm_mul_ps(d3, mul_8);
o3 = _mm_add_ps(o3, temp_s);
temp_s = _mm_mul_ps(d4, mul_8);
o3 = _mm_sub_ps(o3, temp_s);
o3 = _mm_add_ps(o3, d5);
// save to top blob tm
__m128 bias00 = _mm_set1_ps(bias0);
o0 = _mm_add_ps(o0, bias00);
o1 = _mm_add_ps(o1, bias00);
o2 = _mm_add_ps(o2, bias00);
o3 = _mm_add_ps(o3, bias00);
_mm_storeu_ps(outRow0, o0);
_mm_storeu_ps(outRow1, o1);
_mm_storeu_ps(outRow2, o2);
_mm_storeu_ps(outRow3, o3);
#endif
#else
// TODO AVX2
float s0[6],s1[6],s2[6],s3[6],s4[6],s5[6];
float w0[6],w1[6],w2[6],w3[6];
float d0[4],d1[4],d2[4],d3[4],d4[4],d5[4];
float o0[4],o1[4],o2[4],o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n+ 6];
s2[n] = out_tile[n+12];
s3[n] = out_tile[n+18];
s4[n] = out_tile[n+24];
s5[n] = out_tile[n+30];
}
// w = A_T * W
for (int n = 0; n < 6; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2*s3[n] - 2*s4[n];
w2[n] = s1[n] + s2[n] + 4*s3[n] + 4*s4[n];
w3[n] = s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + s5[n];
}
// transpose w to w_t
{
d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0];
d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1];
d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2];
d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3];
d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4];
d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2*d3[n] - 2*d4[n];
o2[n] = d1[n] + d2[n] + 4*d3[n] + 4*d4[n];
o3[n] = d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n];
}
// save to top blob tm
for (int n = 0; n < 4; n++)
{
outRow0[n] = o0[n] + bias0;
outRow1[n] = o1[n] + bias0;
outRow2[n] = o2[n] + bias0;
outRow3[n] = o3[n] + bias0;
}
#endif
out_tile += 36;
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
outRow0 += outw * 3;
outRow1 += outw * 3;
outRow2 += outw * 3;
outRow3 += outw * 3;
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
/* 3x3 convolution with stride 2.
 * For each output channel p the map is initialised with its bias, then the
 * 3x3 correlation of every input channel q with the matching kernel slice
 * (kernel + p*inch*9 + q*9) is accumulated on top.
 * NOTE(review): the SSE path accesses __m128 lanes via .m128_f32, which is
 * an MSVC-specific union member -- confirm this only builds with MSVC. */
static void conv3x3s2_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    // After finishing a row of outputs: skip the leftover input pixels of the
    // current row plus one whole extra row (vertical stride of 2).
    const int tailstep = w - 2 * outw + w;
    const float* kernel = _kernel;
    const float* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);
        // Accumulate every input channel's contribution into this output map.
        for (int q = 0; q < inch; q++)
        {
            float *outptr = out;
            const float *img = bottom_blob.channel(q);
            const float* kernel0 = kernel + p*inch*9 + q*9;
            const float *r0 = img;          // top row of the 3x3 window
            const float *r1 = img + w;      // middle row
            const float *r2 = img + w * 2;  // bottom row
            const float* k0 = kernel0;      // three kernel rows, 3 taps each
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;
#if __AVX__ || __SSE__
            // NOTE(review): each _mm_loadu_ps reads 4 floats although only the
            // first 3 lanes are summed below.  k2 therefore reads one float
            // past the 9-element kernel, and r0/r1/r2 read one float past the
            // 3-pixel window -- confirm the buffers are padded accordingly.
            __m128 k0_data = _mm_loadu_ps(k0);
            __m128 k1_data = _mm_loadu_ps(k1);
            __m128 k2_data = _mm_loadu_ps(k2);
            for (int i = 0; i < outh; i++)
            {
                int remain = outw;
                for (; remain > 0; remain--)
                {
                    __m128 sum = _mm_setzero_ps();
                    __m128 r0_data = _mm_loadu_ps(r0);
                    __m128 r1_data = _mm_loadu_ps(r1);
                    __m128 r2_data = _mm_loadu_ps(r2);
                    sum = _mm_add_ps(_mm_mul_ps(k0_data, r0_data), sum);
                    sum = _mm_add_ps(_mm_mul_ps(k1_data, r1_data), sum);
                    sum = _mm_add_ps(_mm_mul_ps(k2_data, r2_data), sum);
                    // Horizontal sum of lanes 0..2 only; lane 3 is the
                    // over-read discussed above and is deliberately dropped.
                    *outptr += sum.m128_f32[0] + sum.m128_f32[1] + sum.m128_f32[2];
                    r0 += 2;    // horizontal stride of 2
                    r1 += 2;
                    r2 += 2;
                    outptr++;
                }
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }
#else
            // Scalar fallback: identical arithmetic without intrinsics.
            for (int i = 0; i < outh; i++)
            {
                int remain = outw;
                for (; remain > 0; remain--)
                {
                    float sum = 0;
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    *outptr += sum;
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr++;
                }
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }
#endif
        }
    }
}
|
main.c | /*
Copyright 2017 James Fong
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#define VID_W (1280)
#define VID_H (720)
#define PATH_QUALITY 256
#define PATH_TIME 0.5
#define PATH_LEN 96
#define OCTOPUS_ARMS 8
#define FRAME_START 0
#define FRAME_COUNT 300
#define RENDER_THICK 50
#define DRAW_OCTOPUS 0
#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795028841971694
#endif
/* Basic double-precision 3D vector. */
typedef struct Vec3 {
    double x, y, z;
} Vec3;
/* Convenience constructor for Vec3 literals. */
Vec3 vec_new(double x, double y, double z) {
    Vec3 vec = {x, y, z};
    return vec;
}
/* 8-bit RGB color. */
typedef struct Color {
    unsigned char r, g, b;
} Color;
/* A polyline of PATH_LEN points on (or near) the globe surface. */
typedef struct Path {
    Vec3 points[PATH_LEN];
} Path;
/* A particle with position and velocity, stepped along the sphere. */
typedef struct Walker {
    Vec3 loc;
    Vec3 vel;
} Walker;
Path g_octopus[OCTOPUS_ARMS];        // one traced arm path per direction
Color g_octo_color[OCTOPUS_ARMS];    // per-arm colors, sampled from set.png
Color g_frame[VID_W * VID_H];        // output frame buffer, row-major
Color* g_plate;                      // equirectangular Earth texture (owned)
int g_plate_w;
int g_plate_h;
Color* g_starmap;                    // equirectangular star background (owned)
int g_starmap_w;
int g_starmap_h;
Vec3 g_earth_loc = {0, 0, 0};
double g_earth_radius = 6371.008; // Kilometers
double g_earth_ang_spd = 0.26251614; // Radians per hour
Vec3 g_cam_loc = {0, 0, -3 * 6371.008};
Vec3 g_cam_right = {1, 0, 0};        // camera basis: kept orthonormal by cam_lookat
Vec3 g_cam_up = {0, 1, 0};
Vec3 g_cam_forward = {0, 0, 1};
double g_cam_focal_len = 2.0;        // distance from eye to image plane
/* Clamp an integer into the half-open range [min, max): values below min
 * become min, values at or above max become max - 1. */
int clamp(int val, int min, int max) {
    if (val < min) {
        return min;
    }
    return (val >= max) ? (max - 1) : val;
}
/* Clamp a double into the closed interval [min, max]. */
double clampd(double val, double min, double max) {
    return (val < min) ? min : ((val > max) ? max : val);
}
/* Map a vector assumed to lie in [0,1]^3 straight to an 8-bit color.
 * No clamping: out-of-range components wrap on the narrowing conversion. */
Color debug_to_color(Vec3 vec) {
    Color ret;
    ret.r = vec.x * 255;
    ret.g = vec.y * 255;
    ret.b = vec.z * 255;
    return ret;
}
/* Visualize a direction-like vector: clamp each component to [-1,1],
 * remap to [0,1], then scale to 8-bit. */
Color debug_to_color2(Vec3 vec) {
    double rx = (clampd(vec.x, -1, 1) + 1.0) / 2.0;
    double ry = (clampd(vec.y, -1, 1) + 1.0) / 2.0;
    double rz = (clampd(vec.z, -1, 1) + 1.0) / 2.0;
    Color ret = {rx * 255, ry * 255, rz * 255};
    return ret;
}
/* Map a vector to an 8-bit color after clamping each component to [0,1]. */
Color debug_to_color3(Vec3 vec) {
    Color ret = {clampd(vec.x, 0, 1) * 255,
                 clampd(vec.y, 0, 1) * 255,
                 clampd(vec.z, 0, 1) * 255};
    return ret;
}
int load_plate() {
int n, k, nn;
unsigned char* data = stbi_load("plate.png",
&g_plate_w, &g_plate_h, &n, sizeof(Color));
if (data) {
printf("Loaded equirectangular map successfully.\n");
g_plate = (Color*) data;
} else {
fprintf(stderr, "Fatal error loading Earth map image!\n");
return 0;
}
data = stbi_load("starmap.png",
&g_starmap_w, &g_starmap_h, &n, sizeof(Color));
if (data) {
printf("Loaded equirectangular starmap successfully.\n");
g_starmap = (Color*) data;
} else {
fprintf(stderr, "Fatal error loading Starmap image!\n");
return 0;
}
data = stbi_load("set.png",
&nn, &k, &n, sizeof(Color));
if (data) {
printf("Loaded rainbow map successfully.\n");
Color* rainbow = (Color*) data;
for (int i = 0; i < OCTOPUS_ARMS; ++i) {
g_octo_color[i] = rainbow[i];
}
stbi_image_free(data);
} else {
fprintf(stderr, "Fatal error loading rainbow map image!\n");
return 0;
}
return 1;
}
/* Sample an equirectangular image.
 * rad_nort: latitude in radians north; rad_east: longitude in radians east.
 * Coordinates outside the image are clamped to the nearest edge pixel. */
Color get_equirec_color(
        double rad_nort, double rad_east,
        Color* image, int img_w, int img_h) {
    double u = 0.5 + (rad_east / (M_PI * 2));
    double v = 0.5 - (rad_nort / M_PI);
    int pix_x = clamp((int) (u * img_w), 0, img_w);
    int pix_y = clamp((int) (v * img_h), 0, img_h);
    return image[pix_y * img_w + pix_x];
}
/* Sample the star background at the given latitude/longitude (radians). */
Color get_starmap_color(double rad_nort, double rad_east) {
    return get_equirec_color(rad_nort, rad_east,
        g_starmap, g_starmap_w, g_starmap_h);
}
/* Sample the Earth surface texture at the given latitude/longitude (radians). */
Color get_plate_color(double rad_nort, double rad_east) {
    return get_equirec_color(rad_nort, rad_east, g_plate, g_plate_w, g_plate_h);
}
/* Write one pixel into the global frame buffer.
 * Out-of-range coordinates are ignored with a warning on stderr.
 * Fix: the y-out-of-bounds branch previously printed the x coordinate. */
void set_frame_color(int x, int y, Color c) {
    if (x < 0 || x >= VID_W) {
        fprintf(stderr, "Tried to write out of bounds, x: %d", x);
        return;
    }
    if (y < 0 || y >= VID_H) {
        fprintf(stderr, "Tried to write out of bounds, y: %d", y);
        return;
    }
    g_frame[x + y * VID_W] = c;
}
/* Release the globally-owned images loaded by load_plate(). */
void unload_plate() {
    stbi_image_free((unsigned char*) g_plate);
    stbi_image_free((unsigned char*) g_starmap);
}
/* Write the global frame buffer to a JPEG file.
 * Returns 1 on success, 0 on failure (message printed either way). */
int export_frame(const char* fname) {
    int success = stbi_write_jpg(fname, VID_W, VID_H, sizeof(Color),
        (void*) g_frame, VID_W * sizeof(Color));
    if (success) {
        printf("Wrote frame to file %s\n", fname);
        return 1;
    } else {
        fprintf(stderr, "Error writing to file %s\n", fname);
        return 0;
    }
}
/* Dot product of two vectors. */
double vec_dot(Vec3 a, Vec3 b) {
    return a.x * b.x + a.y * b.y + a.z * b.z;
}
// Up and forward gives right
/* Cross product (right-handed). */
Vec3 vec_cross(Vec3 a, Vec3 b) {
    Vec3 ret = {a.y * b.z - a.z * b.y,
                a.z * b.x - a.x * b.z,
                a.x * b.y - a.y * b.x};
    return ret;
}
/* Squared magnitude (avoids the sqrt of vec_mag). */
double vec_magsq(Vec3 vec) {
    return vec.x * vec.x + vec.y * vec.y + vec.z * vec.z;
}
/* Euclidean magnitude. */
double vec_mag(Vec3 vec) {
    return sqrt(vec.x * vec.x + vec.y * vec.y + vec.z * vec.z);
}
/* Exact zero test on all three components. */
int vec_is_zero(Vec3 vec) {
    return vec.x == 0.0 && vec.y == 0.0 && vec.z == 0.0;
}
/* Component-wise sum. */
Vec3 vec_add(Vec3 a, Vec3 b) {
    Vec3 ret = {a.x + b.x, a.y + b.y, a.z + b.z};
    return ret;
}
/* Component-wise difference (a - b). */
Vec3 vec_sub(Vec3 a, Vec3 b) {
    Vec3 ret = {a.x - b.x, a.y - b.y, a.z - b.z};
    return ret;
}
/* Scale by a scalar. */
Vec3 vec_mul(Vec3 vec, double amnt) {
    Vec3 ret = {vec.x * amnt, vec.y * amnt, vec.z * amnt};
    return ret;
}
/* Unit vector in the same direction.
 * NOTE(review): divides by the magnitude with no zero check -- a zero vector
 * yields NaN components; callers appear to guard with vec_is_zero. */
Vec3 vec_norm(Vec3 vec) {
    double mag = vec_mag(vec);
    Vec3 ret = {vec.x / mag, vec.y / mag, vec.z / mag};
    return ret;
}
/* Same direction, new length. */
Vec3 vec_resize(Vec3 vec, double len) {
    return vec_mul(vec_norm(vec), len);
}
/* Component of vec along the unit vector norm_dir. */
Vec3 vec_project(Vec3 vec, Vec3 norm_dir) {
    return vec_mul(norm_dir, vec_dot(vec, norm_dir));
}
/* Component of vec perpendicular to the unit vector norm_dir. */
Vec3 vec_reject(Vec3 vec, Vec3 norm_dir) {
    return vec_sub(vec, vec_project(vec, norm_dir));
}
/* Rotate vec around the unit axis by angle (radians). */
Vec3 vec_angle_axis_rot(Vec3 axis, double angle, Vec3 vec) {
    // Rodrigues' rotation formula
    return vec_add(vec_add(
        vec_mul(vec, cos(angle)),
        vec_mul(vec_cross(axis, vec), sin(angle))),
        vec_mul(axis, vec_dot(axis, vec) * (1 - cos(angle))));
}
/* Square of a scalar. */
double sq(double x) {
    return x * x;
}
/* Build an orthonormal basis: forward follows dir, right is perpendicular to
 * both skyward and forward, and up completes the right-handed frame. */
void orthogonalize(Vec3 dir, Vec3 skyward,
        Vec3* forward, Vec3* right, Vec3* up) {
    *forward = vec_norm(dir);
    *right = vec_norm(vec_cross(skyward, *forward));
    *up = vec_norm(vec_cross(*forward, *right));
}
/* Point the global camera at lookat, using sidereal_up as the sky reference
 * for roll. Rebuilds g_cam_forward/right/up; g_cam_loc is unchanged. */
void cam_lookat(Vec3 lookat, Vec3 sidereal_up) {
    orthogonalize(vec_sub(lookat, g_cam_loc), sidereal_up,
        &g_cam_forward, &g_cam_right, &g_cam_up);
}
#define NO_HIT -1
/* Intersect a ray (origin O, unit direction L) with a sphere (center C,
 * radius R).  On a hit, *interp receives the intersection point and the
 * (truncated) distance along the ray is returned; NO_HIT otherwise.
 * If O is already inside the sphere, *interp = O and 0 is returned.
 * NOTE(review): the return type is int, so the double distance is truncated
 * toward zero -- depth comparisons in raytrace() work at whole-kilometer
 * resolution.  Confirm this precision loss is acceptable. */
int sphere_intersect_ray(Vec3 O, Vec3 L, double R, Vec3 C, Vec3* interp) {
    if (vec_magsq(vec_sub(O, C)) < sq(R)) {
        *interp = O;
        return 0;
    }
    // Standard quadratic discriminant for ray-sphere intersection.
    double radicand =
        sq(vec_dot(L, vec_sub(O, C))) - vec_magsq(vec_sub(O, C)) + sq(R);
    if (radicand < 0) {
        return NO_HIT;
    }
    // Nearest of the two roots; negative means the sphere is behind the ray.
    double dist = -vec_dot(L, vec_sub(O, C)) - sqrt(radicand);
    if (dist < 0) {
        return NO_HIT;
    }
    *interp = vec_add(O, vec_mul(L, dist));
    return dist;
}
/* Convenience wrapper: intersect a ray with the Earth sphere. */
int intersect_earth(Vec3 src, Vec3 norm_dir, Vec3* interp) {
    return sphere_intersect_ray(
        src, norm_dir, g_earth_radius, g_earth_loc, interp);
}
/* Convert latitude/longitude (radians) plus a magnitude into a position
 * vector; y is the polar axis, x points at longitude 0. */
Vec3 lat_long_to_dir(double rad_nort, double rad_east, double mag) {
    Vec3 ret;
    ret.y = sin(rad_nort) * mag;
    ret.x = cos(rad_east) * cos(rad_nort) * mag;
    ret.z = sin(rad_east) * cos(rad_nort) * mag;
    return ret;
}
/* Background color for a ray that misses everything: converts the unit view
 * direction to latitude/longitude and samples the starmap. */
Color sky_color(Vec3 norm_dir) {
    // Project onto the equatorial plane to get the longitude.
    Vec3 dir_collap = norm_dir;
    dir_collap.y = 0;
    dir_collap = vec_norm(dir_collap);
    double rad_nort = asin(norm_dir.y);
    double rad_east = atan2(dir_collap.z, dir_collap.x);
    return get_starmap_color(rad_nort, rad_east);
}
/* Surface color at a point on the Earth sphere: converts the surface normal
 * direction to latitude/longitude and samples the plate texture. */
Color earth_color(Vec3 surf_loc) {
    Vec3 dir = vec_norm(vec_sub(surf_loc, g_earth_loc));
    Vec3 dir_collap = dir;
    dir_collap.y = 0;
    dir_collap = vec_norm(dir_collap);
    double rad_nort = asin(dir.y);
    double rad_east = atan2(dir_collap.z, dir_collap.x);
    return get_plate_color(rad_nort, rad_east);
}
/* Saturating addition of two colors: each channel sum (max 510) is clamped
 * into [0, 255] via clamp's half-open [0, 256) range. */
Color color_add(Color ca, Color cb) {
    int r = clamp(ca.r + cb.r, 0, 256);
    int g = clamp(ca.g + cb.g, 0, 256);
    int b = clamp(ca.b + cb.b, 0, 256);
    Color ret = {r, g, b};
    return ret;
}
/* Scale a color by d.
 * NOTE(review): no clamping -- d outside [0, 1] can overflow the unsigned
 * char channels; callers appear to only pass factors in [0, 1]. */
Color color_mul(Color c, double d) {
    Color ret = {c.r * d, c.g * d, c.b * d};
    return ret;
}
double my_fmod(double x, double y) {
return fmod(fmod(x, y) + y, y);
}
Color g_atmo_color = {0x00, 0xCC, 0xFF};   // atmospheric rim glow tint
Color g_pulse_color = {0xFF, 0xFF, 0xFF};  // pulse highlight color
/* Trace one primary ray and return its color.
 * src: ray origin; norm_dir: unit ray direction; pixel_id: pixel index
 * (currently unused, kept for the commented-out arm-interleaving variant);
 * anim_time: animation time in seconds, drives the pulse phase.
 * Nearest-hit logic: depth starts at DBL_MAX; the Earth and (when
 * DRAW_OCTOPUS is enabled) each arm-path sphere compete for the smallest
 * depth; if nothing hits, the starmap supplies the color.
 * Note: depths come back as truncated ints from sphere_intersect_ray. */
Color raytrace(Vec3 src, Vec3 norm_dir, int pixel_id, double anim_time) {
    Vec3 hit;
    double depth = DBL_MAX;
    Color color;
    Vec3 other_hit;
    double other_depth;
    other_depth = intersect_earth(src, norm_dir, &other_hit);
    if (other_depth != NO_HIT && other_depth < depth) {
        hit = other_hit;
        depth = other_depth;
        color = earth_color(hit);
        // Atmospheric rim glow: strongest where the view ray grazes the
        // surface (view direction nearly perpendicular to the normal).
        double glow_str =
            1.0 - vec_dot(
                vec_mul(norm_dir, -1),
                vec_norm(vec_sub(hit, g_earth_loc)));
        glow_str = clampd(glow_str * glow_str * glow_str, 0.0, 1.0) * 0.5;
        color = color_add(color, color_mul(g_atmo_color, glow_str));
    }
    // Pulse phase: how many path steps fit in a day, and the current shift.
    double dsteps_per_day = 24.0 / PATH_TIME;
    int steps_per_day = dsteps_per_day;
    int pulse_shift = (int) (anim_time * dsteps_per_day);
    // Arm rendering is compiled out while DRAW_OCTOPUS is 0.  The bounding
    // sphere test (earth radius + half the arm thickness) culls rays that
    // cannot touch any arm segment.
    if (DRAW_OCTOPUS && sphere_intersect_ray(src, norm_dir,
            g_earth_radius + (RENDER_THICK / 2),
            g_earth_loc, &other_hit) != NO_HIT) {
        for (int arm_idx_raw = 0; arm_idx_raw < OCTOPUS_ARMS; ++arm_idx_raw) {
            // FOR INTRODUCTION
            /*
            if (anim_time < 1) {
                break;
            }
            */
            int arm_idx = arm_idx_raw;//(arm_idx_raw + pixel_id) % OCTOPUS_ARMS;
            int hit_arm = 0;
            double hit_step_time = 0.0;
            // Each path point is rendered as a sphere of radius RENDER_THICK;
            // step_time in [0, 1) is the point's phase within the day pulse.
            for (int step_idx = 0; step_idx < PATH_LEN; ++step_idx) {
                double step_time = (((step_idx - pulse_shift) % steps_per_day)
                    + steps_per_day) % steps_per_day;
                step_time /= dsteps_per_day;
                // FOR INTRODUCTION
                /*
                int delta = (((step_idx - pulse_shift) % steps_per_day)
                    + steps_per_day) % steps_per_day;
                if (anim_time < 16) {
                    if (step_idx - pulse_shift > -1 * steps_per_day ||
                            delta != steps_per_day - 2) {
                        continue;
                    }
                } else if (anim_time < 18) {
                    if (step_idx - pulse_shift > -16 * steps_per_day &&
                            delta != steps_per_day - 2) {
                        continue;
                    }
                }
                */
                other_depth = sphere_intersect_ray(src, norm_dir,
                    RENDER_THICK,
                    g_octopus[arm_idx].points[step_idx], &other_hit);
                if (other_depth != NO_HIT && other_depth < depth) {
                    hit = other_hit;
                    depth = other_depth;
                    color = g_octo_color[arm_idx];
                    // Track the latest pulse phase among the hit segments.
                    if (step_time > hit_step_time || !hit_arm) {
                        hit_step_time = step_time;
                    }
                    hit_arm = 1;
                }
            }
            if (hit_arm) {
                // Brighten the arm according to its pulse phase (50%-100%).
                double intense = hit_step_time;
                intense *= intense;
                color = color_mul(color, 0.5 + (intense / 2.0));
            }
        }
    }
    if (depth == DBL_MAX) {
        color = sky_color(norm_dir);
    }
    return color;
}
/* Render one full frame into g_frame with a simple pinhole camera.
 * Rows are distributed across OpenMP threads; each pixel maps to a point on
 * the image plane at g_cam_focal_len in front of the eye, with x scaled by
 * the aspect ratio and y negated so screen-down matches image-down.
 * Always returns 1 (kept as int for the caller's error-check pattern). */
int render_frame(double anim_time) {
    double asp_rat = ((double) VID_W) / ((double) VID_H);
    Vec3 canvas_disp = vec_mul(g_cam_forward, g_cam_focal_len);
    #pragma omp parallel for schedule(dynamic)
    for (int y = 0; y < VID_H; ++y) {
        // Map pixel row to [-1, 1] on the image plane.
        double c_y = (((double) y) / ((double) VID_H)) * 2.0 - 1.0;
        for (int x = 0; x < VID_W; ++x) {
            double c_x = (((double) x) / ((double) VID_W)) * 2.0 - 1.0;
            c_x *= asp_rat;
            Vec3 ray_dir = vec_norm(
                vec_add(vec_add(
                    vec_mul(g_cam_up, -c_y),
                    vec_mul(g_cam_right, c_x)),
                    canvas_disp));
            set_frame_color(x, y, raytrace(
                g_cam_loc, ray_dir, y * VID_W + x, anim_time));
        }
    }
    return 1;
}
/* Advance a surface walker by delta hours, keeping it on the sphere.
 * coriol scales the Coriolis deflection (0 disables it, 1 is full strength).
 * The step rotates the walker's position around the axis perpendicular to
 * its position and velocity, then re-projects the velocity onto the surface
 * tangent plane.
 * NOTE(review): if the velocity ever becomes exactly radial, vec_cross
 * yields a zero axis and vec_norm produces NaNs -- presumed unreachable for
 * the tangential velocities generate_octopus seeds; confirm. */
Walker apply_velocity_on_earth(Walker walker, double delta, double coriol) {
    if (vec_is_zero(walker.vel)) return walker;
    if (coriol != 0.0) {
        // Coriolis acceleration: -2 * omega x v, using only the horizontal
        // component of the velocity (rotation axis is +y).
        Vec3 effect = vec_cross(
            vec_new(0, g_earth_ang_spd * -2, 0),
            vec_reject(walker.vel, vec_new(0, 1, 0)));
        walker.vel = vec_add(walker.vel, vec_mul(effect, delta * -coriol));
    }
    Vec3 surface_loc = vec_sub(walker.loc, g_earth_loc);
    Vec3 axis = vec_norm(vec_cross(surface_loc, walker.vel));
    // Arc angle covered this step: distance / radius.
    double angle = (vec_mag(walker.vel) / g_earth_radius) * delta;
    walker.loc = vec_add(
        g_earth_loc,
        vec_resize(vec_angle_axis_rot(axis, angle, surface_loc),
            g_earth_radius));
    // Rotate the velocity with the position, then drop any radial component.
    walker.vel = vec_reject(
        vec_angle_axis_rot(axis, angle, walker.vel), vec_norm(surface_loc));
    return walker;
}
/* Trace the OCTOPUS_ARMS paths into g_octopus.
 * From abs_origin (projected onto the sphere), launch one walker per arm in
 * evenly spaced tangent directions at the given speed, integrating each for
 * PATH_LEN steps of PATH_TIME hours (PATH_QUALITY sub-steps per step) under
 * the scaled Coriolis deflection. sidereal_up fixes the tangent frame. */
void generate_octopus(Vec3 abs_origin, double speed, double coriol
        , Vec3 sidereal_up) {
    Vec3 rel_origin =
        vec_mul(vec_norm(vec_sub(abs_origin, g_earth_loc)), g_earth_radius);
    Vec3 ortho_forward;
    Vec3 ortho_right;
    Vec3 ortho_up;
    orthogonalize(rel_origin, sidereal_up,
        &ortho_forward, &ortho_right, &ortho_up);
    ortho_right = vec_mul(ortho_right, -1);
    #pragma omp parallel for
    for (int arm_idx = 0; arm_idx < OCTOPUS_ARMS; ++arm_idx) {
        // Evenly distribute launch directions around the tangent plane.
        double angle =
            (((double) arm_idx) / ((double) OCTOPUS_ARMS)) * 2 * M_PI;
        Walker walker = {rel_origin, vec_add(
            vec_mul(ortho_right, cos(angle) * speed),
            vec_mul(ortho_up, sin(angle) * speed))};
        for (int step_idx = 0; step_idx < PATH_LEN; ++step_idx) {
            // Sub-step the integration for stability, record one point per step.
            for (int rep = 0; rep < PATH_QUALITY; ++rep) {
                walker = apply_velocity_on_earth(
                    walker,
                    PATH_TIME / ((double) PATH_QUALITY),
                    coriol);
            }
            g_octopus[arm_idx].points[step_idx] = walker.loc;
        }
    }
}
// Render a sequence of images
/* Entry point: loads the textures, then renders FRAME_COUNT frames starting
 * at FRAME_START (30 fps animation clock) and writes each to
 * frame/NNNNNN.jpg, printing per-frame timing.  The commented-out blocks are
 * alternative camera/scene setups kept for re-rendering other shots.
 * Returns 0 on success, -1 on any load/render/export failure.
 * Fix: the filename buffer was 16 bytes, but "frame/000000.jpg" needs 17
 * including the terminator, so sprintf overflowed it by one byte; the buffer
 * is now larger and written with snprintf bounded by its size. */
int main(int argc, char* argv[]) {
    printf("Coriolis visualizer\n");
#ifdef _OPENMP
    double timer_start, timer_end;
    printf("Using OpenMP!\n");
#else
    clock_t timer_start, timer_end;
#endif
    fflush(stdout);
    if (!load_plate()) return -1;
    fflush(stdout);
    // idle
    /*
    g_cam_loc = lat_long_to_dir(0, M_PI / -2, 18000);
    generate_octopus(g_cam_loc, 100, 0.5, vec_new(0, 1, 0));
    cam_lookat(g_earth_loc, vec_new(0, 1, 0));
    */
    double time_elapse;// 30 * 80 = 2400
    for (int frame_idx = FRAME_START;
            frame_idx < FRAME_START + FRAME_COUNT; ++frame_idx) {
#ifdef _OPENMP
        timer_start = omp_get_wtime();
#else
        timer_start = clock();
#endif
        double anim_time = ((double) frame_idx) / 30.0;
        // Earth arrival (300 frames)
        /*
        g_cam_loc = lat_long_to_dir(0,
            M_PI / -2,
            pow(2.71828, -anim_time * 1.5424948470) * 5000000 + 18000);
        cam_lookat(g_earth_loc, vec_new(0, 1, 0));
        */
        // Apollo blue marble recreation
        g_cam_loc = lat_long_to_dir(
            -0.4994,
            0.6592,
            g_earth_radius + 29000);
        g_cam_focal_len = 4.0;
        cam_lookat(g_earth_loc, vec_new(0, 1, 0));
        // Introduction
        /*
        g_cam_loc = lat_long_to_dir(0, M_PI / -2, 18000);
        generate_octopus(
            g_cam_loc,
            100,
            clampd((anim_time - 20.0) / 10, 0.0, 0.5),
            vec_new(0, 1, 0));
        cam_lookat(g_earth_loc, vec_new(0, 1, 0));
        */
        /* // Oscillate between latitudes
        g_cam_loc = lat_long_to_dir(
            sin(anim_time * (M_PI / 2) * 0.05) * 0.7,
            M_PI / -2,
            18000);
        generate_octopus(g_cam_loc, 100, 0.5, vec_new(0, 1, 0));
        cam_lookat(g_earth_loc, vec_new(0, 1, 0));
        */
        /* // Fixed camera of above
        g_cam_loc = lat_long_to_dir((M_PI / 180.0) * 15.0, M_PI / -2, 18000);
        generate_octopus(lat_long_to_dir(
            sin(anim_time * (M_PI / 2) * 0.05) * 0.7, M_PI / -2, 10000),
            100, 0.5, vec_new(0, 1, 0));
        cam_lookat(g_earth_loc, vec_new(0, 1, 0));
        */
        /*// Oscillate between latitudes while also rotating around the globe
        g_cam_loc = lat_long_to_dir(sin(anim_time * (M_PI / 2) * 0.05) * 0.7,
            anim_time * (M_PI / 2) * 0.2 + (M_PI / -2), 18000);
        generate_octopus(g_cam_loc, 100, 0.5, vec_new(0, 1, 0));
        cam_lookat(g_earth_loc, vec_new(0, 1, 0));
        */
        // Polar orbit around the globe
        /*
        g_cam_loc = lat_long_to_dir(
            (anim_time * (2 * M_PI)) / 80.0, M_PI / -2, 18000);
        generate_octopus(g_cam_loc, 100, 0.5, vec_new(1, 0, 0));
        cam_lookat(g_earth_loc, vec_new(1, 0, 0));
        */
        // Polar flyby
        /*
        g_cam_focal_len = 4.0;
        g_cam_loc = lat_long_to_dir(
            0, (M_PI / -2) + (M_PI / 16), g_earth_radius + 4000);
        Vec3 swirl_loc = lat_long_to_dir(
            (anim_time * (M_PI)) / 80.0 - M_PI / 2,
            M_PI / -2, g_earth_radius);
        generate_octopus(swirl_loc, 100, 0.5, vec_new(1, 0, 0));
        cam_lookat(swirl_loc, vec_norm(g_cam_loc));
        */
        // Camera rolling transition
        /*
        g_cam_loc = lat_long_to_dir(0, M_PI / -2, 18000);
        Vec3 rot_up = vec_new(sin(
            anim_time * (M_PI / 2) * 0.2),
            cos(anim_time * (M_PI / 2) * 0.2), 0);
        generate_octopus(g_cam_loc, 100, 0.5, rot_up);
        cam_lookat(g_earth_loc, rot_up);
        */
        if (!render_frame(anim_time)) return -1;
        // "frame/" + 6 digits + ".jpg" + NUL = 17 bytes minimum; 32 leaves
        // headroom and snprintf guarantees termination within the buffer.
        char fname_buff[32];
        snprintf(fname_buff, sizeof fname_buff, "frame/%06d.jpg", frame_idx);
        if (!export_frame(fname_buff)) return -1;
#ifdef _OPENMP
        timer_end = omp_get_wtime();
        time_elapse = timer_end - timer_start;
#else
        timer_end = clock();
        time_elapse = (double)(timer_end - timer_start) / CLOCKS_PER_SEC;
#endif
        printf("Time taken: %f seconds\n", time_elapse);
        fflush(stdout);
    }
    unload_plate();
    return 0;
}
|
ZAdaptiveNormals.h | /****************************************************************************
**
** Copyright (C) 2017 TU Wien, ACIN, Vision 4 Robotics (V4R) group
** Contact: v4r.acin.tuwien.ac.at
**
** This file is part of V4R
**
** V4R is distributed under dual licenses - GPLv3 or closed source.
**
** GNU General Public License Usage
** V4R is free software: you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published
** by the Free Software Foundation, either version 3 of the License, or
** (at your option) any later version.
**
** V4R is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** Please review the following information to ensure the GNU General Public
** License requirements will be met: https://www.gnu.org/licenses/gpl-3.0.html.
**
**
** Commercial License Usage
** If GPL is not suitable for your project, you must purchase a commercial
** license to use V4R. Licensees holding valid commercial V4R licenses may
** use this file in accordance with the commercial license agreement
** provided with the Software or, alternatively, in accordance with the
** terms contained in a written agreement between you and TU Wien, ACIN, V4R.
** For licensing terms and conditions please contact office<at>acin.tuwien.ac.at.
**
**
** The copyright holder additionally grants the author(s) of the file the right
** to use, copy, modify, merge, publish, distribute, sublicense, and/or
** sell copies of their contributions without any restrictions.
**
****************************************************************************/
#ifndef SURFACE_ZADAPTIVE_NORMALS_HH
#define SURFACE_ZADAPTIVE_NORMALS_HH
#include <math.h>
#include <omp.h>
#include <pcl/common/eigen.h>
#include <pcl/common/time.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/search/kdtree.h>
#include <boost/shared_ptr.hpp>
#include <iostream>
#include <stdexcept>
#include "v4r/attention_segmentation/EPUtils.h"
namespace v4r {
/**
* Surface normals estimation
*/
template <typename T>
class ZAdaptiveNormals {
 public:
  class Parameter {
   public:
    double radius;            // euclidean inlier radius
    int kernel;               // kernel radius [px]
    bool adaptive;            // Activate z-adaptive normals calcualation
    float kappa;              // gradient
    float d;                  // constant
    float kernel_radius[8];   // Kernel radius for each 0.5 meter intervall (0-4m)
    Parameter(double _radius = 0.02, int _kernel = 5, bool _adaptive = false, float _kappa = 0.005125, float _d = 0.0)
        : radius(_radius), kernel(_kernel), adaptive(_adaptive), kappa(_kappa), d(_d) {}
  };

 private:
  Parameter param;
  float NaN;                  // cached quiet NaN used to invalidate points/normals
  int width, height;          // organized-cloud dimensions, set by setInputCloud
  float sqr_radius;           // squared inlier radius, derived in setParameter
  cv::Mat mask;               // per-pixel flag: compute a normal here if > 0
  typename pcl::PointCloud<T>::Ptr cloud;          // input (may be modified: NaN-ed points)
  pcl::PointCloud<pcl::Normal>::Ptr normals;       // output normals, same layout as cloud
  void estimateNormals();
  void getIndices(int u, int v, int kernel, std::vector<int> &indices) const;
  float computeNormal(const std::vector<int> &indices, Eigen::Matrix3f &eigen_vectors) const;
  inline int getIdx(short x, short y) const;
  inline short X(int idx) const;
  inline short Y(int idx) const;
  inline bool checkNotNaN(const T &p) const;

 public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  ZAdaptiveNormals(Parameter p = Parameter());
  ~ZAdaptiveNormals();
  void setParameter(Parameter p);
  void setInputCloud(const typename pcl::PointCloud<T>::Ptr &_cloud);
  void compute();
  void compute(const std::vector<int> &_mask);
  void getNormals(pcl::PointCloud<pcl::Normal> &_normals);
  // for compatibility
  void getNormals(pcl::PointCloud<pcl::Normal>::Ptr &_normals);
  // print normals
  void print(std::string file_name);
};
/*********************** INLINE METHODES **************************/
/* Flatten (x, y) image coordinates into a row-major cloud index. */
template <typename T>
inline int ZAdaptiveNormals<T>::getIdx(short x, short y) const {
  return y * width + x;
}
/* Column of a flattened index. */
template <typename T>
inline short ZAdaptiveNormals<T>::X(int idx) const {
  return idx % width;
}
/* Row of a flattened index. */
template <typename T>
inline short ZAdaptiveNormals<T>::Y(int idx) const {
  return idx / width;
}
// return true of point is not NaN
template <typename T>
inline bool ZAdaptiveNormals<T>::checkNotNaN(const T &p) const {
  if (std::isnan(p.x) || std::isnan(p.y) || std::isnan(p.z)) {
    return (false);
  }
  return (true);
}
/********************** ZAdaptiveNormals ************************
 * Constructor/Destructor
 */
template <typename T>
ZAdaptiveNormals<T>::ZAdaptiveNormals(Parameter p) {
  NaN = std::numeric_limits<float>::quiet_NaN();
  setParameter(p);
  // Kernel radius per 0.5 m depth band (index = z * 2, clamped to 7):
  // nearer points get smaller windows, farther points larger ones.
  param.kernel_radius[0] = 3;
  param.kernel_radius[1] = 3;
  param.kernel_radius[2] = 3;
  param.kernel_radius[3] = 3;
  param.kernel_radius[4] = 4;
  param.kernel_radius[5] = 5;
  param.kernel_radius[6] = 6;
  param.kernel_radius[7] = 7;
}
template <typename T>
ZAdaptiveNormals<T>::~ZAdaptiveNormals() {}
/************************** PUBLIC *************************/
/**
 * setInputCloud
 * Store the (organized) input cloud, cache its dimensions, and allocate an
 * output normal cloud of identical layout. Throws for unorganized clouds.
 */
template <typename T>
void ZAdaptiveNormals<T>::setInputCloud(const typename pcl::PointCloud<T>::Ptr &_cloud) {
  if (!_cloud->isOrganized())
    throw std::runtime_error("[ZAdaptiveNormals::compute] Need an organized point cloud!");
  cloud = _cloud;
  width = cloud->width;
  height = cloud->height;
  normals.reset(new pcl::PointCloud<pcl::Normal>);
  normals->points.resize(cloud->points.size());
  normals->width = cloud->width;
  normals->height = cloud->height;
  normals->is_dense = cloud->is_dense;
}
/**
 * compute the normals
 * Estimate a normal for every pixel (mask is set to all-ones).
 * Requires setInputCloud to have been called first.
 */
template <typename T>
void ZAdaptiveNormals<T>::compute() {
  if (cloud.get() == 0)
    throw std::runtime_error("[ZAdaptiveNormals::compute] No point cloud available!");
  mask = cv::Mat_<int>::ones(height, width);
  estimateNormals();
}
/**
 * compute the normals using a mask
 * Estimate normals only at the flattened cloud indices listed in _mask.
 * Requires setInputCloud to have been called first.
 */
template <typename T>
void ZAdaptiveNormals<T>::compute(const std::vector<int> &_mask) {
  if (cloud.get() == 0)
    throw std::runtime_error("[ZAdaptiveNormals::compute] No point cloud available!");
  mask = cv::Mat_<int>::zeros(height, width);
  // size_t loop index avoids the signed/unsigned comparison against size().
  for (size_t i = 0; i < _mask.size(); ++i) {
    int idx = _mask.at(i);
    mask.at<int>(Y(idx), X(idx)) = 1;
  }
  estimateNormals();
}
/**
 * getNormals
 * Copy the computed normals into _normals (deep copy).
 */
template <typename T>
void ZAdaptiveNormals<T>::getNormals(pcl::PointCloud<pcl::Normal> &_normals) {
  _normals = *normals;
}
/* Shared-pointer variant: hands out the internal normal cloud (no copy). */
template <typename T>
void ZAdaptiveNormals<T>::getNormals(pcl::PointCloud<pcl::Normal>::Ptr &_normals) {
  _normals = normals;
}
/**
 * setParameter
 * Replace the parameter set and cache the squared inlier radius.
 * Note: overwrites kernel_radius[] too; the constructor re-fills it after
 * calling this.
 */
template <typename T>
void ZAdaptiveNormals<T>::setParameter(Parameter p) {
  param = p;
  sqr_radius = p.radius * p.radius;
}
/************************** PRIVATE ************************/
/**
 * GetIndices of the neighbourhood of the point depending on the kernel size
 * Collects indices of valid (non-NaN) neighbours of (u, v) inside the
 * (2*kernel+1)^2 window whose Euclidean distance to the centre point is
 * within the inlier radius (optionally z-adaptive: radius grows with the
 * neighbour's depth and its pixel distance from the window centre).
 */
template <typename T>
void ZAdaptiveNormals<T>::getIndices(int u, int v, int kernel, std::vector<int> &indices) const {
  indices.clear();
  const T &pt = cloud->points.at(getIdx(u, v));
  for (int vkernel = -kernel; vkernel <= kernel; ++vkernel) {
    for (int ukernel = -kernel; ukernel <= kernel; ++ukernel) {
      int y = v + vkernel;
      int x = u + ukernel;
      float center_dist = sqrt(vkernel * vkernel + ukernel * ukernel);
      // Bug fix: '>=' instead of '>' -- the previous test wrongly excluded
      // the first image row (y == 0) and column (x == 0) from every window.
      if ((x >= 0) && (y >= 0) && (x < width) && (y < height)) {
        int idx = getIdx(x, y);
        const T &pt1 = cloud->points.at(idx);
        if (checkNotNaN(pt1)) {
          float new_sqr_radius = sqr_radius;
          if (param.adaptive) {
            float val = param.kappa * center_dist * pt1.z + param.d;
            new_sqr_radius = val * val;
          }
          if ((pt.getVector3fMap() - pt1.getVector3fMap()).squaredNorm() < new_sqr_radius) {
            indices.push_back(idx);
          }
        }
      }
    }
  }
}
/**
 * ComputeNormal
 * PCA-based normal: eigen-decompose the covariance of the neighbourhood
 * points; eigen_vectors receives the basis (column 0 = normal direction).
 * Returns the curvature estimate (smallest eigenvalue / eigenvalue sum),
 * or NaN when fewer than 4 neighbours exist or the eigenvalue sum is zero.
 */
template <typename T>
float ZAdaptiveNormals<T>::computeNormal(const std::vector<int> &indices, Eigen::Matrix3f &eigen_vectors) const {
  if (indices.size() < 4)
    return NaN;
  Eigen::Vector3f mean;
  mean.setZero();
  v4r::computeMean<T>(*cloud, mean, indices);
  Eigen::Matrix3f cov;
  v4r::computeCovarianceMatrix<T>(*cloud, mean, cov, indices);
  Eigen::Vector3f eigen_values;
  pcl::eigen33(cov, eigen_vectors, eigen_values);
  float eigsum = eigen_values.sum();
  if (eigsum != 0)
    return fabs(eigen_values[0] / eigsum);
  return NaN;
}
/**
* EstimateNormals
*/
/**
 * Estimate a normal for every masked pixel of the organized cloud.
 * Pixels whose neighbourhood has fewer than 4 valid points are invalidated
 * in place (point AND normal set to NaN) and both clouds are flagged as
 * not dense afterwards. Normals are oriented towards the viewpoint and
 * completed to Hessian form (4th component = -n.p).
 */
template <typename T>
void ZAdaptiveNormals<T>::estimateNormals() {
  bool havenan = false;
#pragma omp parallel for shared(havenan)
  for (int v = 0; v < height; v++) {
    for (int u = 0; u < width; u++) {
      if (mask.at<int>(v, u) > 0) {
        std::vector<int> indices;
        int idx = getIdx(u, v);
        T &pt = cloud->points.at(idx);
        pcl::Normal &n = normals->points.at(idx);
        if (checkNotNaN(pt)) {
          if (param.adaptive) {
            // pick a kernel radius from the lookup table by depth
            int dist = (int)(pt.z * 2);  // *2 => every 0.5 meter another kernel radius
            if (dist > 7)
              dist = 7;
            getIndices(u, v, param.kernel_radius[dist], indices);
          } else
            getIndices(u, v, param.kernel, indices);
        }
        if (indices.size() < 4) {
          // not enough support points: invalidate point and normal
#pragma omp critical
          { havenan = true; }
          n.normal[0] = NaN;
          n.normal[1] = NaN;
          n.normal[2] = NaN;
          pt.x = NaN;
          pt.y = NaN;
          pt.z = NaN;
          continue;
        }
        EIGEN_ALIGN16 Eigen::Matrix3f eigen_vectors;
        n.curvature = computeNormal(indices, eigen_vectors);
        // column 0 = eigenvector of the smallest eigenvalue = surface normal
        n.normal[0] = eigen_vectors(0, 0);
        n.normal[1] = eigen_vectors(1, 0);
        n.normal[2] = eigen_vectors(2, 0);
        // orient normal to us
        if (n.getNormalVector3fMap().dot(pt.getVector3fMap()) > 0) {
          n.getNormalVector4fMap() *= -1;
        }
        // the fourth parameter is to complete hessian form --> d coefficient in the plane
        n.getNormalVector4fMap()[3] = 0;
        n.getNormalVector4fMap()[3] = -1 * n.getNormalVector3fMap().dot(pt.getVector3fMap());
      }
    }
  }
  // any invalidated point makes the clouds non-dense
  if (havenan) {
    cloud->is_dense = false;
    normals->is_dense = false;
  }
}
/**
* Print normals into file
*/
/**
 * Dump all normals as plain text, one "index nx ny nz curvature" per line.
 * @param file_name path of the output file (truncated if it exists).
 * @throws std::runtime_error if the file cannot be opened for writing.
 *
 * Fixes: the original never checked the fopen() result, so an unwritable
 * path crashed inside fprintf(); the loop also compared a signed int
 * against the unsigned size().
 */
template <typename T>
void ZAdaptiveNormals<T>::print(std::string file_name) {
  FILE *f = std::fopen(file_name.c_str(), "w");
  if (f == NULL)
    throw std::runtime_error("[ZAdaptiveNormals::print] Could not open file for writing!");
  for (size_t i = 0; i < normals->size(); ++i) {
    const pcl::Normal &n = normals->points.at(i);
    fprintf(f, "%zu %f %f %f %f \n", i, n.normal[0], n.normal[1], n.normal[2], n.curvature);
  }
  std::fclose(f);
}
} // namespace v4r
#endif
|
convolution_sgemm_pack8to1_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// int8 GEMM for pack8 input -> pack1 output:
//   top_blob[p][i] = sum over (inch*maxk) of kernel[p] * bottom_im2col[i]
// Dispatches at runtime to VNNI/XOP specializations when available, then
// permutes im2col columns into tiles of 4/2/1 pixels and multiplies.
//
// Fix: in the AVX2 tail loop over single output channels (4-pixel path),
// _sum4 extracted lane 1 of _sum4_6 — the same lane as _sum6 — so
// outptr0[2] duplicated outptr0[3]. It must extract lane 0.
static void im2col_sgemm_pack8to1_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
#if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
    if (ncnn::cpu_support_x86_avx512_vnni())
    {
        extern void im2col_sgemm_pack8to1_int8_sse_avx512vnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
        im2col_sgemm_pack8to1_int8_sse_avx512vnni(bottom_im2col, top_blob, kernel, opt);
        return;
    }
#endif
#if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
    if (ncnn::cpu_support_x86_avx_vnni())
    {
        extern void im2col_sgemm_pack8to1_int8_sse_avxvnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
        im2col_sgemm_pack8to1_int8_sse_avxvnni(bottom_im2col, top_blob, kernel, opt);
        return;
    }
#endif
#if NCNN_XOP && __SSE2__ && !__XOP__
    if (ncnn::cpu_support_x86_xop())
    {
        extern void im2col_sgemm_pack8to1_int8_sse_xop(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
        im2col_sgemm_pack8to1_int8_sse_xop(bottom_im2col, top_blob, kernel, opt);
        return;
    }
#endif

    // Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);

    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    // permute: re-tile im2col columns into groups of 4 (AVX2) / 2 / 1 pixels
    // so the GEMM inner loops read contiguous memory
    Mat tmp;
#if __AVX2__
    if (size >= 4)
        tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
    else if (size >= 2)
        tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#else
    if (size >= 2)
        tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#endif
    {
#if __AVX2__
        int remain_size_start = 0;
        int nn_size = size >> 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;

            int64_t* tmpptr = tmp.channel(i / 4);

            for (int q = 0; q < inch; q++)
            {
                const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    // 4 pixels x 8 packed int8 = 32 bytes per step
                    __m256i _v = _mm256_loadu_si256((const __m256i*)img0);
                    _mm256_storeu_si256((__m256i*)tmpptr, _v);
                    tmpptr += 4;
                    img0 += size;
                }
            }
        }

        remain_size_start += nn_size << 2;
        nn_size = (size - remain_size_start) >> 1;
#else
        int remain_size_start = 0;
        int nn_size = (size - remain_size_start) >> 1;
#endif

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 2;

#if __AVX2__
            int64_t* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
            int64_t* tmpptr = tmp.channel(i / 2);
#endif

            for (int q = 0; q < inch; q++)
            {
                const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    __m128i _v = _mm_loadu_si128((const __m128i*)img0);
                    _mm_storeu_si128((__m128i*)tmpptr, _v);
                    tmpptr += 2;
                    img0 += size;
                }
            }
        }

        remain_size_start += nn_size << 1;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
#if __AVX2__
            int64_t* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
            int64_t* tmpptr = tmp.channel(i / 2 + i % 2);
#endif

            for (int q = 0; q < inch; q++)
            {
                const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    tmpptr[0] = img0[0];
                    tmpptr += 1;
                    img0 += size;
                }
            }
        }
    }

    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = outch >> 2;

    // output channels, four at a time
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 4;

        int* outptr0 = top_blob.channel(p);
        int* outptr1 = top_blob.channel(p + 1);
        int* outptr2 = top_blob.channel(p + 2);
        int* outptr3 = top_blob.channel(p + 3);

        int i = 0;
#if __AVX2__
        // 4 pixels x 4 output channels per iteration
        for (; i + 3 < size; i += 4)
        {
            const signed char* tmpptr = tmp.channel(i / 4);
            const signed char* kptr0 = kernel.channel(p / 4);

            int nn = inch * maxk; // inch always > 0

            __m256i _sum00_11 = _mm256_setzero_si256();
            __m256i _sum10_01 = _mm256_setzero_si256();
            __m256i _sum02_13 = _mm256_setzero_si256();
            __m256i _sum12_03 = _mm256_setzero_si256();
            __m256i _sum04_15 = _mm256_setzero_si256();
            __m256i _sum14_05 = _mm256_setzero_si256();
            __m256i _sum06_17 = _mm256_setzero_si256();
            __m256i _sum16_07 = _mm256_setzero_si256();

            int j = 0;
            for (; j < nn; j++)
            {
                __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
                __m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);

                __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
                __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
                __m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);

                __m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);

#if __AVXVNNI__ || __AVX512VNNI__
                _sum00_11 = _mm256_dpwssd_epi32(_sum00_11, _val01_16, _w01_16);
                _sum10_01 = _mm256_dpwssd_epi32(_sum10_01, _val10_16, _w01_16);
                _sum02_13 = _mm256_dpwssd_epi32(_sum02_13, _val01_16, _w23_16);
                _sum12_03 = _mm256_dpwssd_epi32(_sum12_03, _val10_16, _w23_16);
#else
                __m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16);
                __m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16);
                __m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16);
                __m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16);
                __m256i _sl02_13 = _mm256_mullo_epi16(_val01_16, _w23_16);
                __m256i _sh02_13 = _mm256_mulhi_epi16(_val01_16, _w23_16);
                __m256i _sl12_03 = _mm256_mullo_epi16(_val10_16, _w23_16);
                __m256i _sh12_03 = _mm256_mulhi_epi16(_val10_16, _w23_16);

                _sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_unpacklo_epi16(_sl00_11, _sh00_11));
                _sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_unpacklo_epi16(_sl10_01, _sh10_01));
                _sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_unpacklo_epi16(_sl02_13, _sh02_13));
                _sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_unpacklo_epi16(_sl12_03, _sh12_03));
                _sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_unpackhi_epi16(_sl00_11, _sh00_11));
                _sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_unpackhi_epi16(_sl10_01, _sh10_01));
                _sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_unpackhi_epi16(_sl02_13, _sh02_13));
                _sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_unpackhi_epi16(_sl12_03, _sh12_03));
#endif

                __m128i _val23 = _mm_loadu_si128((const __m128i*)(tmpptr + 16));
                __m256i _val23_16 = _mm256_cvtepi8_epi16(_val23);

                __m256i _val32_16 = _mm256_permute4x64_epi64(_val23_16, 78);

#if __AVXVNNI__ || __AVX512VNNI__
                _sum04_15 = _mm256_dpwssd_epi32(_sum04_15, _val23_16, _w01_16);
                _sum14_05 = _mm256_dpwssd_epi32(_sum14_05, _val32_16, _w01_16);
                _sum06_17 = _mm256_dpwssd_epi32(_sum06_17, _val23_16, _w23_16);
                _sum16_07 = _mm256_dpwssd_epi32(_sum16_07, _val32_16, _w23_16);
#else
                __m256i _sl04_15 = _mm256_mullo_epi16(_val23_16, _w01_16);
                __m256i _sh04_15 = _mm256_mulhi_epi16(_val23_16, _w01_16);
                __m256i _sl14_05 = _mm256_mullo_epi16(_val32_16, _w01_16);
                __m256i _sh14_05 = _mm256_mulhi_epi16(_val32_16, _w01_16);
                __m256i _sl06_17 = _mm256_mullo_epi16(_val23_16, _w23_16);
                __m256i _sh06_17 = _mm256_mulhi_epi16(_val23_16, _w23_16);
                __m256i _sl16_07 = _mm256_mullo_epi16(_val32_16, _w23_16);
                __m256i _sh16_07 = _mm256_mulhi_epi16(_val32_16, _w23_16);

                _sum04_15 = _mm256_add_epi32(_sum04_15, _mm256_unpacklo_epi16(_sl04_15, _sh04_15));
                _sum14_05 = _mm256_add_epi32(_sum14_05, _mm256_unpacklo_epi16(_sl14_05, _sh14_05));
                _sum06_17 = _mm256_add_epi32(_sum06_17, _mm256_unpacklo_epi16(_sl06_17, _sh06_17));
                _sum16_07 = _mm256_add_epi32(_sum16_07, _mm256_unpacklo_epi16(_sl16_07, _sh16_07));
                _sum04_15 = _mm256_add_epi32(_sum04_15, _mm256_unpackhi_epi16(_sl04_15, _sh04_15));
                _sum14_05 = _mm256_add_epi32(_sum14_05, _mm256_unpackhi_epi16(_sl14_05, _sh14_05));
                _sum06_17 = _mm256_add_epi32(_sum06_17, _mm256_unpackhi_epi16(_sl06_17, _sh06_17));
                _sum16_07 = _mm256_add_epi32(_sum16_07, _mm256_unpackhi_epi16(_sl16_07, _sh16_07));
#endif

                tmpptr += 32;
                kptr0 += 32;
            }

            // transpose 4x8
            {
                __m256i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm256_unpacklo_epi32(_sum00_11, _sum10_01);
                _tmp1 = _mm256_unpacklo_epi32(_sum02_13, _sum12_03);
                _tmp2 = _mm256_unpackhi_epi32(_sum00_11, _sum10_01);
                _tmp3 = _mm256_unpackhi_epi32(_sum02_13, _sum12_03);
                _sum00_11 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
                _sum10_01 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
                _sum02_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
                _sum12_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
            }
            {
                __m256i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm256_unpacklo_epi32(_sum04_15, _sum14_05);
                _tmp1 = _mm256_unpacklo_epi32(_sum06_17, _sum16_07);
                _tmp2 = _mm256_unpackhi_epi32(_sum04_15, _sum14_05);
                _tmp3 = _mm256_unpackhi_epi32(_sum06_17, _sum16_07);
                _sum04_15 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
                _sum14_05 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
                _sum06_17 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
                _sum16_07 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
            }

            _sum00_11 = _mm256_add_epi32(_sum00_11, _sum10_01);
            _sum02_13 = _mm256_add_epi32(_sum02_13, _sum12_03);
            _sum00_11 = _mm256_add_epi32(_sum00_11, _sum02_13);

            _sum04_15 = _mm256_add_epi32(_sum04_15, _sum14_05);
            _sum06_17 = _mm256_add_epi32(_sum06_17, _sum16_07);
            _sum04_15 = _mm256_add_epi32(_sum04_15, _sum06_17);

            __m256i _perm_mask = _mm256_set_epi32(6, 3, 4, 1, 7, 2, 5, 0);
            _sum00_11 = _mm256_permutevar8x32_epi32(_sum00_11, _perm_mask);
            _sum04_15 = _mm256_permutevar8x32_epi32(_sum04_15, _perm_mask);

            int sum[16];
            _mm256_storeu_si256((__m256i*)sum, _sum00_11);
            _mm256_storeu_si256((__m256i*)(sum + 8), _sum04_15);

            outptr0[0] = sum[0];
            outptr1[0] = sum[1];
            outptr2[0] = sum[2];
            outptr3[0] = sum[3];
            outptr0[1] = sum[4];
            outptr1[1] = sum[5];
            outptr2[1] = sum[6];
            outptr3[1] = sum[7];
            outptr0[2] = sum[8];
            outptr1[2] = sum[9];
            outptr2[2] = sum[10];
            outptr3[2] = sum[11];
            outptr0[3] = sum[12];
            outptr1[3] = sum[13];
            outptr2[3] = sum[14];
            outptr3[3] = sum[15];
            outptr0 += 4;
            outptr1 += 4;
            outptr2 += 4;
            outptr3 += 4;
        }
#endif
        // 2 pixels x 4 output channels per iteration
        for (; i + 1 < size; i += 2)
        {
#if __AVX2__
            const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
            const signed char* tmpptr = tmp.channel(i / 2);
#endif
            const signed char* kptr0 = kernel.channel(p / 4);

            int nn = inch * maxk; // inch always > 0

#if __AVX2__
            __m256i _sum00_11 = _mm256_setzero_si256();
            __m256i _sum10_01 = _mm256_setzero_si256();
            __m256i _sum02_13 = _mm256_setzero_si256();
            __m256i _sum12_03 = _mm256_setzero_si256();
#else
            __m128i _sum00 = _mm_setzero_si128();
            __m128i _sum01 = _mm_setzero_si128();
            __m128i _sum02 = _mm_setzero_si128();
            __m128i _sum03 = _mm_setzero_si128();
            __m128i _sum10 = _mm_setzero_si128();
            __m128i _sum11 = _mm_setzero_si128();
            __m128i _sum12 = _mm_setzero_si128();
            __m128i _sum13 = _mm_setzero_si128();
#endif

            int j = 0;
            for (; j < nn; j++)
            {
#if __AVX2__
                __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
                __m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);

                __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
                __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
                __m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);

                __m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);

#if __AVXVNNI__ || __AVX512VNNI__
                _sum00_11 = _mm256_dpwssd_epi32(_sum00_11, _val01_16, _w01_16);
                _sum10_01 = _mm256_dpwssd_epi32(_sum10_01, _val10_16, _w01_16);
                _sum02_13 = _mm256_dpwssd_epi32(_sum02_13, _val01_16, _w23_16);
                _sum12_03 = _mm256_dpwssd_epi32(_sum12_03, _val10_16, _w23_16);
#else
                __m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16);
                __m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16);
                __m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16);
                __m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16);
                __m256i _sl02_13 = _mm256_mullo_epi16(_val01_16, _w23_16);
                __m256i _sh02_13 = _mm256_mulhi_epi16(_val01_16, _w23_16);
                __m256i _sl12_03 = _mm256_mullo_epi16(_val10_16, _w23_16);
                __m256i _sh12_03 = _mm256_mulhi_epi16(_val10_16, _w23_16);

                _sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_unpacklo_epi16(_sl00_11, _sh00_11));
                _sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_unpacklo_epi16(_sl10_01, _sh10_01));
                _sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_unpacklo_epi16(_sl02_13, _sh02_13));
                _sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_unpacklo_epi16(_sl12_03, _sh12_03));
                _sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_unpackhi_epi16(_sl00_11, _sh00_11));
                _sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_unpackhi_epi16(_sl10_01, _sh10_01));
                _sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_unpackhi_epi16(_sl02_13, _sh02_13));
                _sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_unpackhi_epi16(_sl12_03, _sh12_03));
#endif
#else
                __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
                __m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
                __m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
                __m128i _val1 = _mm_unpackhi_epi8(_val01, _extval01);

                __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
                __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
                __m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);
                __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
                __m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
                __m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
                __m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);

#if __XOP__
                _sum00 = _mm_maddd_epi16(_val0, _w0, _sum00);
                _sum01 = _mm_maddd_epi16(_val0, _w1, _sum01);
                _sum02 = _mm_maddd_epi16(_val0, _w2, _sum02);
                _sum03 = _mm_maddd_epi16(_val0, _w3, _sum03);
                _sum10 = _mm_maddd_epi16(_val1, _w0, _sum10);
                _sum11 = _mm_maddd_epi16(_val1, _w1, _sum11);
                _sum12 = _mm_maddd_epi16(_val1, _w2, _sum12);
                _sum13 = _mm_maddd_epi16(_val1, _w3, _sum13);
#else
                __m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
                __m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
                __m128i _sl01 = _mm_mullo_epi16(_val0, _w1);
                __m128i _sh01 = _mm_mulhi_epi16(_val0, _w1);
                __m128i _sl02 = _mm_mullo_epi16(_val0, _w2);
                __m128i _sh02 = _mm_mulhi_epi16(_val0, _w2);
                __m128i _sl03 = _mm_mullo_epi16(_val0, _w3);
                __m128i _sh03 = _mm_mulhi_epi16(_val0, _w3);
                __m128i _sl10 = _mm_mullo_epi16(_val1, _w0);
                __m128i _sh10 = _mm_mulhi_epi16(_val1, _w0);
                __m128i _sl11 = _mm_mullo_epi16(_val1, _w1);
                __m128i _sh11 = _mm_mulhi_epi16(_val1, _w1);
                __m128i _sl12 = _mm_mullo_epi16(_val1, _w2);
                __m128i _sh12 = _mm_mulhi_epi16(_val1, _w2);
                __m128i _sl13 = _mm_mullo_epi16(_val1, _w3);
                __m128i _sh13 = _mm_mulhi_epi16(_val1, _w3);

                _sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
                _sum01 = _mm_add_epi32(_sum01, _mm_unpacklo_epi16(_sl01, _sh01));
                _sum02 = _mm_add_epi32(_sum02, _mm_unpacklo_epi16(_sl02, _sh02));
                _sum03 = _mm_add_epi32(_sum03, _mm_unpacklo_epi16(_sl03, _sh03));
                _sum00 = _mm_add_epi32(_sum00, _mm_unpackhi_epi16(_sl00, _sh00));
                _sum01 = _mm_add_epi32(_sum01, _mm_unpackhi_epi16(_sl01, _sh01));
                _sum02 = _mm_add_epi32(_sum02, _mm_unpackhi_epi16(_sl02, _sh02));
                _sum03 = _mm_add_epi32(_sum03, _mm_unpackhi_epi16(_sl03, _sh03));
                _sum10 = _mm_add_epi32(_sum10, _mm_unpacklo_epi16(_sl10, _sh10));
                _sum11 = _mm_add_epi32(_sum11, _mm_unpacklo_epi16(_sl11, _sh11));
                _sum12 = _mm_add_epi32(_sum12, _mm_unpacklo_epi16(_sl12, _sh12));
                _sum13 = _mm_add_epi32(_sum13, _mm_unpacklo_epi16(_sl13, _sh13));
                _sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl10, _sh10));
                _sum11 = _mm_add_epi32(_sum11, _mm_unpackhi_epi16(_sl11, _sh11));
                _sum12 = _mm_add_epi32(_sum12, _mm_unpackhi_epi16(_sl12, _sh12));
                _sum13 = _mm_add_epi32(_sum13, _mm_unpackhi_epi16(_sl13, _sh13));
#endif
#endif
                tmpptr += 16;
                kptr0 += 32;
            }

#if __AVX2__
            // transpose 4x8
            {
                __m256i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm256_unpacklo_epi32(_sum00_11, _sum10_01);
                _tmp1 = _mm256_unpacklo_epi32(_sum02_13, _sum12_03);
                _tmp2 = _mm256_unpackhi_epi32(_sum00_11, _sum10_01);
                _tmp3 = _mm256_unpackhi_epi32(_sum02_13, _sum12_03);
                _sum00_11 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
                _sum10_01 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
                _sum02_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
                _sum12_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
            }

            _sum00_11 = _mm256_add_epi32(_sum00_11, _sum10_01);
            _sum02_13 = _mm256_add_epi32(_sum02_13, _sum12_03);
            _sum00_11 = _mm256_add_epi32(_sum00_11, _sum02_13);

            __m256i _perm_mask = _mm256_set_epi32(6, 3, 4, 1, 7, 2, 5, 0);
            _sum00_11 = _mm256_permutevar8x32_epi32(_sum00_11, _perm_mask);

            int sum[8];
            _mm256_storeu_si256((__m256i*)sum, _sum00_11);
#else
            // transpose 4x4
            {
                __m128i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm_unpacklo_epi32(_sum00, _sum01);
                _tmp1 = _mm_unpacklo_epi32(_sum02, _sum03);
                _tmp2 = _mm_unpackhi_epi32(_sum00, _sum01);
                _tmp3 = _mm_unpackhi_epi32(_sum02, _sum03);
                _sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1);
                _sum01 = _mm_unpackhi_epi64(_tmp0, _tmp1);
                _sum02 = _mm_unpacklo_epi64(_tmp2, _tmp3);
                _sum03 = _mm_unpackhi_epi64(_tmp2, _tmp3);
            }
            {
                __m128i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm_unpacklo_epi32(_sum10, _sum11);
                _tmp1 = _mm_unpacklo_epi32(_sum12, _sum13);
                _tmp2 = _mm_unpackhi_epi32(_sum10, _sum11);
                _tmp3 = _mm_unpackhi_epi32(_sum12, _sum13);
                _sum10 = _mm_unpacklo_epi64(_tmp0, _tmp1);
                _sum11 = _mm_unpackhi_epi64(_tmp0, _tmp1);
                _sum12 = _mm_unpacklo_epi64(_tmp2, _tmp3);
                _sum13 = _mm_unpackhi_epi64(_tmp2, _tmp3);
            }

            _sum00 = _mm_add_epi32(_sum00, _sum01);
            _sum02 = _mm_add_epi32(_sum02, _sum03);
            _sum10 = _mm_add_epi32(_sum10, _sum11);
            _sum12 = _mm_add_epi32(_sum12, _sum13);

            _sum00 = _mm_add_epi32(_sum00, _sum02);
            _sum10 = _mm_add_epi32(_sum10, _sum12);

            int sum[8];
            _mm_storeu_si128((__m128i*)sum, _sum00);
            _mm_storeu_si128((__m128i*)(sum + 4), _sum10);
#endif

            outptr0[0] = sum[0];
            outptr1[0] = sum[1];
            outptr2[0] = sum[2];
            outptr3[0] = sum[3];
            outptr0[1] = sum[4];
            outptr1[1] = sum[5];
            outptr2[1] = sum[6];
            outptr3[1] = sum[7];
            outptr0 += 2;
            outptr1 += 2;
            outptr2 += 2;
            outptr3 += 2;
        }
        // 1 pixel x 4 output channels per iteration
        for (; i < size; i++)
        {
#if __AVX2__
            const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
            const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
            const signed char* kptr0 = kernel.channel(p / 4);

            int nn = inch * maxk; // inch always > 0

#if __AVX2__
            __m256i _sum0_1 = _mm256_setzero_si256();
            __m256i _sum2_3 = _mm256_setzero_si256();
#else
            __m128i _sum0 = _mm_setzero_si128();
            __m128i _sum1 = _mm_setzero_si128();
            __m128i _sum2 = _mm_setzero_si128();
            __m128i _sum3 = _mm_setzero_si128();
#endif

            int j = 0;
            for (; j < nn; j++)
            {
#if __AVX2__
                __m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
                _val = _mm_cvtepi8_epi16(_val);

                __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
                __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
                __m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);

                __m256i _valval = _mm256_inserti128_si256(_mm256_castsi128_si256(_val), _val, 1);

#if __AVXVNNI__ || __AVX512VNNI__
                _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _valval, _w01_16);
                _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _valval, _w23_16);
#else
                __m256i _sl0_1 = _mm256_mullo_epi16(_valval, _w01_16);
                __m256i _sh0_1 = _mm256_mulhi_epi16(_valval, _w01_16);
                __m256i _sl2_3 = _mm256_mullo_epi16(_valval, _w23_16);
                __m256i _sh2_3 = _mm256_mulhi_epi16(_valval, _w23_16);

                _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl0_1, _sh0_1));
                _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl2_3, _sh2_3));
                _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl0_1, _sh0_1));
                _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl2_3, _sh2_3));
#endif
#else
                __m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
                _val = _mm_cvtepi8_epi16(_val);
#else
                _val = _mm_unpacklo_epi8(_val, _mm_cmpgt_epi8(_mm_setzero_si128(), _val));
#endif

                __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
                __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
                __m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);
                __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
                __m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
                __m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
                __m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);

#if __XOP__
                _sum0 = _mm_maddd_epi16(_val, _w0, _sum0);
                _sum1 = _mm_maddd_epi16(_val, _w1, _sum1);
                _sum2 = _mm_maddd_epi16(_val, _w2, _sum2);
                _sum3 = _mm_maddd_epi16(_val, _w3, _sum3);
#else
                __m128i _sl0 = _mm_mullo_epi16(_val, _w0);
                __m128i _sh0 = _mm_mulhi_epi16(_val, _w0);
                __m128i _sl1 = _mm_mullo_epi16(_val, _w1);
                __m128i _sh1 = _mm_mulhi_epi16(_val, _w1);
                __m128i _sl2 = _mm_mullo_epi16(_val, _w2);
                __m128i _sh2 = _mm_mulhi_epi16(_val, _w2);
                __m128i _sl3 = _mm_mullo_epi16(_val, _w3);
                __m128i _sh3 = _mm_mulhi_epi16(_val, _w3);

                _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0));
                _sum1 = _mm_add_epi32(_sum1, _mm_unpacklo_epi16(_sl1, _sh1));
                _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl2, _sh2));
                _sum3 = _mm_add_epi32(_sum3, _mm_unpacklo_epi16(_sl3, _sh3));
                _sum0 = _mm_add_epi32(_sum0, _mm_unpackhi_epi16(_sl0, _sh0));
                _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl1, _sh1));
                _sum2 = _mm_add_epi32(_sum2, _mm_unpackhi_epi16(_sl2, _sh2));
                _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl3, _sh3));
#endif
#endif
                tmpptr += 8;
                kptr0 += 32;
            }

#if __AVX2__
            __m128i _sum0 = _mm256_extracti128_si256(_sum0_1, 0);
            __m128i _sum1 = _mm256_extracti128_si256(_sum0_1, 1);
            __m128i _sum2 = _mm256_extracti128_si256(_sum2_3, 0);
            __m128i _sum3 = _mm256_extracti128_si256(_sum2_3, 1);
#endif

            // transpose 4x4
            {
                __m128i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm_unpacklo_epi32(_sum0, _sum1);
                _tmp1 = _mm_unpacklo_epi32(_sum2, _sum3);
                _tmp2 = _mm_unpackhi_epi32(_sum0, _sum1);
                _tmp3 = _mm_unpackhi_epi32(_sum2, _sum3);
                _sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1);
                _sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1);
                _sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3);
                _sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3);
            }

            _sum0 = _mm_add_epi32(_sum0, _sum1);
            _sum2 = _mm_add_epi32(_sum2, _sum3);
            _sum0 = _mm_add_epi32(_sum0, _sum2);

            int sum[4];
            _mm_storeu_si128((__m128i*)sum, _sum0);

            outptr0[0] = sum[0];
            outptr1[0] = sum[1];
            outptr2[0] = sum[2];
            outptr3[0] = sum[3];
            outptr0 += 1;
            outptr1 += 1;
            outptr2 += 1;
            outptr3 += 1;
        }
    }

    remain_outch_start += nn_outch << 2;

    // remaining output channels, one at a time
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        int* outptr0 = top_blob.channel(p);

        int i = 0;
#if __AVX2__
        for (; i + 3 < size; i += 4)
        {
            const signed char* tmpptr = tmp.channel(i / 4);
            const signed char* kptr0 = kernel.channel(p / 4 + p % 4);

            int nn = inch * maxk; // inch always > 0

            __m256i _sum0_2 = _mm256_setzero_si256();
            __m256i _sum1_3 = _mm256_setzero_si256();
            __m256i _sum4_6 = _mm256_setzero_si256();
            __m256i _sum5_7 = _mm256_setzero_si256();

            int j = 0;
            for (; j < nn; j++)
            {
                __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
                __m128i _val23 = _mm_loadu_si128((const __m128i*)(tmpptr + 16));
                __m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
                __m256i _val23_16 = _mm256_cvtepi8_epi16(_val23);

                __m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0);
                __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
                _w01_16 = _mm256_permute4x64_epi64(_w01_16, _MM_SHUFFLE(1, 0, 1, 0));

                __m256i _sl00_10 = _mm256_mullo_epi16(_val01_16, _w01_16);
                __m256i _sh00_10 = _mm256_mulhi_epi16(_val01_16, _w01_16);
                __m256i _sl20_30 = _mm256_mullo_epi16(_val23_16, _w01_16);
                __m256i _sh20_30 = _mm256_mulhi_epi16(_val23_16, _w01_16);

                _sum0_2 = _mm256_add_epi32(_sum0_2, _mm256_unpacklo_epi16(_sl00_10, _sh00_10));
                _sum1_3 = _mm256_add_epi32(_sum1_3, _mm256_unpackhi_epi16(_sl00_10, _sh00_10));
                _sum4_6 = _mm256_add_epi32(_sum4_6, _mm256_unpacklo_epi16(_sl20_30, _sh20_30));
                _sum5_7 = _mm256_add_epi32(_sum5_7, _mm256_unpackhi_epi16(_sl20_30, _sh20_30));

                tmpptr += 32;
                kptr0 += 8;
            }

            _sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3);
            _sum4_6 = _mm256_add_epi32(_sum4_6, _sum5_7);

            __m128i _sum0 = _mm256_extracti128_si256(_sum0_2, 0);
            __m128i _sum2 = _mm256_extracti128_si256(_sum0_2, 1);
            __m128i _sum4 = _mm256_extracti128_si256(_sum4_6, 0); // fixed: was lane 1, duplicating _sum6
            __m128i _sum6 = _mm256_extracti128_si256(_sum4_6, 1);

            outptr0[0] = _mm_reduce_add_epi32(_sum0);
            outptr0[1] = _mm_reduce_add_epi32(_sum2);
            outptr0[2] = _mm_reduce_add_epi32(_sum4);
            outptr0[3] = _mm_reduce_add_epi32(_sum6);
            outptr0 += 4;
        }
#endif
        for (; i + 1 < size; i += 2)
        {
#if __AVX2__
            const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
            const signed char* tmpptr = tmp.channel(i / 2);
#endif
            const signed char* kptr0 = kernel.channel(p / 4 + p % 4);

            int nn = inch * maxk; // inch always > 0

#if __AVX2__
            __m256i _sum0_2 = _mm256_setzero_si256();
            __m256i _sum1_3 = _mm256_setzero_si256();
#else
            __m128i _sum0 = _mm_setzero_si128();
            __m128i _sum1 = _mm_setzero_si128();
            __m128i _sum2 = _mm_setzero_si128();
            __m128i _sum3 = _mm_setzero_si128();
#endif

            int j = 0;
            for (; j < nn; j++)
            {
#if __AVX2__
                __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
                __m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);

                __m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0);
                __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
                _w01_16 = _mm256_permute4x64_epi64(_w01_16, _MM_SHUFFLE(1, 0, 1, 0));

                __m256i _sl00_10 = _mm256_mullo_epi16(_val01_16, _w01_16);
                __m256i _sh00_10 = _mm256_mulhi_epi16(_val01_16, _w01_16);

                _sum0_2 = _mm256_add_epi32(_sum0_2, _mm256_unpacklo_epi16(_sl00_10, _sh00_10));
                _sum1_3 = _mm256_add_epi32(_sum1_3, _mm256_unpackhi_epi16(_sl00_10, _sh00_10));
#else
                __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
                __m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
                __m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
                __m128i _val1 = _mm_unpackhi_epi8(_val01, _extval01);

                __m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
                __m128i _w0 = _mm_cvtepi8_epi16(_w01);
#else
                __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
                __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
#endif

                __m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
                __m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
                __m128i _sl10 = _mm_mullo_epi16(_val1, _w0);
                __m128i _sh10 = _mm_mulhi_epi16(_val1, _w0);

                _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
                _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00));
                _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl10, _sh10));
                _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl10, _sh10));
#endif
                tmpptr += 16;
                kptr0 += 8;
            }

#if __AVX2__
            _sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3);
            __m128i _sum0 = _mm256_extracti128_si256(_sum0_2, 0);
            __m128i _sum2 = _mm256_extracti128_si256(_sum0_2, 1);
#else
            _sum0 = _mm_add_epi32(_sum0, _sum1);
            _sum2 = _mm_add_epi32(_sum2, _sum3);
#endif

            outptr0[0] = _mm_reduce_add_epi32(_sum0);
            outptr0[1] = _mm_reduce_add_epi32(_sum2);
            outptr0 += 2;
        }
        for (; i < size; i++)
        {
#if __AVX2__
            const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
            const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
            const signed char* kptr0 = kernel.channel(p / 4 + p % 4);

            int nn = inch * maxk; // inch always > 0

            __m128i _sum0 = _mm_setzero_si128();
            __m128i _sum1 = _mm_setzero_si128();

            int j = 0;
            for (; j < nn; j++)
            {
                __m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
                __m128i _val0 = _mm_cvtepi8_epi16(_val01);
#else
                __m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
                __m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
#endif

                __m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
                __m128i _w0 = _mm_cvtepi8_epi16(_w01);
#else
                __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
                __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
#endif

                __m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
                __m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);

                _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
                _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00));

                tmpptr += 8;
                kptr0 += 8;
            }

            _sum0 = _mm_add_epi32(_sum0, _sum1);

            outptr0[0] = _mm_reduce_add_epi32(_sum0);
            outptr0 += 1;
        }
    }
}
// Repack convolution weights from maxk-inch-outch layout into the
// interleaved 8a-4b-maxk-inch/8a-outch/4b layout consumed by the
// pack8to1 int8 sgemm kernel above.
static void convolution_im2col_sgemm_transform_kernel_pack8to1_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    Mat kernel = _kernel.reshape(maxk, inch, outch);

    if (outch >= 4)
        kernel_tm.create(32 * maxk, inch / 8, outch / 4 + outch % 4, (size_t)1u);
    else
        kernel_tm.create(8 * maxk, inch / 8, outch, (size_t)1u);

    int q = 0;
    // groups of four output channels
    for (; q + 3 < outch; q += 4)
    {
        signed char* dst = kernel_tm.channel(q / 4);

        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        const signed char* src = kernel.channel(q + i).row<const signed char>(p + j);
                        *dst++ = src[k];
                    }
                }
            }
        }
    }
    // leftover output channels, one at a time (TODO unroll 2)
    for (; q < outch; q++)
    {
        signed char* dst = kernel_tm.channel(q / 4 + q % 4);

        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int j = 0; j < 8; j++)
                {
                    const signed char* src = kernel.channel(q).row<const signed char>(p + j);
                    *dst++ = src[k];
                }
            }
        }
    }
}
// Unfold the pack-8 int8 input into im2col layout, then run the sgemm above.
static void convolution_im2col_sgemm_pack8to1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;

    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
    {
        // distance (in pack-8 elements) from the end of one output row to
        // the first sample of the next output row
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            int64_t* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const int64_t* sptr = img.row<const int64_t>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        for (int j = 0; j < outw; j++)
                        {
                            *ptr++ = *sptr;
                            sptr += stride_w;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack8to1_int8_sse(bottom_im2col, top_blob, kernel, opt);
}
|
resource_manager_test.h | // -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef UNIT_CORE_RESOURCE_MANAGER_TEST_H_
#define UNIT_CORE_RESOURCE_MANAGER_TEST_H_
#include <algorithm>
#include <vector>
#include "core/environment/environment.h"
#include "core/resource_manager.h"
#include "core/sim_object/sim_object.h"
#include "core/util/io.h"
#include "core/util/type.h"
#include "unit/test_util/test_sim_object.h"
#include "unit/test_util/test_util.h"
#define ROOTFILE "bdmFile.root"
namespace bdm {
// Minimal sim object carrying a single int payload; used to populate the
// ResourceManager in the tests below.
class A : public TestSimObject {
  BDM_SIM_OBJECT_HEADER(A, TestSimObject, 1, data_);

 public:
  A() {}  // for ROOT I/O
  // Event constructor required by the sim object framework.
  A(const Event& event, SimObject* other, uint64_t new_oid = 0)
      : Base(event, other, new_oid) {}
  explicit A(int data) { data_ = data; }
  int GetData() const { return data_; }
  void SetData(int data) { data_ = data; }
  // payload the tests use to identify this object
  int data_;
};
// Minimal sim object carrying a single double payload; counterpart to A so
// the tests can mix two sim object types.
class B : public TestSimObject {
  BDM_SIM_OBJECT_HEADER(B, TestSimObject, 1, data_);

 public:
  B() {}  // for ROOT I/O
  // Event constructor required by the sim object framework.
  B(const Event& event, SimObject* other, uint64_t new_oid = 0)
      : Base(event, other, new_oid) {}
  explicit B(double data) { data_ = data; }
  double GetData() const { return data_; }
  void SetData(double data) { data_ = data; }
  // payload the tests use to identify this object
  double data_;
};
inline void RunApplyOnAllElementsTest() {
const double kEpsilon = abs_error<double>::value;
Simulation simulation("RunApplyOnAllElementsTest");
auto* rm = simulation.GetResourceManager();
auto ref_uid = SoUid(simulation.GetSoUidGenerator()->GetHighestIndex());
rm->push_back(new A(12));
rm->push_back(new A(34));
rm->push_back(new B(3.14));
rm->push_back(new B(6.28));
uint64_t counter = 0;
rm->ApplyOnAllElements([&](SimObject* element) { // NOLINT
counter++;
switch (element->GetUid() - ref_uid) {
case 0:
EXPECT_EQ(12, dynamic_cast<A*>(element)->GetData());
break;
case 1:
EXPECT_EQ(34, dynamic_cast<A*>(element)->GetData());
break;
case 2:
EXPECT_NEAR(3.14, dynamic_cast<B*>(element)->GetData(), kEpsilon);
break;
case 3:
EXPECT_NEAR(6.28, dynamic_cast<B*>(element)->GetData(), kEpsilon);
break;
}
});
EXPECT_EQ(4u, counter);
}
inline void RunGetNumSimObjects() {
Simulation simulation("ResourceManagerTest-RunGetNumSimObjects");
auto* rm = simulation.GetResourceManager();
rm->push_back(new A(12));
rm->push_back(new A(34));
rm->push_back(new A(59));
rm->push_back(new B(3.14));
rm->push_back(new B(6.28));
EXPECT_EQ(5u, rm->GetNumSimObjects());
}
// Checks that ApplyOnAllElementsParallel visits exactly the three B objects
// inserted by RunApplyOnAllElementsParallelTest (uids 0..2) with their
// payloads intact; any other uid fails the test.
struct ApplyOnAllElementsParallelTestFunctor : Functor<void, SimObject*> {
  void operator()(SimObject* sim_object) override {
    const double kEpsilon = abs_error<double>::value;
    B* b = dynamic_cast<B*>(sim_object);
    SoUid uid = sim_object->GetUid();
    if (uid == SoUid(0)) {
      EXPECT_EQ(3.14, b->GetData());
    } else if (uid == SoUid(1)) {
      EXPECT_EQ(6.28, b->GetData());
    } else if (uid == SoUid(2)) {
      EXPECT_NEAR(9.42, b->GetData(), kEpsilon);
    } else {
      FAIL();  // unexpected sim object
    }
  }
};
// This test uses Cells since A, and B are strippted down simulation objects
// and are themselves not thread safe.
inline void RunApplyOnAllElementsParallelTest() {
Simulation simulation("RunApplyOnAllElementsParallelTest");
auto* rm = simulation.GetResourceManager();
rm->push_back(new B(3.14));
rm->push_back(new B(6.28));
rm->push_back(new B(9.42));
ApplyOnAllElementsParallelTestFunctor functor;
rm->ApplyOnAllElementsParallel(functor);
}
inline void RunRemoveAndContainsTest() {
Simulation simulation("ResourceManagerTest-RunRemoveAndContainsTest");
auto* rm = simulation.GetResourceManager();
A* a0 = new A(12);
auto a0_uid = a0->GetUid();
rm->push_back(a0);
A* a1 = new A(34);
auto a1_uid = a1->GetUid();
rm->push_back(a1);
A* a2 = new A(59);
auto a2_uid = a2->GetUid();
rm->push_back(a2);
B* b0 = new B(3.14);
auto b0_uid = b0->GetUid();
rm->push_back(b0);
B* b1 = new B(6.28);
auto b1_uid = b1->GetUid();
rm->push_back(b1);
EXPECT_TRUE(rm->Contains(a0_uid));
EXPECT_TRUE(rm->Contains(a1_uid));
EXPECT_TRUE(rm->Contains(a2_uid));
EXPECT_TRUE(rm->Contains(b0_uid));
EXPECT_TRUE(rm->Contains(b1_uid));
rm->Remove(a0_uid);
rm->Remove(a1_uid);
rm->Remove(a2_uid);
rm->Remove(b0_uid);
rm->Remove(b1_uid);
EXPECT_FALSE(rm->Contains(a0_uid));
EXPECT_FALSE(rm->Contains(a1_uid));
EXPECT_FALSE(rm->Contains(a2_uid));
EXPECT_FALSE(rm->Contains(b0_uid));
EXPECT_FALSE(rm->Contains(b1_uid));
EXPECT_EQ(0u, rm->GetNumSimObjects());
}
inline void RunClearTest() {
Simulation simulation("ResourceManagerTest-RunClearTest");
auto* rm = simulation.GetResourceManager();
A* a0 = new A(12);
auto a0_uid = a0->GetUid();
rm->push_back(a0);
A* a1 = new A(34);
auto a1_uid = a1->GetUid();
rm->push_back(a1);
A* a2 = new A(59);
auto a2_uid = a2->GetUid();
rm->push_back(a2);
B* b0 = new B(3.14);
auto b0_uid = b0->GetUid();
rm->push_back(b0);
B* b1 = new B(6.28);
auto b1_uid = b1->GetUid();
rm->push_back(b1);
EXPECT_TRUE(rm->Contains(a0_uid));
EXPECT_TRUE(rm->Contains(a1_uid));
EXPECT_TRUE(rm->Contains(a2_uid));
EXPECT_TRUE(rm->Contains(b0_uid));
EXPECT_TRUE(rm->Contains(b1_uid));
rm->Clear();
EXPECT_FALSE(rm->Contains(a0_uid));
EXPECT_FALSE(rm->Contains(a1_uid));
EXPECT_FALSE(rm->Contains(a2_uid));
EXPECT_FALSE(rm->Contains(b0_uid));
EXPECT_FALSE(rm->Contains(b1_uid));
EXPECT_EQ(0u, rm->GetNumSimObjects());
}
// Verifies that GetSimObject resolves uids to the objects pushed earlier,
// in insertion order.
inline void RunPushBackAndGetSimObjectTest() {
  const double kEpsilon = abs_error<double>::value;
  Simulation simulation("RunPushBackAndGetSimObjectTest");
  auto* rm = simulation.GetResourceManager();
  auto ref_uid = SoUid(simulation.GetSoUidGenerator()->GetHighestIndex());
  rm->push_back(new A(12));
  rm->push_back(new A(34));
  rm->push_back(new B(3.14));
  rm->push_back(new B(6.28));
  rm->push_back(new A(87));
  // uid offsets 0..4 correspond to the five push_back calls above
  EXPECT_EQ(12, dynamic_cast<A*>(rm->GetSimObject(ref_uid))->GetData());
  EXPECT_EQ(34, dynamic_cast<A*>(rm->GetSimObject(ref_uid + 1))->GetData());
  EXPECT_NEAR(3.14, dynamic_cast<B*>(rm->GetSimObject(ref_uid + 2))->GetData(),
              kEpsilon);
  EXPECT_NEAR(6.28, dynamic_cast<B*>(rm->GetSimObject(ref_uid + 3))->GetData(),
              kEpsilon);
  EXPECT_EQ(87, dynamic_cast<A*>(rm->GetSimObject(ref_uid + 4))->GetData());
}
// -----------------------------------------------------------------------------
// https://github.com/osmhpi/pgasus/blob/775a5f90d8f6fa89cfb93eac6de16dcfe27167ce/src/util/mmaphelper.cpp
// Rounds ptr down to the start of its 4 KiB page.
inline static void* AlignPage(const void* ptr) {
  uintptr_t addr = (uintptr_t)ptr;               // NOLINT
  return (void*)(addr - (addr & (uintptr_t)0xFFF));  // NOLINT
}
// Returns the NUMA node that backs the page containing ptr, or -1 if the
// lookup fails.
inline int GetNumaNodeForMemory(const void* ptr) {
  void* page = AlignPage(ptr);
  int numa_node = -1;
  // with a NULL nodes argument, numa_move_pages only queries the status
  int ret = numa_move_pages(0, 1, &page, nullptr, &numa_node, 0);
  return ret == 0 ? numa_node : -1;
}
// Computes the expected number of sim objects per NUMA node, proportional to
// the number of threads pinned to each node.
inline std::vector<uint64_t> GetSoPerNuma(uint64_t num_sim_objects) {
  auto* ti = ThreadInfo::GetInstance();
  int numa_nodes = ti->GetNumaNodes();
  auto max_threads = ti->GetMaxThreads();
  std::vector<uint64_t> so_per_numa(numa_nodes);
  uint64_t assigned = 0;
  for (int node = 1; node < numa_nodes; ++node) {
    uint64_t num_so =
        num_sim_objects * ti->GetThreadsInNumaNode(node) / max_threads;
    so_per_numa[node] = num_so;
    assigned += num_so;
  }
  // node 0 absorbs the rounding remainder
  so_per_numa[0] = num_sim_objects - assigned;
  return so_per_numa;
}
// -----------------------------------------------------------------------------
// Functor for ApplyOnAllElementsParallel: records which sim objects were
// visited (using their data_ payload as an index) and collects NUMA
// placement statistics for verification by CheckApplyOnAllElements.
struct CheckApplyOnAllElementsFunctor : Functor<void, SimObject*> {
  bool numa_checks;
  // found[data_] is set once the object carrying that payload was visited
  std::vector<bool> found;
  // total number of functor invocations
  std::atomic<uint64_t> cnt;
  // counts the number of sim objects in each numa domain
  std::vector<uint64_t> numa_so_cnts;
  // objects whose memory is not on the NUMA node their handle claims
  std::atomic<uint64_t> numa_memory_errors;
  // objects processed by a thread bound to a different NUMA node
  std::atomic<uint64_t> numa_thread_errors;
  CheckApplyOnAllElementsFunctor(uint64_t num_so_per_type, bool numa_checks)
      : numa_checks(numa_checks),
        cnt(0),
        numa_memory_errors(0),
        numa_thread_errors(0) {
    // one slot per object: num_so_per_type of each of the two types
    found.resize(2 * num_so_per_type);
    for (uint64_t i = 0; i < found.size(); ++i) {
      found[i] = false;
    }
    auto* ti = ThreadInfo::GetInstance();
    numa_so_cnts.resize(ti->GetNumaNodes());
  }
  void operator()(SimObject* so) override {
    // recover the index from the payload (A stores int, B stores double)
    size_t index = 0;
    if (A* a = dynamic_cast<A*>(so)) {
      index = a->GetData();
    } else if (B* b = dynamic_cast<B*>(so)) {
      index = std::round(b->GetData());
    }
    auto* rm = Simulation::GetActive()->GetResourceManager();
    auto handle = rm->GetSoHandle(so->GetUid());
#pragma omp critical
    {
      found[index] = true;
      // verify that a thread processes sim objects on the same NUMA node.
      if (numa_checks && handle.GetNumaNode() != GetNumaNodeForMemory(so)) {
        numa_memory_errors++;
      }
      if (numa_checks &&
          handle.GetNumaNode() != numa_node_of_cpu(sched_getcpu())) {
        numa_thread_errors++;
      }
      numa_so_cnts[handle.GetNumaNode()]++;
    }
    cnt++;
  }
};
// Runs CheckApplyOnAllElementsFunctor over all sim objects in rm and
// verifies that every object was visited exactly once; with numa_checks set
// it additionally requires zero NUMA placement errors and the distribution
// predicted by GetSoPerNuma.
inline void CheckApplyOnAllElements(ResourceManager* rm,
                                    uint64_t num_so_per_type,
                                    bool numa_checks = false) {
  CheckApplyOnAllElementsFunctor functor(num_so_per_type, numa_checks);
  rm->ApplyOnAllElementsParallel(functor);
  // each of the 2 * num_so_per_type objects must be processed exactly once
  EXPECT_EQ(2 * num_so_per_type, functor.cnt.load());
  ASSERT_EQ(2 * num_so_per_type, functor.found.size());
  for (uint64_t i = 0; i < functor.found.size(); ++i) {
    if (!functor.found[i]) {
      FAIL()
          << "ApplyOnAllElementsParallel was not called for element with data_="
          << i;
    }
  }
  if (numa_checks) {
    EXPECT_EQ(0u, functor.numa_memory_errors.load());
    EXPECT_EQ(0u, functor.numa_thread_errors.load());
    // per-node object counts must match the thread-proportional split
    auto so_per_numa = GetSoPerNuma(2 * num_so_per_type);
    auto* ti = ThreadInfo::GetInstance();
    for (int n = 0; n < ti->GetNumaNodes(); ++n) {
      EXPECT_EQ(so_per_numa[n], functor.numa_so_cnts[n]);
    }
  }
}
// Creates num_so_per_type objects of each type positioned along the x-axis,
// checks parallel iteration before and after SortAndBalanceNumaNodes, and
// verifies that uids still resolve to the same objects after sorting.
inline void RunSortAndApplyOnAllElementsParallel(uint64_t num_so_per_type) {
  Simulation simulation("RunSortAndApplyOnAllElementsParallel");
  auto* rm = simulation.GetResourceManager();
  std::unordered_map<SoUid, double> a_x_values;
  std::unordered_map<SoUid, double> b_x_values;
  for (uint64_t i = 0; i < num_so_per_type; ++i) {
    double x_pos = i * 30.0;
    // A objects carry payload [0, num_so_per_type)
    A* a = new A(i);
    a->SetDiameter(10);
    a->SetPosition({x_pos, 0, 0});
    rm->push_back(a);
    a_x_values[a->GetUid()] = x_pos;
    // B objects carry payload [num_so_per_type, 2 * num_so_per_type)
    B* b = new B(i + num_so_per_type);
    b->SetDiameter(10);
    b->SetPosition({x_pos, 0, 0});
    rm->push_back(b);
    b_x_values[b->GetUid()] = x_pos;
  }
  CheckApplyOnAllElements(rm, num_so_per_type);
  simulation.GetEnvironment()->Update();
  rm->SortAndBalanceNumaNodes();
  // after balancing, also run the NUMA placement checks
  CheckApplyOnAllElements(rm, num_so_per_type, true);
  // check if sim object uids still point to the correct object
  for (auto& entry : a_x_values) {
    auto x_actual = rm->GetSimObject(entry.first)->GetPosition()[0];
    EXPECT_EQ(x_actual, entry.second);
  }
  for (auto& entry : b_x_values) {
    auto x_actual = rm->GetSimObject(entry.first)->GetPosition()[0];
    EXPECT_EQ(x_actual, entry.second);
  }
}
// Driver: exercises object counts around the thread count, plus one larger
// fixed workload.
inline void RunSortAndApplyOnAllElementsParallel() {
  int num_threads = omp_get_max_threads();
  for (int n : {std::max(1, num_threads - 1), num_threads, 3 * num_threads,
                3 * num_threads + 1}) {
    RunSortAndApplyOnAllElementsParallel(n);
  }
  RunSortAndApplyOnAllElementsParallel(1000);
}
// -----------------------------------------------------------------------------
// Functor for ApplyOnAllElementsParallelDynamic: marks visited objects in a
// caller-provided bitmap and gathers NUMA placement statistics.
struct CheckApplyOnAllElementsDynamicFunctor
    : Functor<void, SimObject*, SoHandle> {
  CheckApplyOnAllElementsDynamicFunctor(bool numa_checks,
                                        std::vector<bool>& found)
      : numa_checks_(numa_checks),
        found_(found),
        cnt(0),
        numa_memory_errors(0) {
    auto* ti = ThreadInfo::GetInstance();
    numa_so_cnts.resize(ti->GetNumaNodes());
  }
  void operator()(SimObject* so, SoHandle handle) override {
#pragma omp critical
    {
      // recover the index from the payload (A stores int, B stores double)
      size_t index = 0;
      if (A* a = dynamic_cast<A*>(so)) {
        index = a->GetData();
      } else if (B* b = dynamic_cast<B*>(so)) {
        index = std::round(b->GetData());
      }
      found_[index] = true;
      // verify that a thread processes sim objects on the same NUMA node.
      if (numa_checks_ && handle.GetNumaNode() != GetNumaNodeForMemory(so)) {
        numa_memory_errors++;
      }
      numa_so_cnts[handle.GetNumaNode()]++;
    }
    cnt++;
  }
  bool numa_checks_;
  // caller-owned bitmap of visited payload indices
  std::vector<bool>& found_;
  // total number of functor invocations
  std::atomic<uint64_t> cnt;
  // counts the number of sim objects in each numa domain
  std::vector<uint64_t> numa_so_cnts;
  // If a simulation object is not stored on the NUMA indicated, it is a memory
  // error.
  std::atomic<uint64_t> numa_memory_errors;
};
// Functor that simulates some work per object (so the dynamic scheduler
// actually distributes batches) and counts how often an object is processed
// by a thread outside the object's NUMA domain.
struct CheckNumaThreadErrors : Functor<void, SimObject*, SoHandle> {
  CheckNumaThreadErrors() : numa_thread_errors(0) {
    ti_ = ThreadInfo::GetInstance();
  }
  void operator()(SimObject* so, SoHandle handle) override {
    // burn cpu time; volatile keeps the loop from being optimized away
    volatile double d = 0;
    for (int i = 0; i < 10000; i++) {
      d += std::sin(i);
    }
    if (handle.GetNumaNode() != ti_->GetNumaNode(omp_get_thread_num())) {
      numa_thread_errors++;
    }
  }
  // If a sim object is processed by a thread that doesn't belong to the NUMA
  // domain the sim object is stored on, it is a thread error.
  std::atomic<uint64_t> numa_thread_errors;
  ThreadInfo* ti_;
};
// Like CheckApplyOnAllElements, but for the dynamically scheduled variant:
// verifies each object is visited once, then checks NUMA placement with
// tolerances (work stealing makes exact matches impossible).
inline void CheckApplyOnAllElementsDynamic(ResourceManager* rm,
                                           uint64_t num_so_per_type,
                                           uint64_t batch_size,
                                           bool numa_checks = false) {
  std::vector<bool> found(2 * num_so_per_type);
  ASSERT_EQ(2 * num_so_per_type, found.size());
  for (uint64_t i = 0; i < found.size(); ++i) {
    found[i] = false;
  }
  auto* ti = ThreadInfo::GetInstance();
  CheckApplyOnAllElementsDynamicFunctor functor(numa_checks, found);
  rm->ApplyOnAllElementsParallelDynamic(batch_size, functor);
  // critical sections increase the variance of numa_thread_errors.
  // Therefore, there are checked separately.
  CheckNumaThreadErrors check_numa_thread_functor;
  rm->ApplyOnAllElementsParallelDynamic(batch_size, check_numa_thread_functor);
  // verify that the function has been called once for each sim object
  EXPECT_EQ(2 * num_so_per_type, functor.cnt.load());
  ASSERT_EQ(2 * num_so_per_type, found.size());
  for (uint64_t i = 0; i < found.size(); ++i) {
    if (!found[i]) {
      FAIL()
          << "ApplyOnAllElementsParallel was not called for element with data_="
          << i;
    }
  }
  if (numa_checks) {
    // If there are memory errors, check of
    // `cat /proc/sys/kernel/numa_balancing` is zero.
    // Automatic rebalancing can lead to numa memory errors.
    // only 0.1% of all sim objects may be on a wrong numa node
    EXPECT_GT(0.001, (functor.numa_memory_errors.load() + 0.0) /
                         (2 * num_so_per_type));
    // work stealing can cause thread errors. This check ensures that at least
    // 75% of the work is done by the correct CPU-Memory mapping.
    if (num_so_per_type > 20 * static_cast<uint64_t>(omp_get_max_threads())) {
      EXPECT_GT(num_so_per_type / 4,
                check_numa_thread_functor.numa_thread_errors.load());
    }
    // per-node object counts must still match the thread-proportional split
    auto so_per_numa = GetSoPerNuma(2 * num_so_per_type);
    for (int n = 0; n < ti->GetNumaNodes(); ++n) {
      EXPECT_EQ(so_per_numa[n], functor.numa_so_cnts[n]);
    }
  }
}
// Dynamic-scheduling counterpart of RunSortAndApplyOnAllElementsParallel:
// creates objects along the x-axis, checks dynamically scheduled iteration
// before and after NUMA balancing, and verifies uid resolution afterwards.
inline void RunSortAndApplyOnAllElementsParallelDynamic(
    uint64_t num_so_per_type, uint64_t batch_size) {
  Simulation simulation("RunSortAndApplyOnAllElementsParallel");
  auto* rm = simulation.GetResourceManager();
  std::unordered_map<SoUid, double> a_x_values;
  std::unordered_map<SoUid, double> b_x_values;
  for (uint64_t i = 0; i < num_so_per_type; ++i) {
    double x_pos = i * 30.0;
    // A objects carry payload [0, num_so_per_type)
    A* a = new A(i);
    a->SetDiameter(10);
    a->SetPosition({x_pos, 0, 0});
    rm->push_back(a);
    a_x_values[a->GetUid()] = x_pos;
    // B objects carry payload [num_so_per_type, 2 * num_so_per_type)
    B* b = new B(i + num_so_per_type);
    b->SetDiameter(10);
    b->SetPosition({x_pos, 0, 0});
    rm->push_back(b);
    b_x_values[b->GetUid()] = x_pos;
  }
  CheckApplyOnAllElementsDynamic(rm, num_so_per_type, batch_size);
  simulation.GetEnvironment()->Update();
  rm->SortAndBalanceNumaNodes();
  // after balancing, also run the NUMA placement checks
  CheckApplyOnAllElementsDynamic(rm, num_so_per_type, batch_size, true);
  // check if sim object uids still point to the correct object
  for (auto& entry : a_x_values) {
    auto x_actual = rm->GetSimObject(entry.first)->GetPosition()[0];
    EXPECT_EQ(x_actual, entry.second);
  }
  for (auto& entry : b_x_values) {
    auto x_actual = rm->GetSimObject(entry.first)->GetPosition()[0];
    EXPECT_EQ(x_actual, entry.second);
  }
}
// Driver: exercises all combinations of object count and batch size around
// the thread count, plus a larger workload per batch size.
inline void RunSortAndApplyOnAllElementsParallelDynamic() {
  int num_threads = omp_get_max_threads();
  // identical candidate values for both the object count and the batch size
  std::vector<int> sizes = {std::max(1, num_threads - 1), num_threads,
                            3 * num_threads, 3 * num_threads + 1};
  for (int n : sizes) {
    for (int b : sizes) {
      RunSortAndApplyOnAllElementsParallelDynamic(n, b);
    }
  }
  for (int b : sizes) {
    RunSortAndApplyOnAllElementsParallelDynamic(num_threads * 1000, b);
  }
}
// Round-trips a populated ResourceManager (5 sim objects + 2 diffusion
// grids) through a ROOT file and verifies the restored state.
inline void RunIOTest() {
  const double kEpsilon = abs_error<double>::value;
  Simulation simulation("ResourceManagerTest-RunIOTest");
  auto* rm = simulation.GetResourceManager();
  auto ref_uid = SoUid(simulation.GetSoUidGenerator()->GetHighestIndex());
  remove(ROOTFILE);  // start from a clean file
  // setup
  rm->push_back(new A(12));
  rm->push_back(new A(34));
  rm->push_back(new A(42));
  rm->push_back(new B(3.14));
  rm->push_back(new B(6.28));
  DiffusionGrid* dgrid_1 = new DiffusionGrid(0, "Kalium", 0.4, 0, 2);
  DiffusionGrid* dgrid_2 = new DiffusionGrid(1, "Natrium", 0.2, 0.1, 1);
  rm->AddDiffusionGrid(dgrid_1);
  rm->AddDiffusionGrid(dgrid_2);
  // backup
  WritePersistentObject(ROOTFILE, "rm", *rm, "new");
  rm->Clear();
  // restore
  ResourceManager* restored_rm = nullptr;
  GetPersistentObject(ROOTFILE, "rm", restored_rm);
  // rebuild the uid -> sim object map, which is not serialized
  restored_rm->RestoreUidSoMap();
  // validate: all objects and their payloads survived the round-trip
  EXPECT_EQ(5u, restored_rm->GetNumSimObjects());
  EXPECT_EQ(12,
            dynamic_cast<A*>(restored_rm->GetSimObject(ref_uid))->GetData());
  EXPECT_EQ(
      34, dynamic_cast<A*>(restored_rm->GetSimObject(ref_uid + 1))->GetData());
  EXPECT_EQ(
      42, dynamic_cast<A*>(restored_rm->GetSimObject(ref_uid + 2))->GetData());
  EXPECT_NEAR(
      3.14, dynamic_cast<B*>(restored_rm->GetSimObject(ref_uid + 3))->GetData(),
      kEpsilon);
  EXPECT_NEAR(
      6.28, dynamic_cast<B*>(restored_rm->GetSimObject(ref_uid + 4))->GetData(),
      kEpsilon);
  EXPECT_EQ(0, restored_rm->GetDiffusionGrid(0)->GetSubstanceId());
  EXPECT_EQ(1, restored_rm->GetDiffusionGrid(1)->GetSubstanceId());
  EXPECT_EQ("Kalium", restored_rm->GetDiffusionGrid(0)->GetSubstanceName());
  EXPECT_EQ("Natrium", restored_rm->GetDiffusionGrid(1)->GetSubstanceName());
  // NOTE(review): 0.6/0.8 are derived by DiffusionGrid from the constructor
  // arguments (0.4 and 0.2) — confirm against DiffusionGrid internals
  EXPECT_EQ(0.6,
            restored_rm->GetDiffusionGrid(0)->GetDiffusionCoefficients()[0]);
  EXPECT_EQ(0.8,
            restored_rm->GetDiffusionGrid(1)->GetDiffusionCoefficients()[0]);
  delete restored_rm;
  remove(ROOTFILE);
}
} // namespace bdm
#endif // UNIT_CORE_RESOURCE_MANAGER_TEST_H_
|
omp_for_bigbounds.c | // RUN: %libomp-compile -DMY_SCHEDULE=static && %libomp-run
// RUN: %libomp-compile -DMY_SCHEDULE=dynamic && %libomp-run
// RUN: %libomp-compile -DMY_SCHEDULE=guided && %libomp-run
// Only works with Intel Compiler since at least version 15.0 and clang since
// version 11.
// XFAIL: gcc, clang-3, clang-4, clang-5, clang-6, clang-7, clang-8, clang-9, clang-10
// icc 21 seems to have an issue with the loop boundaries and runs very long
// UNSUPPORTED: icc-21
/*
* Test that large bounds are handled properly and calculations of
* loop iterations don't accidentally overflow
*/
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <limits.h>
#include "omp_testsuite.h"
#define INCR 50000000
#define MY_MAX 2000000000
#define MY_MIN -2000000000
#ifndef MY_SCHEDULE
# define MY_SCHEDULE static
#endif
int a, b, a_known_value, b_known_value;
/*
 * Runs two OpenMP worksharing loops whose bounds lie near INT_MIN/INT_MAX,
 * so the runtime's trip-count computation is at risk of overflowing.
 * Returns nonzero iff both parallel counts match the sequentially computed
 * reference values a_known_value / b_known_value (set up by main).
 * NOTE: the loop structure itself is the artifact under test — do not
 * restructure these loops.
 */
int test_omp_for_bigbounds()
{
    a = 0;
    b = 0;
    #pragma omp parallel
    {
        int i;
        /* increasing loop spanning almost the entire int range */
        #pragma omp for schedule(MY_SCHEDULE) reduction(+:a)
        for (i = INT_MIN; i < MY_MAX; i+=INCR) {
            a++;
        }
        /* decreasing loop spanning almost the entire int range */
        #pragma omp for schedule(MY_SCHEDULE) reduction(+:b)
        for (i = INT_MAX; i >= MY_MIN; i-=INCR) {
            b++;
        }
    }
    printf("a = %d (should be %d), b = %d (should be %d)\n", a, a_known_value, b, b_known_value);
    return (a == a_known_value && b == b_known_value);
}
int main()
{
    int i;
    int num_failed=0;
    /* compute the reference trip counts sequentially (no OpenMP involved) */
    a_known_value = 0;
    for (i = INT_MIN; i < MY_MAX; i+=INCR) {
        a_known_value++;
    }
    b_known_value = 0;
    for (i = INT_MAX; i >= MY_MIN; i-=INCR) {
        b_known_value++;
    }
    /* REPETITIONS is provided by omp_testsuite.h */
    for(i = 0; i < REPETITIONS; i++) {
        if(!test_omp_for_bigbounds()) {
            num_failed++;
        }
    }
    return num_failed;
}
|
GB_binop__isle_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__isle_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__isle_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_uint8)
// A*D function (colscale): GB (_AxD__isle_uint8)
// D*A function (rowscale): GB (_DxB__isle_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_uint8)
// C=scalar+B GB (_bind1st__isle_uint8)
// C=scalar+B' GB (_bind1st_tran__isle_uint8)
// C=A+scalar GB (_bind2nd__isle_uint8)
// C=A'+scalar GB (_bind2nd_tran__isle_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_UINT8 || GxB_NO_ISLE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator, no mask.
GrB_Info GB (_Cdense_ewise3_noaccum__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense; B's entries are partitioned across tasks by the
// precomputed B_ek_slicing arrays.
GrB_Info GB (_Cdense_accumB__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar (passed as untyped GB_void).
GrB_Info GB (_Cdense_accumb__isle_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (generated boilerplate): the block above always returns
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B; the pattern of C is the
// set union of the patterns of A and B.
GrB_Info GB (_AaddB__isle_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B; freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B where C is sparse or hypersparse; the
// pattern of C is the set intersection of A and B.
GrB_Info GB (_AemultB_08__isle_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.
GrB_Info GB (_AemultB_02__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full.
GrB_Info GB (_AemultB_04__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__isle_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the binary operator with the scalar x bound to the
// first argument.
GrB_Info GB (_bind1st__isle_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the binary operator with the scalar y bound to the
// second argument.
GrB_Info GB (_bind2nd__isle_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x <= aij) ;                      \
}

// Transpose A while applying z = (x <= aij); the transpose loop itself comes
// from "GB_unop_transpose.c", parameterized by GB_ATYPE and GB_CAST_OP.
GrB_Info GB (_bind1st_tran__isle_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this generated function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij <= y) ;                      \
}

// Transpose A while applying z = (aij <= y); the transpose loop itself comes
// from "GB_unop_transpose.c", parameterized by GB_ATYPE and GB_CAST_OP.
GrB_Info GB (_bind2nd_tran__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
utils.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
#pragma once
#include <fcntl.h>
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <string>
#include <memory>
#include <random>
#include <set>
#ifdef __APPLE__
#else
#include <malloc.h>
#endif
#ifdef _WINDOWS
#include <Windows.h>
typedef HANDLE FileHandle;
#else
#include <unistd.h>
typedef int FileHandle;
#endif
#include "logger.h"
#include "cached_io.h"
#include "common_includes.h"
#include "windows_customizations.h"
#ifdef EXEC_ENV_OLS
#include "content_buf.h"
#include "memory_mapped_files.h"
#endif
// taken from
// https://github.com/Microsoft/BLAS-on-flash/blob/master/include/utils.h
// round up X to the nearest multiple of Y
#define ROUND_UP(X, Y) \
((((uint64_t)(X) / (Y)) + ((uint64_t)(X) % (Y) != 0)) * (Y))
#define DIV_ROUND_UP(X, Y) (((uint64_t)(X) / (Y)) + ((uint64_t)(X) % (Y) != 0))
// round down X to the nearest multiple of Y
#define ROUND_DOWN(X, Y) (((uint64_t)(X) / (Y)) * (Y))
// alignment tests
#define IS_ALIGNED(X, Y) ((uint64_t)(X) % (uint64_t)(Y) == 0)
#define IS_512_ALIGNED(X) IS_ALIGNED(X, 512)
#define IS_4096_ALIGNED(X) IS_ALIGNED(X, 4096)
typedef uint64_t _u64;
typedef int64_t _s64;
typedef uint32_t _u32;
typedef int32_t _s32;
typedef uint16_t _u16;
typedef int16_t _s16;
typedef uint8_t _u8;
typedef int8_t _s8;
namespace diskann {
static const size_t MAX_SIZE_OF_STREAMBUF = 2LL * 1024 * 1024 * 1024;
enum Metric { L2 = 0, INNER_PRODUCT = 1, FAST_L2 = 2, PQ = 3 };
// Allocate `size` bytes aligned to `align` and store the result in *ptr.
// `size` must itself be a multiple of `align` (a C11 aligned_alloc
// requirement, checked by the assert).  Aborts via assert on failure.
inline void alloc_aligned(void** ptr, size_t size, size_t align) {
  assert(IS_ALIGNED(size, align));
  void* block = nullptr;
#ifndef _WINDOWS
  block = ::aligned_alloc(align, size);
#else
  // Windows takes (size, align) -- note the swapped arguments!
  block = ::_aligned_malloc(size, align);
#endif
  assert(block != nullptr);
  *ptr = block;
}
// Release memory obtained from alloc_aligned.  Safe to call with nullptr.
// Gopal. Must have a check here if the pointer was actually allocated by
// _alloc_aligned
inline void aligned_free(void* ptr) {
  if (ptr == nullptr)
    return;
#ifndef _WINDOWS
  free(ptr);
#else
  ::_aligned_free(ptr);
#endif
}
// Fill addr[0..size-1] with `size` distinct pseudo-random values in [0, N).
// Strategy: draw sorted values in [0, N-size), bump duplicates to the next
// integer (the maximum reachable value is (N-size-1)+(size-1) < N), then add
// a common random offset modulo N.  Requires size <= N.
//
// Fix: the original performed rng() % (N - size) unconditionally, which is
// undefined behavior when size == N (modulo by zero) and wraps to a huge
// modulus when size > N (unsigned underflow); rng() % N is likewise UB when
// N == 0.  Those degenerate cases are now handled explicitly.
inline void GenRandom(std::mt19937& rng, unsigned* addr, unsigned size,
                      unsigned N) {
  assert(size <= N);
  if (size == 0)
    return;  // nothing to draw; also avoids rng() % N when N == 0
  if (size == N) {
    // Only one possible sample: every value in [0, N) exactly once.
    for (unsigned i = 0; i < size; ++i) {
      addr[i] = i;
    }
    return;
  }
  for (unsigned i = 0; i < size; ++i) {
    addr[i] = rng() % (N - size);
  }
  std::sort(addr, addr + size);
  // De-duplicate by nudging collisions upward; stays below N (see above).
  for (unsigned i = 1; i < size; ++i) {
    if (addr[i] <= addr[i - 1]) {
      addr[i] = addr[i - 1] + 1;
    }
  }
  unsigned off = rng() % N;
  for (unsigned i = 0; i < size; ++i) {
    addr[i] = (addr[i] + off) % N;
  }
}
// get_bin_metadata functions START
// Read the leading (nrows, ncols) header of a .bin stream: two 32-bit ints.
inline void get_bin_metadata_impl(std::basic_istream<char>& reader,
                                  size_t& nrows, size_t& ncols) {
  int header[2];
  reader.read((char*) header, sizeof(header));
  nrows = header[0];
  ncols = header[1];
}
#ifdef EXEC_ENV_OLS
// Read (nrows, ncols) of a bin file served from memory-mapped storage.
inline void get_bin_metadata(MemoryMappedFiles& files,
                             const std::string& bin_file, size_t& nrows,
                             size_t& ncols) {
  diskann::cout << "Getting metadata for file: " << bin_file << std::endl;
  auto content = files.getContent(bin_file);
  ContentBuf cb((char*) content._content, content._size);
  std::basic_istream<char> reader(&cb);
  get_bin_metadata_impl(reader, nrows, ncols);
}
#endif
// Read (nrows, ncols) from a bin file on disk.
inline void get_bin_metadata(const std::string& bin_file, size_t& nrows,
                             size_t& ncols) {
  std::ifstream in(bin_file.c_str(), std::ios::binary);
  get_bin_metadata_impl(in, nrows, ncols);
}
// get_bin_metadata functions END
// Render the first `num` elements of `data` as "[v0,v1,...,]\n" for logging.
template<typename T>
inline std::string getValues(T* data, size_t num) {
  std::stringstream out;
  out << "[";
  for (size_t i = 0; i < num; i++)
    out << std::to_string(data[i]) << ",";
  out << "]" << std::endl;
  return out.str();
}
// load_bin functions START

// Read a .bin payload (4-byte npts, 4-byte dim, then npts*dim T values) from
// `reader` into a freshly new[]-allocated buffer; the caller owns `data`.
// Throws diskann::ANNException if `actual_file_size` disagrees with the
// header-implied size.
template<typename T>
inline void load_bin_impl(std::basic_istream<char>& reader,
                          size_t actual_file_size, T*& data, size_t& npts,
                          size_t& dim) {
  int npts_i32, dim_i32;
  reader.read((char*) &npts_i32, sizeof(int));
  reader.read((char*) &dim_i32, sizeof(int));
  npts = (unsigned) npts_i32;
  dim = (unsigned) dim_i32;
  diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim << "..."
                << std::endl;
  // Sanity check: payload plus the 8-byte header must match the on-disk size.
  size_t expected_actual_file_size =
      npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
  if (actual_file_size != expected_actual_file_size) {
    std::stringstream stream;
    stream << "Error. File size mismatch. Actual size is " << actual_file_size
           << " while expected size is " << expected_actual_file_size
           << " npts = " << npts << " dim = " << dim
           << " size of <T>= " << sizeof(T) << std::endl;
    diskann::cout << stream.str();
    throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                __LINE__);
  }
  data = new T[npts * dim];  // ownership transfers to the caller
  reader.read((char*) data, npts * dim * sizeof(T));
  // diskann::cout << "Last bytes: "
  // << getValues<T>(data + (npts - 2) * dim, dim);
  // diskann::cout << "Finished reading bin file." << std::endl;
}
#ifdef EXEC_ENV_OLS
// Zero-copy load of a .bin file from memory-mapped storage: `data` is set to
// point directly into the mapped content just past the 8-byte header, so the
// caller must NOT delete[] it and must keep the mapping alive while using it.
// Throws diskann::ANNException if the mapped size disagrees with the header.
template<typename T>
inline void load_bin(MemoryMappedFiles& files, const std::string& bin_file,
                     T*& data, size_t& npts, size_t& dim) {
  diskann::cout << "Reading bin file " << bin_file.c_str() << " ..."
                << std::endl;
  auto fc = files.getContent(bin_file);
  // First two 32-bit words of the content are (npts, dim).
  uint32_t t_npts, t_dim;
  uint32_t* contentAsIntPtr = (uint32_t*) (fc._content);
  t_npts = *(contentAsIntPtr);
  t_dim = *(contentAsIntPtr + 1);
  npts = t_npts;
  dim = t_dim;
  auto actual_file_size = npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
  if (actual_file_size != fc._size) {
    std::stringstream stream;
    stream << "Error. File size mismatch. Actual size is " << fc._size
           << " while expected size is " << actual_file_size
           << " npts = " << npts << " dim = " << dim
           << " size of <T>= " << sizeof(T) << std::endl;
    diskann::cout << stream.str();
    throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                __LINE__);
  }
  data =
      (T*) ((char*) fc._content + 2 * sizeof(uint32_t));  // No need to copy!
}
#endif
template<typename T>
inline void load_bin(const std::string& bin_file, T*& data, size_t& npts,
size_t& dim) {
// OLS
//_u64 read_blk_size = 64 * 1024 * 1024;
// cached_ifstream reader(bin_file, read_blk_size);
// size_t actual_file_size = reader.get_file_size();
// END OLS
diskann::cout << "Reading bin file " << bin_file.c_str() << " ..."
<< std::endl;
std::ifstream reader(bin_file, std::ios::binary | std::ios::ate);
uint64_t fsize = reader.tellg();
reader.seekg(0);
load_bin_impl<T>(reader, fsize, data, npts, dim);
}
// load_bin functions END
// Load a ground-truth file: header (npts, ngt), then npts*ngt uint32 ids,
// optionally followed by npts*ngt float distances.  `ids` (and `dists`, only
// when the file contains distances) are new[]-allocated here and owned by the
// caller.  Throws diskann::ANNException if the file size matches neither
// layout.
inline void load_truthset(const std::string& bin_file, uint32_t*& ids,
                          float*& dists, size_t& npts, size_t& dim) {
  _u64            read_blk_size = 64 * 1024 * 1024;
  cached_ifstream reader(bin_file, read_blk_size);
  diskann::cout << "Reading truthset file " << bin_file.c_str() << " ..."
                << std::endl;
  size_t actual_file_size = reader.get_file_size();
  int npts_i32, dim_i32;
  reader.read((char*) &npts_i32, sizeof(int));
  reader.read((char*) &dim_i32, sizeof(int));
  npts = (unsigned) npts_i32;
  dim = (unsigned) dim_i32;
  diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim << "..."
                << std::endl;
  int truthset_type = -1;  // 1 means truthset has ids and distances, 2 means
                           // only ids, -1 is error
  // Decide the layout purely from the total file size.
  size_t expected_file_size_with_dists =
      2 * npts * dim * sizeof(uint32_t) + 2 * sizeof(uint32_t);
  if (actual_file_size == expected_file_size_with_dists)
    truthset_type = 1;
  size_t expected_file_size_just_ids =
      npts * dim * sizeof(uint32_t) + 2 * sizeof(uint32_t);
  if (actual_file_size == expected_file_size_just_ids)
    truthset_type = 2;
  if (truthset_type == -1) {
    std::stringstream stream;
    stream << "Error. File size mismatch. File should have bin format, with "
              "npts followed by ngt followed by npts*ngt ids and optionally "
              "followed by npts*ngt distance values; actual size: "
           << actual_file_size
           << ", expected: " << expected_file_size_with_dists << " or "
           << expected_file_size_just_ids;
    diskann::cout << stream.str();
    throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                __LINE__);
  }
  ids = new uint32_t[npts * dim];
  reader.read((char*) ids, npts * dim * sizeof(uint32_t));
  if (truthset_type == 1) {
    dists = new float[npts * dim];
    reader.read((char*) dists, npts * dim * sizeof(float));
  }
}
#ifdef EXEC_ENV_OLS
// unique_ptr convenience wrapper over the raw-pointer load_bin above.
template<typename T>
inline void load_bin(MemoryMappedFiles& files, const std::string& bin_file,
                     std::unique_ptr<T[]>& data, size_t& npts, size_t& dim) {
  T* raw = nullptr;
  load_bin<T>(files, bin_file, raw, npts, dim);
  data.reset(raw);
}
#endif
// unique_ptr convenience wrapper over the raw-pointer load_bin above.
template<typename T>
inline void load_bin(const std::string& bin_file, std::unique_ptr<T[]>& data,
                     size_t& npts, size_t& dim) {
  T* raw = nullptr;
  load_bin<T>(bin_file, raw, npts, dim);
  data.reset(raw);
}
// Write `data` (npts x ndims, row-major) to `filename` in .bin format:
// two 32-bit header ints (npts, ndims) followed by the raw payload.
template<typename T>
inline void save_bin(const std::string& filename, T* data, size_t npts,
                     size_t ndims) {
  std::ofstream out(filename, std::ios::binary | std::ios::out);
  diskann::cout << "Writing bin: " << filename.c_str() << std::endl;
  int header[2] = {(int) npts, (int) ndims};
  out.write((char*) &header[0], sizeof(int));
  out.write((char*) &header[1], sizeof(int));
  diskann::cout << "bin: #pts = " << npts << ", #dims = " << ndims
                << ", size = " << npts * ndims * sizeof(T) + 2 * sizeof(int)
                << "B" << std::endl;
  out.write((char*) data, npts * ndims * sizeof(T));
  out.close();
  diskann::cout << "Finished writing bin." << std::endl;
}
// load_aligned_bin functions START

// Read a .bin payload from `reader` into an aligned buffer allocated with
// alloc_aligned (caller must release with aligned_free).  Each point's `dim`
// values are padded with zeros up to `rounded_dim` (dim rounded up to a
// multiple of 16 elements).  Throws diskann::ANNException on a size mismatch.
template<typename T>
inline void load_aligned_bin_impl(std::basic_istream<char>& reader,
                                  size_t actual_file_size, T*& data,
                                  size_t& npts, size_t& dim,
                                  size_t& rounded_dim) {
  int npts_i32, dim_i32;
  reader.read((char*) &npts_i32, sizeof(int));
  reader.read((char*) &dim_i32, sizeof(int));
  npts = (unsigned) npts_i32;
  dim = (unsigned) dim_i32;
  // Sanity check: payload plus the 8-byte header must match the on-disk size.
  size_t expected_actual_file_size =
      npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
  if (actual_file_size != expected_actual_file_size) {
    std::stringstream stream;
    stream << "Error. File size mismatch. Actual size is " << actual_file_size
           << " while expected size is " << expected_actual_file_size
           << " npts = " << npts << " dim = " << dim
           << " size of <T>= " << sizeof(T) << std::endl;
    diskann::cout << stream.str() << std::endl;
    throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                __LINE__);
  }
  rounded_dim = ROUND_UP(dim, 16);
  diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim
                << ", aligned_dim = " << rounded_dim << "..." << std::flush;
  size_t allocSize = npts * rounded_dim * sizeof(T);
  diskann::cout << "allocating aligned memory, " << allocSize << " bytes..."
                << std::flush;
  alloc_aligned(((void**) &data), allocSize, 8 * sizeof(T));
  diskann::cout << "done. Copying data..." << std::flush;
  // Read each point, then zero the padding tail up to rounded_dim.
  for (size_t i = 0; i < npts; i++) {
    reader.read((char*) (data + i * rounded_dim), dim * sizeof(T));
    memset(data + i * rounded_dim + dim, 0, (rounded_dim - dim) * sizeof(T));
  }
  diskann::cout << " done." << std::endl;
}
#ifdef EXEC_ENV_OLS
// Aligned load of a .bin file from memory-mapped storage; forwards the
// mapped bytes to load_aligned_bin_impl.
template<typename T>
inline void load_aligned_bin(MemoryMappedFiles& files,
                             const std::string& bin_file, T*& data,
                             size_t& npts, size_t& dim, size_t& rounded_dim) {
  diskann::cout << "Reading bin file " << bin_file << " ..." << std::flush;
  FileContent fc = files.getContent(bin_file);
  ContentBuf buf((char*) fc._content, fc._size);
  std::basic_istream<char> reader(&buf);
  load_aligned_bin_impl(reader, fc._size, data, npts, dim, rounded_dim);
}
#endif
template<typename T>
inline void load_aligned_bin(const std::string& bin_file, T*& data,
size_t& npts, size_t& dim, size_t& rounded_dim) {
diskann::cout << "Reading bin file " << bin_file << " ..." << std::flush;
// START OLS
//_u64 read_blk_size = 64 * 1024 * 1024;
// cached_ifstream reader(bin_file, read_blk_size);
// size_t actual_file_size = reader.get_file_size();
// END OLS
std::ifstream reader(bin_file, std::ios::binary | std::ios::ate);
uint64_t fsize = reader.tellg();
reader.seekg(0);
load_aligned_bin_impl(reader, fsize, data, npts, dim, rounded_dim);
}
// Element-wise cast-copy of an npts x dim row-major matrix from InType to
// OutType, parallelized over points.
template<typename InType, typename OutType>
void convert_types(const InType* srcmat, OutType* destmat, size_t npts,
                   size_t dim) {
#pragma omp parallel for schedule(static, 65536)
  for (int64_t i = 0; i < (int64_t) npts; i++) {
    const InType* src_row = srcmat + i * dim;
    OutType*      dst_row = destmat + i * dim;
    for (uint64_t j = 0; j < dim; j++) {
      dst_row[j] = (OutType) src_row[j];
    }
  }
}
// plain saves data as npts X ndims array into filename
template<typename T>
void save_Tvecs(const char* filename, T* data, size_t npts, size_t ndims) {
std::string fname(filename);
// create cached ofstream with 64MB cache
cached_ofstream writer(fname, 64 * 1048576);
unsigned dims_u32 = (unsigned) ndims;
// start writing
for (uint64_t i = 0; i < npts; i++) {
// write dims in u32
writer.write((char*) &dims_u32, sizeof(unsigned));
// get cur point in data
T* cur_pt = data + i * ndims;
writer.write((char*) cur_pt, ndims * sizeof(T));
}
}
// NOTE :: good efficiency when total_vec_size is integral multiple of 64
// Prefetch the leading whole cache lines of `vec` into L1 (_MM_HINT_T0).
inline void prefetch_vector(const char* vec, size_t vecsize) {
  const size_t whole_lines = (vecsize / 64) * 64;  // round down to line size
  for (size_t off = 0; off < whole_lines; off += 64)
    _mm_prefetch(vec + off, _MM_HINT_T0);
}
// NOTE :: good efficiency when total_vec_size is integral multiple of 64
// Prefetch the leading whole cache lines of `vec` into L2 (_MM_HINT_T1).
inline void prefetch_vector_l2(const char* vec, size_t vecsize) {
  const size_t whole_lines = (vecsize / 64) * 64;  // round down to line size
  for (size_t off = 0; off < whole_lines; off += 64)
    _mm_prefetch(vec + off, _MM_HINT_T1);
}
}; // namespace diskann
// Pairs a pivot id with its distance.  Note the inverted comparisons:
// a < b holds iff a.piv_dist > b.piv_dist, i.e. "less" means "farther".
struct PivotContainer {
  PivotContainer() = default;

  PivotContainer(size_t pivo_id, float pivo_dist)
      : piv_id{pivo_id}, piv_dist{pivo_dist} {
  }

  bool operator<(const PivotContainer& p) const {
    return p.piv_dist < piv_dist;  // inverted on purpose
  }

  bool operator>(const PivotContainer& p) const {
    return p.piv_dist > piv_dist;  // inverted on purpose
  }

  size_t piv_id;
  float  piv_dist;
};
// Return true if `name` exists (stat() succeeds); logs the stat result.
inline bool file_exists(const std::string& name) {
  struct stat buffer;
  int rc = stat(name.c_str(), &buffer);
  diskann::cout << " Stat(" << name.c_str() << ") returned: " << rc
                << std::endl;
  return rc == 0;
}
// Return the size of `fname` in bytes, or 0 if it cannot be opened.
inline _u64 get_file_size(const std::string& fname) {
  std::ifstream reader(fname, std::ios::binary | std::ios::ate);
  if (reader.fail() || !reader.is_open()) {
    diskann::cout << "Could not open file: " << fname << std::endl;
    return 0;
  }
  _u64 end_pos = reader.tellg();  // opened at end -> tellg is the file size
  diskann::cout << " Tellg: " << reader.tellg() << " as u64: " << end_pos
                << std::endl;
  reader.close();
  return end_pos;
}
// Check the uint64 size stamp at the head of `name` against the file's actual
// size.  Returns false (with a log message) on open failure or mismatch.
//
// Fix: the original read `expected_file_size` without checking that the file
// opened or that the read succeeded, so a missing/short file compared against
// an uninitialized value (undefined behavior).
inline bool validate_file_size(const std::string& name) {
  std::ifstream in(std::string(name), std::ios::binary);
  if (!in.is_open()) {
    diskann::cout << "Could not open file: " << name
                  << ". Validation failed." << std::endl;
    return false;
  }
  in.seekg(0, in.end);
  size_t actual_file_size = in.tellg();
  in.seekg(0, in.beg);
  size_t expected_file_size = 0;  // stays 0 if the read below fails
  in.read((char*) &expected_file_size, sizeof(uint64_t));
  if (in.fail() || actual_file_size != expected_file_size) {
    diskann::cout << "Error loading" << name << ". Expected "
                     "size (metadata): "
                  << expected_file_size
                  << ", actual file size : " << actual_file_size
                  << ". Exitting." << std::endl;
    in.close();
    return false;
  }
  in.close();
  return true;
}
#ifdef _WINDOWS
#include <intrin.h>
#include <Psapi.h>

// Log the current process memory counters (peak/current working set and
// private bytes, in GB) prefixed by `message`.  Windows implementation.
inline void printProcessMemory(const char* message) {
  PROCESS_MEMORY_COUNTERS counters;
  HANDLE h = GetCurrentProcess();
  GetProcessMemoryInfo(h, &counters, sizeof(counters));
  diskann::cout << message << " [Peaking Working Set size: "
                << counters.PeakWorkingSetSize * 1.0 / (1024 * 1024 * 1024)
                << "GB Working set size: "
                << counters.WorkingSetSize * 1.0 / (1024 * 1024 * 1024)
                << "GB Private bytes "
                << counters.PagefileUsage * 1.0 / (1024 * 1024 * 1024) << "GB]"
                << std::endl;
}
#else
// Non-Windows fallback: no memory counters are queried; only the message is
// logged.
inline void printProcessMemory(const char* message) {
  diskann::cout << message << std::endl;
}
#endif
extern bool AvxSupportedCPU;
extern bool Avx2SupportedCPU;
extern bool Avx512SupportedCPU;
|
composite.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE %
% C O O MM MM P P O O SS I T E %
% C O O M M M PPPP O O SSS I T EEE %
% C O O M M P O O SS I T E %
% CCCC OOO M M P OOO SSSSS IIIII T EEEEE %
% %
% %
% MagickCore Image Composite Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/draw.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/memory_.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum.h"
#include "magick/resample.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p o s i t e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompositeImageChannel() returns the second image composited onto the first
% at the specified offset, using the specified composite method.
%
% The format of the CompositeImageChannel method is:
%
% MagickBooleanType CompositeImage(Image *image,
% const CompositeOperator compose,Image *composite_image,
% const ssize_t x_offset,const ssize_t y_offset)
% MagickBooleanType CompositeImageChannel(Image *image,
% const ChannelType channel,const CompositeOperator compose,
% Image *composite_image,const ssize_t x_offset,const ssize_t y_offset)
%
% A description of each parameter follows:
%
%    o image: the destination image, modified by the composition
%
% o channel: the channel.
%
% o compose: This operator affects how the composite is applied to
% the image. The operators and how they are utilized are listed here
% http://www.w3.org/TR/SVG12/#compositing.
%
% o composite_image: the composite (source) image.
%
% o x_offset: the column offset of the composited image.
%
% o y_offset: the row offset of the composited image.
%
% Extra Controls from Image meta-data in 'composite_image' (artifacts)
%
% o "compose:args"
% A string containing extra numerical arguments for specific compose
% methods, generally expressed as a 'geometry' or a comma separated list
% of numbers.
%
% Compose methods needing such arguments include "BlendCompositeOp" and
% "DisplaceCompositeOp".
%
% o "compose:outside-overlay"
% Modify how the composition is to effect areas not directly covered
% by the 'composite_image' at the offset given. Normally this is
% dependant on the 'compose' method, especially Duff-Porter methods.
%
% If set to "false" then disable all normal handling of pixels not
% covered by the composite_image. Typically used for repeated tiling
% of the composite_image by the calling API.
%
% Previous to IM v6.5.3-3 this was called "modify-outside-overlay"
%
*/
/* Return the smaller of two doubles. */
static inline double MagickMin(const double x,const double y)
{
  return(x < y ? x : y);
}
/* Return the larger of two doubles. */
static inline double MagickMax(const double x,const double y)
{
  return(x > y ? x : y);
}
/*
** Programmers notes on SVG specification.
**
** A Composition is defined by...
**   Color Function :  f(Sc,Dc)  where Sc and Dc are the normalized colors
** Blending areas : X = 1 for area of overlap ie: f(Sc,Dc)
** Y = 1 for source preserved
** Z = 1 for destination preserved
**
** Conversion to transparency (then optimized)
** Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
** Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)
**
** Where...
** Sca = Sc*Sa normalized Source color divided by Source alpha
** Dca = Dc*Da normalized Dest color divided by Dest alpha
** Dc' = Dca'/Da' the desired color value for this channel.
**
** Da' appears in the following formulas as 'gamma', the resulting alpha value.
**
**
** Most functions use a blending mode of over (X=1,Y=1,Z=1)
** this results in the following optimizations...
** gamma = Sa+Da-Sa*Da;
** gamma = 1 - QuantiumScale*alpha * QuantiumScale*beta;
** opacity = QuantiumScale*alpha*beta; // over blend, optimized 1-Gamma
**
** The above SVG definitions also definate that Mathematical Composition
** methods should use a 'Over' blending mode for Alpha Channel.
** It however was not applied for composition modes of 'Plus', 'Minus',
** the modulus versions of 'Add' and 'Subtract'.
**
**
** Mathematical operator changes to be applied from IM v6.7...
**
** 1/ Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
** 'ModulusAdd' and 'ModulusSubtract' for clarity.
**
** 2/ All mathematical compositions work as per the SVG specification
** with regard to blending. This now includes 'ModulusAdd' and
** 'ModulusSubtract'.
**
** 3/ When the special channel flag 'sync' (syncronize channel updates)
** is turned off (enabled by default) then mathematical compositions are
** only performed on the channels specified, and are applied
** independantally of each other. In other words the mathematics is
** performed as 'pure' mathematical operations, rather than as image
** operations.
*/
/*
  Duff-Porter 'atop' for one channel: source kept only where the destination
  is opaque, so Da cancels out of the blend (Da/gamma => 1.0).
*/
static inline MagickRealType Atop(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType q,
  const MagickRealType magick_unused(Da))
{
  return(Sa*p+(1.0-Sa)*q);  /* Da optimized out, Da/gamma => 1.0 */
}
/*
  Apply the 'atop' channel blend to a whole pixel; the result alpha is simply
  the destination alpha (optimized Da = 1.0-Gamma).
*/
static inline void CompositeAtop(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  const MagickRealType
    Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */

  composite->opacity=q->opacity;
  composite->red=Atop(p->red,Sa,q->red,1.0);
  composite->green=Atop(p->green,Sa,q->green,1.0);
  composite->blue=Atop(p->blue,Sa,q->blue,1.0);
  if (q->colorspace == CMYKColorspace)
    composite->index=Atop(p->index,Sa,q->index,1.0);
}
/*
  What is this Composition method for? Can't find any specification!
  WARNING this is not doing correct 'over' blend handling (Anthony Thyssen).
*/
static inline void CompositeBumpmap(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  const MagickRealType
    scale=QuantumScale*MagickPixelIntensity(p);  /* source intensity, 0..1 */

  composite->red=scale*q->red;
  composite->green=scale*q->green;
  composite->blue=scale*q->blue;
  composite->opacity=scale*p->opacity;
  if (q->colorspace == CMYKColorspace)
    composite->index=scale*q->index;
}
/* Duff-Porter 'clear': fully transparent black result. */
static inline void CompositeClear(const MagickPixelPacket *q,
  MagickPixelPacket *composite)
{
  composite->red=0.0;
  composite->green=0.0;
  composite->blue=0.0;
  composite->opacity=(MagickRealType) TransparentOpacity;
  if (q->colorspace == CMYKColorspace)
    composite->index=0.0;
}
/*
  SVG 'color-burn' channel function on alpha-premultiplied values: darkens
  the destination to reflect the source.  Sca/Dca are source/destination
  channel values premultiplied by their alphas Sa/Da.
*/
static MagickRealType ColorBurn(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da)
{
#if 0
  /*
    Oct 2004 SVG specification.
  */
  if (Sca*Da + Dca*Sa <= Sa*Da)
    return(Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Sa*(Sca*Da+Dca*Sa-Sa*Da)/Sca + Sca*(1.0-Da) + Dca*(1.0-Sa));
#else
  /*
    March 2009 SVG specification.
  */
  /* special case: black source over a destination channel at full value */
  if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca-Da) < MagickEpsilon))
    return(Sa*Da+Dca*(1.0-Sa));
  if (Sca < MagickEpsilon)
    return(Dca*(1.0-Sa));
  return(Sa*Da-Sa*MagickMin(Da,(Da-Dca)*Sa/Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
#endif
}
/*
  Apply the ColorBurn channel function to a whole pixel with 'over' alpha
  blending, per the SVG compositing model.
*/
static inline void CompositeColorBurn(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  const MagickRealType
    Sa=1.0-QuantumScale*p->opacity,  /* source alpha */
    Da=1.0-QuantumScale*q->opacity;  /* destination alpha */

  MagickRealType
    gamma;

  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*ColorBurn(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*ColorBurn(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*ColorBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*ColorBurn(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}
/*
  SVG 'color-dodge' channel function on alpha-premultiplied values: brightens
  the destination to reflect the source.  Sca/Dca are source/destination
  channel values premultiplied by their alphas Sa/Da.
*/
static MagickRealType ColorDodge(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da)
{
#if 0
  /*
    Oct 2004 SVG specification.
  */
  if ((Sca*Da+Dca*Sa) >= Sa*Da)
    return( Sa*Da + Sca*(1.0-Da) + Dca*(1.0-Sa) );
  return( Dca*Sa*Sa/(Sa-Sca) + Sca*(1.0-Da) + Dca*(1.0-Sa) );
#endif
#if 0
  /*
    New specification, March 2009 SVG specification.  This specification was
    also wrong of non-overlap cases.
  */
  if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
    return(Sca*(1.0-Da));
  if (fabs(Sca-Sa) < MagickEpsilon)
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Sa*MagickMin(Da,Dca*Sa/(Sa-Sca)));
#endif
  /*
    Working from first principles using the original formula:
       f(Sc,Dc) = Dc/(1-Sc)
    This works correctly!  Looks like the 2004 model was right but just
    required a extra condition for correct handling.
  */
  /* source channel at full value and destination black: non-overlap only */
  if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
    return(Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (fabs(Sca-Sa) < MagickEpsilon)
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
}
/*
  Apply the ColorDodge channel function to a whole pixel with 'over' alpha
  blending, per the SVG compositing model.
*/
static inline void CompositeColorDodge(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  const MagickRealType
    Sa=1.0-QuantumScale*p->opacity,  /* source alpha */
    Da=1.0-QuantumScale*q->opacity;  /* destination alpha */

  MagickRealType
    gamma;

  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*ColorDodge(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*ColorDodge(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*ColorDodge(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*ColorDodge(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}
/*
  Per-channel 'darken': composite whichever of p/q is smaller over the other.
*/
static inline MagickRealType Darken(const MagickRealType p,
  const MagickRealType alpha,const MagickRealType q,const MagickRealType beta)
{
  if (q <= p)
    return(MagickOver_(q,beta,p,alpha));  /* dst-over */
  return(MagickOver_(p,alpha,q,beta));    /* src-over */
}
static inline void CompositeDarken(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    Darken is equivalent to a 'Minimum' method
      OR a greyscale version of a binary 'Or'
      OR the 'Intersection' of pixel sets.
  */
  MagickRealType
    gamma;

  if ( (channel & SyncChannels) != 0 ) {
    /* Synced channels: blend whole pixels with 'over' alpha handling. */
    composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */
    gamma=1.0-QuantumScale*composite->opacity;
    gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
    composite->red=gamma*Darken(p->red,p->opacity,q->red,q->opacity);
    composite->green=gamma*Darken(p->green,p->opacity,q->green,q->opacity);
    composite->blue=gamma*Darken(p->blue,p->opacity,q->blue,q->opacity);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Darken(p->index,p->opacity,q->index,q->opacity);
  }
  else { /* handle channels as separate grayscale channels */
    /* Note: opacity uses Max because larger opacity == more transparent. */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=MagickMax(p->opacity,q->opacity);
    if ( (channel & RedChannel) != 0 )
      composite->red=MagickMin(p->red,q->red);
    if ( (channel & GreenChannel) != 0 )
      composite->green=MagickMin(p->green,q->green);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=MagickMin(p->blue,q->blue);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=MagickMin(p->index,q->index);
  }
}
static inline void CompositeDarkenIntensity(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    Select the pixel based on the intensity level.
    If 'Sync' flag select whole pixel based on alpha weighted intensity.
    Otherwise use intensity only, but restrict copy according to channel.
  */
  if ( (channel & SyncChannels) != 0 ) {
    MagickRealType
      Da,
      Sa;

    Sa=1.0-QuantumScale*p->opacity;
    Da=1.0-QuantumScale*q->opacity;
    /* Copy the entire source or destination pixel, whichever is darker
       when weighted by its alpha. */
    *composite = (Sa*MagickPixelIntensity(p) < Da*MagickPixelIntensity(q))
                 ? *p : *q;
  }
  else {
    /* Unsynced: compare raw intensities, then copy only the channels
       requested by the channel mask. */
    int from_p = (MagickPixelIntensity(p) < MagickPixelIntensity(q));
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity = from_p ? p->opacity : q->opacity;
    if ( (channel & RedChannel) != 0 )
      composite->red = from_p ? p->red : q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green = from_p ? p->green : q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue = from_p ? p->blue : q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index = from_p ? p->index : q->index;
  }
}
/*
  Per-channel absolute difference, expressed on premultiplied values.
  (Optimized by multiplying by QuantumRange, taken from gamma.)
*/
static inline MagickRealType Difference(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType q,const MagickRealType Da)
{
  return(Sa*p+Da*q-2.0*Sa*Da*MagickMin(p,q));
}
static inline void CompositeDifference(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    /* Synced channels: 'over' alpha blend with the Difference function. */
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
    /* Values are not normalized as an optimization.  */
    composite->red=gamma*Difference(p->red,Sa,q->red,Da);
    composite->green=gamma*Difference(p->green,Sa,q->green,Da);
    composite->blue=gamma*Difference(p->blue,Sa,q->blue,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Difference(p->index,Sa,q->index,Da);
  }
  else { /* handle channels as separate grayscale channels */
    /* Opacity is inverted (QuantumRange - |diff|) since larger opacity
       means more transparent. */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange-fabs(p->opacity - q->opacity);
    if ( (channel & RedChannel) != 0 )
      composite->red=fabs(p->red - q->red);
    if ( (channel & GreenChannel) != 0 )
      composite->green=fabs(p->green - q->green);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=fabs(p->blue - q->blue);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=fabs(p->index - q->index);
  }
}
static MagickRealType Divide(const MagickRealType Sca,const MagickRealType Sa,
  const MagickRealType Dca,const MagickRealType Da)
{
  /*
    Divide Source by Destination
      f(Sc,Dc) = Sc / Dc
    Handles the Dc == 0 special cases so that f(Black,Black)=Black and
    f(non-Black,Black)=White, while still performing correct 'over' alpha
    blending (which is what makes the formula complex).
  */
  if (fabs(Dca) < MagickEpsilon)
    {
      if (fabs(Sca) < MagickEpsilon)
        return(Sca*(1.0-Da)+Dca*(1.0-Sa));  /* black/black -> black */
      return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));  /* non-black/black -> white */
    }
  return(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}
static inline void CompositeDivide(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    'Divide' compose: channels are normalized to [0,1], premultiplied by
    alpha, divided via Divide(), then rescaled by gamma (which folds
    QuantumRange back in).
  */
  MagickRealType
    Da,
    gamma,
    Sa;
  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
    composite->red=gamma*Divide(QuantumScale*p->red*Sa,Sa,QuantumScale*
      q->red*Da,Da);
    composite->green=gamma*Divide(QuantumScale*p->green*Sa,Sa,QuantumScale*
      q->green*Da,Da);
    composite->blue=gamma*Divide(QuantumScale*p->blue*Sa,Sa,QuantumScale*
      q->blue*Da,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Divide(QuantumScale*p->index*Sa,Sa,QuantumScale*
        q->index*Da,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Divide(Sa,1.0,Da,1.0));
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumRange*
        Divide(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0);
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumRange*
        Divide(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumRange*
        Divide(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumRange*
        Divide(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0);
  }
}
static MagickRealType Exclusion(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da)
{
  /*
    Exclusion: a lower-contrast variant of Difference, per the SVG
    compositing specification:
      Dca' = Sca*Da + Dca*Sa - 2*Sca*Dca + Sca*(1-Da) + Dca*(1-Sa)
  */
  return(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}
static inline void CompositeExclusion(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    'Exclusion' compose: normalized, alpha-premultiplied channels are run
    through Exclusion(); gamma rescales the result back to quantum range.
  */
  MagickRealType
    gamma,
    Sa,
    Da;
  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
    composite->red=gamma*Exclusion(QuantumScale*p->red*Sa,Sa,QuantumScale*
      q->red*Da,Da);
    composite->green=gamma*Exclusion(QuantumScale*p->green*Sa,Sa,QuantumScale*
      q->green*Da,Da);
    composite->blue=gamma*Exclusion(QuantumScale*p->blue*Sa,Sa,QuantumScale*
      q->blue*Da,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Exclusion(QuantumScale*p->index*Sa,Sa,QuantumScale*
        q->index*Da,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Exclusion(Sa,1.0,Da,1.0));
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumRange*
        Exclusion(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0);
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumRange*
        Exclusion(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumRange*
        Exclusion(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumRange*
        Exclusion(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0);
  }
}
static MagickRealType HardLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    HardLight per the SVG compositing spec: multiply when the source value
    is dark (2*Sc < 1), screen when it is light.
  */
  if ((2.0*Sca) < Sa)
    return(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
}
static inline void CompositeHardLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  /*
    'HardLight' compose (always channel-synced): normalized,
    alpha-premultiplied channels through HardLight(), rescaled by gamma.
  */
  MagickRealType
    Da,
    gamma,
    Sa;
  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*HardLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*HardLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*HardLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*HardLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}
static void CompositeHSB(const MagickRealType red,const MagickRealType green,
  const MagickRealType blue,double *hue,double *saturation,double *brightness)
{
  MagickRealType
    delta,
    max,
    min;
  /*
    Convert RGB to HSB colorspace.  Outputs are normalized to [0,1):
    brightness = max/QuantumRange, saturation = 1-min/max, and hue is the
    sextant angle divided by 6 (wrapped into [0,1)).  Gray (max == min)
    yields hue 0 and saturation 0; black yields all zeros.
  */
  assert(hue != (double *) NULL);
  assert(saturation != (double *) NULL);
  assert(brightness != (double *) NULL);
  max=(red > green ? red : green);
  if (blue > max)
    max=blue;
  min=(red < green ? red : green);
  if (blue < min)
    min=blue;
  *hue=0.0;
  *saturation=0.0;
  *brightness=(double) (QuantumScale*max);
  if (max == 0.0)
    return;  /* pure black: hue/saturation undefined, leave 0 */
  *saturation=(double) (1.0-min/max);
  delta=max-min;
  if (delta == 0.0)
    return;  /* gray: hue undefined, leave 0 */
  if (red == max)
    *hue=(double) ((green-blue)/delta);
  else
    if (green == max)
      *hue=(double) (2.0+(blue-red)/delta);
    else
      if (blue == max)
        *hue=(double) (4.0+(red-green)/delta);
  *hue/=6.0;
  if (*hue < 0.0)
    *hue+=1.0;
}
static inline MagickRealType In(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType magick_unused(q),
  const MagickRealType Da)
{
  MagickRealType
    result;

  /* 'In' compose: source contributes only where the destination exists. */
  result=Sa*p*Da;
  return(result);
}
static inline void CompositeIn(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  /*
    'In' compose: result alpha is Sa*Da; channels are alpha-weighted via
    In() and un-premultiplied by 1/gamma.
  */
  MagickRealType
    gamma,
    Sa,
    Da;
  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa*Da;
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*In(p->red,Sa,q->red,Da);
  composite->green=gamma*In(p->green,Sa,q->green,Da);
  composite->blue=gamma*In(p->blue,Sa,q->blue,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*In(p->index,Sa,q->index,Da);
}
static inline MagickRealType Lighten(const MagickRealType p,
  const MagickRealType alpha,const MagickRealType q,const MagickRealType beta)
{
  /*
    Keep whichever value is lighter, alpha-blending the winner over the
    loser: src-over when p is lighter, dst-over otherwise.
  */
  return(p > q ? MagickOver_(p,alpha,q,beta) : MagickOver_(q,beta,p,alpha));
}
static inline void CompositeLighten(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    Lighten is also equivalent to a 'Maximum' method
    OR a greyscale version of a binary 'Or'
    OR the 'Union' of pixel sets.
  */
  MagickRealType
    gamma;
  if ( (channel & SyncChannels) != 0 ) {
    composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */
    gamma=1.0-QuantumScale*composite->opacity;
    gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
    composite->red=gamma*Lighten(p->red,p->opacity,q->red,q->opacity);
    composite->green=gamma*Lighten(p->green,p->opacity,q->green,q->opacity);
    composite->blue=gamma*Lighten(p->blue,p->opacity,q->blue,q->opacity);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Lighten(p->index,p->opacity,q->index,q->opacity);
  }
  else { /* handle channels as separate grayscale channels */
    /* opacity is inverted alpha, so 'lighter' means the smaller opacity */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=MagickMin(p->opacity,q->opacity);
    if ( (channel & RedChannel) != 0 )
      composite->red=MagickMax(p->red,q->red);
    if ( (channel & GreenChannel) != 0 )
      composite->green=MagickMax(p->green,q->green);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=MagickMax(p->blue,q->blue);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=MagickMax(p->index,q->index);
  }
}
static inline void CompositeLightenIntensity(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    Pick the lighter pixel based on intensity level.
    With the 'Sync' flag the whole pixel is chosen using alpha-weighted
    intensity; otherwise raw intensity decides, and the copy is limited to
    the requested channels.
  */
  if ((channel & SyncChannels) != 0)
    {
      MagickRealType
        Da,
        Sa;

      Sa=1.0-QuantumScale*p->opacity;
      Da=1.0-QuantumScale*q->opacity;
      if (Sa*MagickPixelIntensity(p) > Da*MagickPixelIntensity(q))
        *composite=(*p);
      else
        *composite=(*q);
    }
  else
    {
      const MagickPixelPacket
        *winner;

      /* unweighted intensity decides; copy only the requested channels */
      winner=(MagickPixelIntensity(p) > MagickPixelIntensity(q)) ? p : q;
      if ((channel & AlphaChannel) != 0)
        composite->opacity=winner->opacity;
      if ((channel & RedChannel) != 0)
        composite->red=winner->red;
      if ((channel & GreenChannel) != 0)
        composite->green=winner->green;
      if ((channel & BlueChannel) != 0)
        composite->blue=winner->blue;
      if (((channel & IndexChannel) != 0) && (q->colorspace == CMYKColorspace))
        composite->index=winner->index;
    }
}
#if 0
static inline MagickRealType LinearDodge(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    LinearDodge: simplifies to a trivial formula
    f(Sc,Dc) = Sc + Dc
    Dca' = Sca + Dca
    NOTE(review): compiled out; CompositeLinearDodge below inlines this sum
    directly.
  */
  return(Sca+Dca);
}
#endif
static inline void CompositeLinearDodge(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  /*
    'LinearDodge' compose: f(Sc,Dc) = Sc + Dc with 'over' alpha blending;
    the sum is inlined rather than calling the (disabled) LinearDodge()
    helper above.
  */
  MagickRealType
    Da,
    gamma,
    Sa;
  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*(p->red*Sa+q->red*Da);
  composite->green=gamma*(p->green*Sa+q->green*Da);
  composite->blue=gamma*(p->blue*Sa+q->blue*Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*(p->index*Sa+q->index*Da);
}
static inline MagickRealType LinearBurn(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    LinearBurn: as defined by Adobe Photoshop, according to
    http://www.simplefilter.de/en/basics/mixmods.html is:
    f(Sc,Dc) = Sc + Dc - 1
    (the -1 becomes -Sa*Da in premultiplied-alpha form)
  */
  return(Sca+Dca-Sa*Da);
}
static inline void CompositeLinearBurn(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  /*
    'LinearBurn' compose (always channel-synced): normalized,
    alpha-premultiplied channels through LinearBurn(), rescaled by gamma.
  */
  MagickRealType
    Da,
    gamma,
    Sa;
  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*LinearBurn(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*LinearBurn(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*LinearBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*LinearBurn(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}
static inline MagickRealType LinearLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
#if 0
  /*
    Previous formula, was only valid for fully-opaque images.
  */
  return(Dca+2*Sca-1.0);
#else
  /*
    LinearLight: as defined by Adobe Photoshop, according to
    http://www.simplefilter.de/en/basics/mixmods.html is:
    f(Sc,Dc) = Dc + 2*Sc - 1
    (rewritten here in alpha-premultiplied form)
  */
  return((Sca-Sa)*Da+Sca+Dca);
#endif
}
static inline void CompositeLinearLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  /*
    'LinearLight' compose (always channel-synced): normalized,
    alpha-premultiplied channels through LinearLight(), rescaled by gamma.
  */
  MagickRealType
    Da,
    gamma,
    Sa;
  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*LinearLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*LinearLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*LinearLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*LinearLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}
static inline MagickRealType Mathematics(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da,
  const GeometryInfo *geometry_info)
{
  /*
    'Mathematics' a free form user control mathematical composition is defined
    as...
       f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D
    Where the arguments A,B,C,D are (currently) passed to composite as
    a comma separated 'geometry' string in "compose:args" image artifact.
       A = a->rho, B = a->sigma, C = a->xi, D = a->psi
    Applying the SVG transparency formula (see above), we get...
       Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)
       Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
         Dca*(1.0-Sa)
  */
  return(geometry_info->rho*Sca*Dca+geometry_info->sigma*Sca*Da+
    geometry_info->xi*Dca*Sa+geometry_info->psi*Sa*Da+Sca*(1.0-Da)+
    Dca*(1.0-Sa));
}
static inline void CompositeMathematics(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel, const GeometryInfo
  *args, MagickPixelPacket *composite)
{
  /*
    'Mathematics' compose: applies the user-supplied A,B,C,D polynomial
    (see Mathematics() above) per channel, with SVG 'over' alpha blending
    when channels are synced.
  */
  MagickRealType
    Sa,
    Da,
    gamma;
  Sa=1.0-QuantumScale*p->opacity; /* ??? - AT */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
    composite->red=gamma*Mathematics(QuantumScale*p->red*Sa,Sa,QuantumScale*
      q->red*Da,Da,args);
    composite->green=gamma*Mathematics(QuantumScale*p->green*Sa,Sa,QuantumScale*
      q->green*Da,Da,args);
    composite->blue=gamma*Mathematics(QuantumScale*p->blue*Sa,Sa,QuantumScale*
      q->blue*Da,Da,args);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Mathematics(QuantumScale*p->index*Sa,Sa,QuantumScale*
        q->index*Da,Da,args);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Mathematics(Sa,1.0,Da,1.0,args));
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumRange*
        Mathematics(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0,args);
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumRange*
        Mathematics(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0,args);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumRange*
        Mathematics(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0,args);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumRange*
        Mathematics(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0,args);
  }
}
static inline void CompositePlus(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 ) {
    /*
      NOTE: "Plus" does not use 'over' alpha-blending but uses a
      special 'plus' form of alpha-blending.  It is the ONLY mathematical
      operator to do this.  This is what makes it different to the
      otherwise equivalent "LinearDodge" composition method.

      Note however that color channels are still affected by the alpha channel
      as a result of the blending, making it just as useless for independent
      channel maths, just like all other mathematical composition methods.

      As such the removal of the 'sync' flag is still a useful convention.

      The MagickPixelCompositePlus() function is defined in
      "composite-private.h" so it can also be used for Image Blending.
    */
    MagickPixelCompositePlus(p,p->opacity,q,q->opacity,composite);
  }
  else { /* handle channels as separate grayscale channels */
    /* opacity is inverted alpha, hence the -QuantumRange correction */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=p->opacity+q->opacity-QuantumRange;
    if ( (channel & RedChannel) != 0 )
      composite->red=p->red+q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=p->green+q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue=p->blue+q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=p->index+q->index;
  }
}
static inline MagickRealType Minus(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,
  const MagickRealType magick_unused(Da))
{
  /*
    Minus Source from Destination
       f(Sc,Dc) = Sc - Dc
    which in alpha-premultiplied form becomes
       Dca' = Sca + Dca - 2*Dca*Sa
  */
  return(Sca + Dca - 2*Dca*Sa);
}
static inline void CompositeMinus(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    'Minus' compose: subtract destination from source, with alpha blending
    when channels are synced; raw per-channel subtraction otherwise.
  */
  MagickRealType
    Sa,
    Da,
    gamma;
  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
    composite->red=gamma*Minus(p->red*Sa,Sa,q->red*Da,Da);
    composite->green=gamma*Minus(p->green*Sa,Sa,q->green*Da,Da);
    composite->blue=gamma*Minus(p->blue*Sa,Sa,q->blue*Da,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Minus(p->index*Sa,Sa,q->index*Da,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-(Sa-Da));
    if ( (channel & RedChannel) != 0 )
      composite->red=p->red-q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=p->green-q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue=p->blue-q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=p->index-q->index;
  }
}
static inline MagickRealType ModulusAdd(const MagickRealType p,
  const MagickRealType Sa, const MagickRealType q, const MagickRealType Da)
{
  /*
    Add the two values, wrapping around modulo the quantum range, then
    apply 'over' alpha blending to the wrapped sum.
  */
  MagickRealType
    pixel;
  pixel=p+q;
  if (pixel > QuantumRange)
    pixel-=(QuantumRange+1.0);  /* wrap around */
  return(pixel*Sa*Da + p*Sa*(1-Da) + q*Da*(1-Sa));
}
static inline void CompositeModulusAdd(const MagickPixelPacket *p,
  const MagickPixelPacket *q, const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    'ModulusAdd' compose: wrap-around addition via ModulusAdd(), with
    'over' alpha blending when channels are synced.
  */
  if ( (channel & SyncChannels) != 0 ) {
    MagickRealType
      Sa,
      Da,
      gamma;
    Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
    Da=1.0-QuantumScale*q->opacity;
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
    composite->red=ModulusAdd(p->red,Sa,q->red,Da);
    composite->green=ModulusAdd(p->green,Sa,q->green,Da);
    composite->blue=ModulusAdd(p->blue,Sa,q->blue,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=ModulusAdd(p->index,Sa,q->index,Da);
  }
  else { /* handle channels as separate grayscale channels */
    /* alpha is added in its non-inverted (QuantumRange-opacity) form */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange-ModulusAdd(QuantumRange-p->opacity,
        1.0,QuantumRange-q->opacity,1.0);
    if ( (channel & RedChannel) != 0 )
      composite->red=ModulusAdd(p->red,1.0,q->red,1.0);
    if ( (channel & GreenChannel) != 0 )
      composite->green=ModulusAdd(p->green,1.0,q->green,1.0);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=ModulusAdd(p->blue,1.0,q->blue,1.0);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=ModulusAdd(p->index,1.0,q->index,1.0);
  }
}
static inline MagickRealType ModulusSubtract(const MagickRealType p,
  const MagickRealType Sa, const MagickRealType q, const MagickRealType Da)
{
  /*
    Subtract the two values, wrapping around modulo the quantum range, then
    apply 'over' alpha blending to the wrapped difference.
  */
  MagickRealType
    pixel;
  pixel=p-q;
  if (pixel < 0.0)
    pixel+=(QuantumRange+1.0);  /* wrap around */
  return(pixel*Sa*Da + p*Sa*(1-Da) + q*Da*(1-Sa));
}
static inline void CompositeModulusSubtract(const MagickPixelPacket *p,
  const MagickPixelPacket *q, const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    'ModulusSubtract' compose: wrap-around subtraction via
    ModulusSubtract(), with 'over' alpha blending when channels are synced.
  */
  if ( (channel & SyncChannels) != 0 ) {
    MagickRealType
      Sa,
      Da,
      gamma;
    Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
    Da=1.0-QuantumScale*q->opacity;
    gamma = RoundToUnity(Sa+Da-Sa*Da);
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
    composite->red=ModulusSubtract(p->red,Sa,q->red,Da);
    composite->green=ModulusSubtract(p->green,Sa,q->green,Da);
    composite->blue=ModulusSubtract(p->blue,Sa,q->blue,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=ModulusSubtract(p->index,Sa,q->index,Da);
  }
  else { /* handle channels as separate grayscale channels */
    /* alpha is subtracted in its non-inverted (QuantumRange-opacity) form */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange-ModulusSubtract(QuantumRange-p->opacity,
        1.0,QuantumRange-q->opacity,1.0);
    if ( (channel & RedChannel) != 0 )
      composite->red=ModulusSubtract(p->red,1.0,q->red,1.0);
    if ( (channel & GreenChannel) != 0 )
      composite->green=ModulusSubtract(p->green,1.0,q->green,1.0);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=ModulusSubtract(p->blue,1.0,q->blue,1.0);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=ModulusSubtract(p->index,1.0,q->index,1.0);
  }
}
static inline MagickRealType Multiply(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  MagickRealType
    product;

  /* SVG 'multiply': Dca' = Sca*Dca + Sca*(1-Da) + Dca*(1-Sa) */
  product=Sca*Dca;
  return(product+Sca*(1.0-Da)+Dca*(1.0-Sa));
}
static inline void CompositeMultiply(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    'Multiply' compose: normalized, alpha-premultiplied channels through
    Multiply() when synced; plain per-channel products otherwise.
  */
  MagickRealType
    Da,
    gamma,
    Sa;
  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
    composite->red=gamma*Multiply(QuantumScale*p->red*Sa,Sa,QuantumScale*
      q->red*Da,Da);
    composite->green=gamma*Multiply(QuantumScale*p->green*Sa,Sa,QuantumScale*
      q->green*Da,Da);
    composite->blue=gamma*Multiply(QuantumScale*p->blue*Sa,Sa,QuantumScale*
      q->blue*Da,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Multiply(QuantumScale*p->index*Sa,Sa,QuantumScale*
        q->index*Da,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Sa*Da);
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumScale*p->red*q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumScale*p->green*q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumScale*p->blue*q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumScale*p->index*q->index;
  }
}
static inline MagickRealType Out(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType magick_unused(q),
  const MagickRealType Da)
{
  MagickRealType
    result;

  /* 'Out' compose: source contributes only outside the destination. */
  result=Sa*p*(1.0-Da);
  return(result);
}
static inline void CompositeOut(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  /*
    'Out' compose: result alpha is Sa*(1-Da); channels are alpha-weighted
    via Out() and un-premultiplied by 1/gamma.
  */
  MagickRealType
    Sa,
    Da,
    gamma;
  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa*(1.0-Da);
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*Out(p->red,Sa,q->red,Da);
  composite->green=gamma*Out(p->green,Sa,q->green,Da);
  composite->blue=gamma*Out(p->blue,Sa,q->blue,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Out(p->index,Sa,q->index,Da);
}
static MagickRealType PegtopLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PegTop: A Soft-Light alternative: A continuous version of the Softlight
    function, producing very similar results.

    f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc

    See http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
  */
  if (fabs(Da) < MagickEpsilon)
    return(Sca);  /* fully transparent destination: avoid divide by zero */
  return(Dca*Dca*(Sa-2*Sca)/Da+Sca*(2*Dca+1-Da)+Dca*(1-Sa));
}
static inline void CompositePegtopLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  /*
    'PegtopLight' compose (always channel-synced): normalized,
    alpha-premultiplied channels through PegtopLight(), rescaled by gamma.
  */
  MagickRealType
    Da,
    gamma,
    Sa;
  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*PegtopLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PegtopLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PegtopLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PegtopLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}
static MagickRealType PinLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PinLight: A Photoshop 7 composition method
    http://www.simplefilter.de/en/basics/mixmods.html

    f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc
    (each branch below is the corresponding alpha-premultiplied form)
  */
  if (Dca*Sa < Da*(2*Sca-Sa))
    return(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
  if ((Dca*Sa) > (2*Sca*Da))
    return(Sca*Da+Sca+Dca*(1.0-Sa));
  return(Sca*(1.0-Da)+Dca);
}
static inline void CompositePinLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  /*
    'PinLight' compose (always channel-synced): normalized,
    alpha-premultiplied channels through PinLight(), rescaled by gamma.
  */
  MagickRealType
    Da,
    gamma,
    Sa;
  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*PinLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PinLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PinLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PinLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}
static inline MagickRealType Screen(const MagickRealType Sca,
  const MagickRealType Dca)
{
  MagickRealType
    product;

  /*
    Screen: a negated multiply, f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc).
    The caller folds alpha into the channel values beforehand.
  */
  product=Sca*Dca;
  return(Sca+Dca-product);
}
static inline void CompositeScreen(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    'Screen' compose: negated multiply via Screen(); alpha is folded into
    Sa/Da (scaled by QuantumScale) before the per-channel calls.
  */
  MagickRealType
    Sa,
    Da,
    gamma;
  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    Sa*=QuantumScale; Da*=QuantumScale; /* optimization */
    gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
    composite->red=gamma*Screen(p->red*Sa,q->red*Da);
    composite->green=gamma*Screen(p->green*Sa,q->green*Da);
    composite->blue=gamma*Screen(p->blue*Sa,q->blue*Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Screen(p->index*Sa,q->index*Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Screen(Sa,Da));
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumRange*Screen(QuantumScale*p->red,
        QuantumScale*q->red);
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumRange*Screen(QuantumScale*p->green,
        QuantumScale*q->green);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumRange*Screen(QuantumScale*p->blue,
        QuantumScale*q->blue);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumRange*Screen(QuantumScale*p->index,
        QuantumScale*q->index);
  }
}
static MagickRealType SoftLight(const MagickRealType Sca,
const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
#if 0
/*
Oct 2004 SVG specification -- was found to be incorrect
See http://lists.w3.org/Archives/Public/www-svg/2009Feb/0014.html.
*/
if (2.0*Sca < Sa)
return(Dca*(Sa-(1.0-Dca/Da)*(2.0*Sca-Sa))+Sca*(1.0-Da)+Dca*(1.0-Sa));
if (8.0*Dca <= Da)
return(Dca*(Sa-(1.0-Dca/Da)*(2.0*Sca-Sa)*(3.0-8.0*Dca/Da))+
Sca*(1.0-Da)+Dca*(1.0-Sa));
return((Dca*Sa+(pow(Dca/Da,0.5)*Da-Dca)*(2.0*Sca-Sa))+Sca*(1.0-Da)+
Dca*(1.0-Sa));
#else
MagickRealType
alpha,
beta;
/*
New specification: March 2009 SVG specification.
*/
alpha=Dca/Da;
if ((2.0*Sca) < Sa)
return(Dca*(Sa+(2.0*Sca-Sa)*(1.0-alpha))+Sca*(1.0-Da)+Dca*(1.0-Sa));
if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
{
beta=Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*alpha*(4.0*alpha+1.0)*(alpha-1.0)+7.0*
alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
return(beta);
}
beta=Dca*Sa+Da*(2.0*Sca-Sa)*(pow(alpha,0.5)-alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
return(beta);
#endif
}
static inline void CompositeSoftLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  /*
    'SoftLight' compose (always channel-synced): normalized,
    alpha-premultiplied channels through SoftLight(), rescaled by gamma.
  */
  MagickRealType
    Da,
    gamma,
    Sa;
  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*SoftLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*SoftLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*SoftLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*SoftLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}
/*
  Deprecated.
  Multiply difference by amount, if difference larger than threshold???
  What use this is is completely unknown.
  The Opacity calculation appears to be inverted -- Anthony Thyssen
*/
static inline MagickRealType Threshold(const MagickRealType p,
  const MagickRealType q,const MagickRealType threshold,
  const MagickRealType amount)
{
  MagickRealType
    delta;
  delta=p-q;
  /* small differences (|2*delta| < threshold) leave the destination alone */
  if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
    return(q);
  return(q+delta*amount);
}
static inline void CompositeThreshold(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const MagickRealType threshold,
  const MagickRealType amount,MagickPixelPacket *composite)
{
  /*
    'Threshold' compose (deprecated): applies Threshold() per channel; the
    opacity result is negated, which the note above flags as suspect.
  */
  composite->red=Threshold(p->red,q->red,threshold,amount);
  composite->green=Threshold(p->green,q->green,threshold,amount);
  composite->blue=Threshold(p->blue,q->blue,threshold,amount);
  composite->opacity=QuantumRange-Threshold(p->opacity,q->opacity,
    threshold,amount);
  if (q->colorspace == CMYKColorspace)
    composite->index=Threshold(p->index,q->index,threshold,amount);
}
static MagickRealType VividLight(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
  /*
    VividLight: A Photoshop 7 composition method. See
    http://www.simplefilter.de/en/basics/mixmods.html.

    f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))

    Inputs are alpha-premultiplied channel values (Sca, Dca) with their
    alphas (Sa, Da); each return adds the usual Sca*(1-Da)+Dca*(1-Sa)
    "outside the overlap" terms.
  */
  /* Guard the divisions below: Sa==0 or Sca==Sa would divide by zero. */
  if ((fabs(Sa) < MagickEpsilon) || (fabs(Sca-Sa) < MagickEpsilon))
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if ((2*Sca) <= Sa)
    return(Sa*(Da+Sa*(Dca-Da)/(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Dca*Sa*Sa/(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
}
static inline void CompositeVividLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    dest_alpha,
    scale,
    source_alpha;

  /*
    Vivid-light blend of source p over destination q.  Channels are fed to
    VividLight() in normalized, alpha-premultiplied form and rescaled by
    the SVG "over" blended alpha.
  */
  source_alpha=1.0-QuantumScale*p->opacity;
  dest_alpha=1.0-QuantumScale*q->opacity;
  scale=RoundToUnity(source_alpha+dest_alpha-source_alpha*dest_alpha);
  composite->opacity=(MagickRealType) QuantumRange*(1.0-scale);
  if (fabs(scale) <= MagickEpsilon)
    scale=1.0;  /* avoid division by a vanishing alpha */
  scale=QuantumRange/scale;
  composite->red=scale*VividLight(QuantumScale*p->red*source_alpha,
    source_alpha,QuantumScale*q->red*dest_alpha,dest_alpha);
  composite->green=scale*VividLight(QuantumScale*p->green*source_alpha,
    source_alpha,QuantumScale*q->green*dest_alpha,dest_alpha);
  composite->blue=scale*VividLight(QuantumScale*p->blue*source_alpha,
    source_alpha,QuantumScale*q->blue*dest_alpha,dest_alpha);
  if (q->colorspace == CMYKColorspace)
    composite->index=scale*VividLight(QuantumScale*p->index*source_alpha,
      source_alpha,QuantumScale*q->index*dest_alpha,dest_alpha);
}
static MagickRealType Xor(const MagickRealType Sca,const MagickRealType Sa,
  const MagickRealType Dca,const MagickRealType Da)
{
  /*
    Xor: keep each premultiplied channel only where it does not overlap
    the other image (source outside destination plus destination outside
    source).
  */
  return(Sca*(1.0-Da)+Dca*(1.0-Sa));
}
static inline void CompositeXor(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    dest_alpha,
    scale,
    source_alpha;

  /*
    Xor blend mode (X=0,Y=1,Z=1): result is visible only where exactly one
    of source and destination is opaque.  Note channels are premultiplied
    here without QuantumScale, and the rescale factor is 1/alpha (not
    QuantumRange/alpha) -- this matches the raw-quantum channel values.
  */
  source_alpha=1.0-QuantumScale*p->opacity;
  dest_alpha=1.0-QuantumScale*q->opacity;
  scale=source_alpha+dest_alpha-2*source_alpha*dest_alpha;
  composite->opacity=(MagickRealType) QuantumRange*(1.0-scale);
  if (fabs(scale) <= MagickEpsilon)
    scale=1.0;  /* avoid division by a vanishing alpha */
  scale=1.0/scale;
  composite->red=scale*Xor(p->red*source_alpha,source_alpha,
    q->red*dest_alpha,dest_alpha);
  composite->green=scale*Xor(p->green*source_alpha,source_alpha,
    q->green*dest_alpha,dest_alpha);
  composite->blue=scale*Xor(p->blue*source_alpha,source_alpha,
    q->blue*dest_alpha,dest_alpha);
  if (q->colorspace == CMYKColorspace)
    composite->index=scale*Xor(p->index*source_alpha,source_alpha,
      q->index*dest_alpha,dest_alpha);
}
static void HSBComposite(const double hue,const double saturation,
  const double brightness,MagickRealType *red,MagickRealType *green,
  MagickRealType *blue)
{
  MagickRealType
    b,
    f,
    g,
    h,
    p,
    q,
    r,
    t;

  /*
    Convert an HSB triplet (each nominally in [0,1]) to quantum-scaled RGB.
  */
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  if (saturation == 0.0)
    {
      /*
        Achromatic: every channel carries the brightness.
      */
      *red=(MagickRealType) QuantumRange*brightness;
      *green=(*red);
      *blue=(*red);
      return;
    }
  h=6.0*(hue-floor(hue));      /* hue sector in [0,6) */
  f=h-floor((double) h);       /* fractional position within the sector */
  p=brightness*(1.0-saturation);
  q=brightness*(1.0-saturation*f);
  t=brightness*(1.0-saturation*(1.0-f));
  switch ((int) h)
  {
    case 1:
    {
      r=q;
      g=brightness;
      b=p;
      break;
    }
    case 2:
    {
      r=p;
      g=brightness;
      b=t;
      break;
    }
    case 3:
    {
      r=p;
      g=q;
      b=brightness;
      break;
    }
    case 4:
    {
      r=t;
      g=p;
      b=brightness;
      break;
    }
    case 5:
    {
      r=brightness;
      g=p;
      b=q;
      break;
    }
    case 0:
    default:
    {
      r=brightness;
      g=t;
      b=p;
      break;
    }
  }
  *red=(MagickRealType) QuantumRange*r;
  *green=(MagickRealType) QuantumRange*g;
  *blue=(MagickRealType) QuantumRange*b;
}
MagickExport MagickBooleanType CompositeImage(Image *image,
  const CompositeOperator compose,const Image *composite_image,
  const ssize_t x_offset,const ssize_t y_offset)
{
  /*
    Convenience wrapper: composite onto the default channel set.  All the
    real work happens in CompositeImageChannel().
  */
  return(CompositeImageChannel(image,DefaultChannels,compose,composite_image,
    x_offset,y_offset));
}
MagickExport MagickBooleanType CompositeImageChannel(Image *image,
  const ChannelType channel,const CompositeOperator compose,
  const Image *composite_image,const ssize_t x_offset,const ssize_t y_offset)
{
#define CompositeImageTag "Composite/Image"

  /*
    Composite composite_image onto image at (x_offset,y_offset) using the
    given compose operator, restricted to the given channels.  Returns
    MagickTrue on success, MagickFalse on failure (errors are reported via
    image->exception).  Several operators parse per-image tuning values from
    the "compose:args" artifact; Blur/Displace/Distort first render a derived
    overlay into destination_image and then composite that instead.
  */
  CacheView
    *composite_view,
    *image_view;

  const char
    *value;

  double
    sans;  /* throwaway slot for unused CompositeHSB outputs */

  ExceptionInfo
    *exception;

  GeometryInfo
    geometry_info;

  Image
    *destination_image;

  MagickBooleanType
    modify_outside_overlay,
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    amount,
    destination_dissolve,
    midpoint,
    percent_brightness,
    percent_saturation,
    source_dissolve,
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Prepare composite image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(composite_image != (Image *) NULL);
  assert(composite_image->signature == MagickSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  GetMagickPixelPacket(image,&zero);
  destination_image=(Image *) NULL;
  /*
    Operator defaults; several are overridden below from "compose:args".
  */
  amount=0.5;
  destination_dissolve=1.0;
  modify_outside_overlay=MagickFalse;
  percent_brightness=100.0;
  percent_saturation=100.0;
  source_dissolve=1.0;
  threshold=0.05f;
  /*
    Per-operator setup: argument parsing, fast paths, and derived overlays.
  */
  switch (compose)
  {
    case ClearCompositeOp:
    case SrcCompositeOp:
    case InCompositeOp:
    case SrcInCompositeOp:
    case OutCompositeOp:
    case SrcOutCompositeOp:
    case DstInCompositeOp:
    case DstAtopCompositeOp:
    {
      /*
        Modify destination outside the overlaid region.
      */
      modify_outside_overlay=MagickTrue;
      break;
    }
    case OverCompositeOp:
    {
      if (image->matte != MagickFalse)
        break;
      if (composite_image->matte != MagickFalse)
        break;
      /*
        Fully-opaque over: fall through to the copy fast path below.
      */
    }
    case CopyCompositeOp:
    {
      if ((x_offset < 0) || (y_offset < 0))
        break;
      /*
        NOTE(review): '>=' also rejects an overlay that fits exactly flush
        with the right/bottom edge; the generic path below still handles
        that case correctly, just without this row-copy fast path.
      */
      if ((x_offset+(ssize_t) composite_image->columns) >= (ssize_t) image->columns)
        break;
      if ((y_offset+(ssize_t) composite_image->rows) >= (ssize_t) image->rows)
        break;
      /*
        Fast path: overlay is strictly inside the canvas, so copy whole
        pixel rows (and colormap indexes) directly and return.
      */
      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireCacheView(image);
      composite_view=AcquireCacheView(composite_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
      for (y=0; y < (ssize_t) composite_image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const IndexPacket
          *composite_indexes;

        register const PixelPacket
          *p;

        register IndexPacket
          *indexes;

        register PixelPacket
          *q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(composite_view,0,y,composite_image->columns,
          1,exception);
        q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
          composite_image->columns,1,exception);
        if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        composite_indexes=GetCacheViewVirtualIndexQueue(composite_view);
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        (void) CopyMagickMemory(q,p,composite_image->columns*sizeof(*p));
        if ((indexes != (IndexPacket *) NULL) &&
            (composite_indexes != (const IndexPacket *) NULL))
          (void) CopyMagickMemory(indexes,composite_indexes,
            composite_image->columns*sizeof(*indexes));
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_CompositeImage)
#endif
            proceed=SetImageProgress(image,CompositeImageTag,
              (MagickOffsetType) y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      composite_view=DestroyCacheView(composite_view);
      image_view=DestroyCacheView(image_view);
      return(status);
    }
    case CopyOpacityCompositeOp:
    case ChangeMaskCompositeOp:
    {
      /*
        Modify destination outside the overlaid region and require an alpha
        channel to exist, to add transparency.
      */
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      modify_outside_overlay=MagickTrue;
      break;
    }
    case BlurCompositeOp:
    {
      CacheView
        *composite_view,
        *destination_view;

      MagickPixelPacket
        pixel;

      MagickRealType
        angle_range,
        angle_start,
        height,
        width;

      ResampleFilter
        *resample_filter;

      SegmentInfo
        blur;

      /*
        Blur Image dictated by an overlay gradient map: X = red_channel;
        Y = green_channel; compose:args = x_scale[,y_scale[,angle]].
      */
      destination_image=CloneImage(image,image->columns,image->rows,MagickTrue,
        &image->exception);
      if (destination_image == (Image *) NULL)
        return(MagickFalse);
      /*
        Determine the horizontal and vertical maximum blur.
      */
      SetGeometryInfo(&geometry_info);
      flags=NoValue;
      value=GetImageArtifact(composite_image,"compose:args");
      if (value != (char *) NULL)
        flags=ParseGeometry(value,&geometry_info);
      if ((flags & WidthValue) == 0 )
        {
          /* x_scale argument is mandatory for -compose Blur */
          destination_image=DestroyImage(destination_image);
          return(MagickFalse);
        }
      width=geometry_info.rho;
      height=geometry_info.sigma;
      blur.x1=geometry_info.rho;
      blur.x2=0.0;
      blur.y1=0.0;
      blur.y2=geometry_info.sigma;
      angle_start=0.0;
      angle_range=0.0;
      if ((flags & HeightValue) == 0)
        blur.y2=blur.x1;  /* y_scale defaults to x_scale */
      if ((flags & XValue) != 0 )
        {
          MagickRealType
            angle;

          /* fixed blur ellipse rotation */
          angle=DegreesToRadians(geometry_info.xi);
          blur.x1=width*cos(angle);
          blur.x2=width*sin(angle);
          blur.y1=(-height*sin(angle));
          blur.y2=height*cos(angle);
        }
      if ((flags & YValue) != 0 )
        {
          /* per-pixel rotation range, driven by the map's blue channel */
          angle_start=DegreesToRadians(geometry_info.xi);
          angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
        }
      /*
        Blur Image by resampling.
      */
      pixel=zero;
      exception=(&image->exception);
      resample_filter=AcquireResampleFilter(image,&image->exception);
      SetResampleFilter(resample_filter,CubicFilter,2.0);
      destination_view=AcquireCacheView(destination_image);
      composite_view=AcquireCacheView(composite_image);
      for (y=0; y < (ssize_t) composite_image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const PixelPacket
          *restrict p;

        register PixelPacket
          *restrict r;

        register IndexPacket
          *restrict destination_indexes;

        register ssize_t
          x;

        if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
          continue;
        p=GetCacheViewVirtualPixels(composite_view,0,y,composite_image->columns,
          1,exception);
        r=QueueCacheViewAuthenticPixels(destination_view,0,y,
          destination_image->columns,1,&image->exception);
        if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
          break;
        destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view);
        for (x=0; x < (ssize_t) composite_image->columns; x++)
        {
          if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
            {
              p++;
              continue;
            }
          if (fabs(angle_range) > MagickEpsilon)
            {
              MagickRealType
                angle;

              /* rotate the blur ellipse per-pixel from the blue channel */
              angle=angle_start+angle_range*QuantumScale*
                GetPixelBlue(p);
              blur.x1=width*cos(angle);
              blur.x2=width*sin(angle);
              blur.y1=(-height*sin(angle));
              blur.y2=height*cos(angle);
            }
          /* scale ellipse axes by the map's red (X) and green (Y) channels */
          ScaleResampleFilter(resample_filter,blur.x1*QuantumScale*
            GetPixelRed(p),blur.y1*QuantumScale*
            GetPixelGreen(p),blur.x2*QuantumScale*
            GetPixelRed(p),blur.y2*QuantumScale*
            GetPixelGreen(p));
          (void) ResamplePixelColor(resample_filter,(double) x_offset+x,
            (double) y_offset+y,&pixel);
          SetPixelPacket(destination_image,&pixel,r,destination_indexes+x);
          p++;
          r++;
        }
        sync=SyncCacheViewAuthenticPixels(destination_view,exception);
        if (sync == MagickFalse)
          break;
      }
      resample_filter=DestroyResampleFilter(resample_filter);
      composite_view=DestroyCacheView(composite_view);
      destination_view=DestroyCacheView(destination_view);
      /* main loop below composites the blurred derivative, not the map */
      composite_image=destination_image;
      break;
    }
    case DisplaceCompositeOp:
    case DistortCompositeOp:
    {
      CacheView
        *composite_view,
        *destination_view,
        *image_view;

      MagickPixelPacket
        pixel;

      MagickRealType
        horizontal_scale,
        vertical_scale;

      PointInfo
        center,
        offset;

      register IndexPacket
        *restrict destination_indexes;

      register PixelPacket
        *restrict r;

      /*
        Displace/Distort based on overlay gradient map:
          X = red_channel;  Y = green_channel;
          compose:args = x_scale[,y_scale[,center.x,center.y]]
      */
      destination_image=CloneImage(image,image->columns,image->rows,MagickTrue,
        &image->exception);
      if (destination_image == (Image *) NULL)
        return(MagickFalse);
      SetGeometryInfo(&geometry_info);
      flags=NoValue;
      value=GetImageArtifact(composite_image,"compose:args");
      if (value != (char *) NULL)
        flags=ParseGeometry(value,&geometry_info);
      if ((flags & (WidthValue|HeightValue)) == 0 )
        {
          /* no scales given: default to half the relevant image size */
          if ((flags & AspectValue) == 0)
            {
              horizontal_scale=(MagickRealType) (composite_image->columns-1.0)/
                2.0;
              vertical_scale=(MagickRealType) (composite_image->rows-1.0)/2.0;
            }
          else
            {
              horizontal_scale=(MagickRealType) (image->columns-1.0)/2.0;
              vertical_scale=(MagickRealType) (image->rows-1.0)/2.0;
            }
        }
      else
        {
          horizontal_scale=geometry_info.rho;
          vertical_scale=geometry_info.sigma;
          if ((flags & PercentValue) != 0)
            {
              /* percent scales are relative to overlay or background size */
              if ((flags & AspectValue) == 0)
                {
                  horizontal_scale*=(composite_image->columns-1.0)/200.0;
                  vertical_scale*=(composite_image->rows-1.0)/200.0;
                }
              else
                {
                  horizontal_scale*=(image->columns-1.0)/200.0;
                  vertical_scale*=(image->rows-1.0)/200.0;
                }
            }
          if ((flags & HeightValue) == 0)
            vertical_scale=horizontal_scale;
        }
      /*
        Determine fixed center point for absolute distortion map
          Absolute distort ==
            Displace offset relative to a fixed absolute point
            Select that point according to +X+Y user inputs.
            default = center of overlay image
            arg flag '!' = locations/percentage relative to background image
      */
      center.x=(MagickRealType) x_offset;
      center.y=(MagickRealType) y_offset;
      if (compose == DistortCompositeOp)
        {
          if ((flags & XValue) == 0)
            if ((flags & AspectValue) == 0)
              center.x=(MagickRealType) x_offset+(composite_image->columns-1)/
                2.0;
            else
              center.x=((MagickRealType) image->columns-1)/2.0;
          else
            if ((flags & AspectValue) == 0)
              center.x=(MagickRealType) x_offset+geometry_info.xi;
            else
              center.x=geometry_info.xi;
          if ((flags & YValue) == 0)
            if ((flags & AspectValue) == 0)
              center.y=(MagickRealType) y_offset+(composite_image->rows-1)/2.0;
            else
              center.y=((MagickRealType) image->rows-1)/2.0;
          else
            if ((flags & AspectValue) == 0)
              center.y=(MagickRealType) y_offset+geometry_info.psi;
            else
              center.y=geometry_info.psi;
        }
      /*
        Shift the pixel offset point as defined by the provided,
        displacement/distortion map.  -- Like a lens...
      */
      pixel=zero;
      exception=(&image->exception);
      image_view=AcquireCacheView(image);
      destination_view=AcquireCacheView(destination_image);
      composite_view=AcquireCacheView(composite_image);
      for (y=0; y < (ssize_t) composite_image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const PixelPacket
          *restrict p;

        register ssize_t
          x;

        if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
          continue;
        p=GetCacheViewVirtualPixels(composite_view,0,y,composite_image->columns,
          1,exception);
        r=QueueCacheViewAuthenticPixels(destination_view,0,y,
          destination_image->columns,1,&image->exception);
        if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
          break;
        destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view);
        for (x=0; x < (ssize_t) composite_image->columns; x++)
        {
          if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
            {
              p++;
              continue;
            }
          /*
            Displace the offset.  Red/green map values are re-centered
            around mid-quantum so mid-gray means "no displacement"; for
            Displace the offset is relative to the current pixel, for
            Distort it is relative to the fixed center point.
          */
          offset.x=(horizontal_scale*(GetPixelRed(p)-
            (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
            QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ?
            x : 0);
          offset.y=(vertical_scale*(GetPixelGreen(p)-
            (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
            QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ?
            y : 0);
          (void) InterpolateMagickPixelPacket(image,image_view,
            UndefinedInterpolatePixel,(double) offset.x,(double) offset.y,
            &pixel,exception);
          /*
            Mask with the 'invalid pixel mask' in alpha channel.
          */
          pixel.opacity=(MagickRealType) QuantumRange*(1.0-(1.0-QuantumScale*
            pixel.opacity)*(1.0-QuantumScale*GetPixelOpacity(p)));
          SetPixelPacket(destination_image,&pixel,r,destination_indexes+x);
          p++;
          r++;
        }
        sync=SyncCacheViewAuthenticPixels(destination_view,exception);
        if (sync == MagickFalse)
          break;
      }
      destination_view=DestroyCacheView(destination_view);
      composite_view=DestroyCacheView(composite_view);
      image_view=DestroyCacheView(image_view);
      /* main loop below composites the distorted derivative, not the map */
      composite_image=destination_image;
      break;
    }
    case DissolveCompositeOp:
    {
      /*
        Geometry arguments to dissolve factors.
      */
      value=GetImageArtifact(composite_image,"compose:args");
      if (value != (char *) NULL)
        {
          flags=ParseGeometry(value,&geometry_info);
          source_dissolve=geometry_info.rho/100.0;
          destination_dissolve=1.0;
          if ((source_dissolve-MagickEpsilon) < 0.0)
            source_dissolve=0.0;
          if ((source_dissolve+MagickEpsilon) > 1.0)
            {
              /* >100%: surplus dissolves the destination instead */
              destination_dissolve=2.0-source_dissolve;
              source_dissolve=1.0;
            }
          if ((flags & SigmaValue) != 0)
            destination_dissolve=geometry_info.sigma/100.0;
          if ((destination_dissolve-MagickEpsilon) < 0.0)
            destination_dissolve=0.0;
          modify_outside_overlay=MagickTrue;
          if ((destination_dissolve+MagickEpsilon) > 1.0 )
            {
              destination_dissolve=1.0;
              modify_outside_overlay=MagickFalse;
            }
        }
      break;
    }
    case BlendCompositeOp:
    {
      /* compose:args = src_percent[,dst_percent]; default dst = 100-src */
      value=GetImageArtifact(composite_image,"compose:args");
      if (value != (char *) NULL)
        {
          flags=ParseGeometry(value,&geometry_info);
          source_dissolve=geometry_info.rho/100.0;
          destination_dissolve=1.0-source_dissolve;
          if ((flags & SigmaValue) != 0)
            destination_dissolve=geometry_info.sigma/100.0;
          modify_outside_overlay=MagickTrue;
          if ((destination_dissolve+MagickEpsilon) > 1.0)
            modify_outside_overlay=MagickFalse;
        }
      break;
    }
    case MathematicsCompositeOp:
    {
      /*
        Just collect the values from "compose:args", setting.
        Unused values are set to zero automagically.
        Arguments are normally a comma separated list, so this probably should
        be changed to some 'general comma list' parser, (with a minimum
        number of values)
      */
      SetGeometryInfo(&geometry_info);
      value=GetImageArtifact(composite_image,"compose:args");
      if (value != (char *) NULL)
        (void) ParseGeometry(value,&geometry_info);
      break;
    }
    case ModulateCompositeOp:
    {
      /*
        Determine the brightness and saturation scale.
      */
      value=GetImageArtifact(composite_image,"compose:args");
      if (value != (char *) NULL)
        {
          flags=ParseGeometry(value,&geometry_info);
          percent_brightness=geometry_info.rho;
          if ((flags & SigmaValue) != 0)
            percent_saturation=geometry_info.sigma;
        }
      break;
    }
    case ThresholdCompositeOp:
    {
      /*
        Determine the amount and threshold.
        This composition method is deprecated.
      */
      value=GetImageArtifact(composite_image,"compose:args");
      if (value != (char *) NULL)
        {
          flags=ParseGeometry(value,&geometry_info);
          amount=geometry_info.rho;
          threshold=geometry_info.sigma;
          if ((flags & SigmaValue) == 0)
            threshold=0.05f;
        }
      threshold*=QuantumRange;
      break;
    }
    default:
      break;
  }
  /* user override of the outside-overlay policy */
  value=GetImageArtifact(composite_image,"compose:outside-overlay");
  if (value != (const char *) NULL)
    modify_outside_overlay=IsMagickTrue(value);
  /*
    Composite image.
  */
  status=MagickTrue;
  progress=0;
  midpoint=((MagickRealType) QuantumRange+1.0)/2;
  GetMagickPixelPacket(composite_image,&zero);
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  composite_view=AcquireCacheView(composite_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const PixelPacket
      *pixels;

    double
      brightness,
      hue,
      saturation;

    MagickPixelPacket
      composite,
      destination,
      source;

    register const IndexPacket
      *restrict composite_indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    if (modify_outside_overlay == MagickFalse)
      {
        /* destination untouched outside the overlay: skip rows above/below */
        if (y < y_offset)
          continue;
        if ((y-y_offset) >= (ssize_t) composite_image->rows)
          continue;
      }
    /*
      If pixels is NULL, y is outside overlay region.
    */
    pixels=(PixelPacket *) NULL;
    p=(PixelPacket *) NULL;
    if ((y >= y_offset) && ((y-y_offset) < (ssize_t) composite_image->rows))
      {
        p=GetCacheViewVirtualPixels(composite_view,0,y-y_offset,
          composite_image->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixels=p;
        /* pre-advance p so that p lines up with x==0 of the canvas row */
        if (x_offset < 0)
          p-=x_offset;
      }
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    composite_indexes=GetCacheViewVirtualIndexQueue(composite_view);
    source=zero;
    destination=zero;
    hue=0.0;
    saturation=0.0;
    brightness=0.0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (modify_outside_overlay == MagickFalse)
        {
          if (x < x_offset)
            {
              q++;
              continue;
            }
          if ((x-x_offset) >= (ssize_t) composite_image->columns)
            break;
        }
      destination.red=(MagickRealType) GetPixelRed(q);
      destination.green=(MagickRealType) GetPixelGreen(q);
      destination.blue=(MagickRealType) GetPixelBlue(q);
      if (image->matte != MagickFalse)
        destination.opacity=(MagickRealType) GetPixelOpacity(q);
      if (image->colorspace == CMYKColorspace)
        destination.index=(MagickRealType) GetPixelIndex(indexes+x);
      if (image->colorspace == CMYKColorspace)
        {
          /* operators work in RGB sense; invert CMYK channels first */
          destination.red=(MagickRealType) QuantumRange-destination.red;
          destination.green=(MagickRealType) QuantumRange-destination.green;
          destination.blue=(MagickRealType) QuantumRange-destination.blue;
          destination.index=(MagickRealType) QuantumRange-destination.index;
        }
      /*
        Handle destination modifications outside overlaid region.
      */
      composite=destination;
      if ((pixels == (PixelPacket *) NULL) || (x < x_offset) ||
          ((x-x_offset) >= (ssize_t) composite_image->columns))
        {
          switch (compose)
          {
            case DissolveCompositeOp:
            case BlendCompositeOp:
            {
              composite.opacity=(MagickRealType) (QuantumRange-
                destination_dissolve*(QuantumRange-composite.opacity));
              break;
            }
            case ClearCompositeOp:
            case SrcCompositeOp:
            {
              CompositeClear(&destination,&composite);
              break;
            }
            case InCompositeOp:
            case SrcInCompositeOp:
            case OutCompositeOp:
            case SrcOutCompositeOp:
            case DstInCompositeOp:
            case DstAtopCompositeOp:
            case CopyOpacityCompositeOp:
            case ChangeMaskCompositeOp:
            {
              composite.opacity=(MagickRealType) TransparentOpacity;
              break;
            }
            default:
            {
              /* virtual pixel lookup honors the overlay's edge policy */
              (void) GetOneVirtualMagickPixel(composite_image,x-x_offset,
                y-y_offset,&composite,exception);
              break;
            }
          }
          if (image->colorspace == CMYKColorspace)
            {
              composite.red=(MagickRealType) QuantumRange-composite.red;
              composite.green=(MagickRealType) QuantumRange-composite.green;
              composite.blue=(MagickRealType) QuantumRange-composite.blue;
              composite.index=(MagickRealType) QuantumRange-composite.index;
            }
          SetPixelRed(q,ClampToQuantum(composite.red));
          SetPixelGreen(q,ClampToQuantum(composite.green));
          SetPixelBlue(q,ClampToQuantum(composite.blue));
          if (image->matte != MagickFalse)
            SetPixelOpacity(q,ClampToQuantum(composite.opacity));
          if (image->colorspace == CMYKColorspace)
            SetPixelIndex(indexes+x,ClampToQuantum(composite.index));
          q++;
          continue;
        }
      /*
        Handle normal overlay of source onto destination.
      */
      source.red=(MagickRealType) GetPixelRed(p);
      source.green=(MagickRealType) GetPixelGreen(p);
      source.blue=(MagickRealType) GetPixelBlue(p);
      if (composite_image->matte != MagickFalse)
        source.opacity=(MagickRealType) GetPixelOpacity(p);
      if (composite_image->colorspace == CMYKColorspace)
        source.index=(MagickRealType) GetPixelIndex(composite_indexes+
          x-x_offset);
      if (composite_image->colorspace == CMYKColorspace)
        {
          /* operators work in RGB sense; invert CMYK channels first */
          source.red=(MagickRealType) QuantumRange-source.red;
          source.green=(MagickRealType) QuantumRange-source.green;
          source.blue=(MagickRealType) QuantumRange-source.blue;
          source.index=(MagickRealType) QuantumRange-source.index;
        }
      switch (compose)
      {
        /* Duff-Porter Compositions */
        case ClearCompositeOp:
        {
          CompositeClear(&destination,&composite);
          break;
        }
        case SrcCompositeOp:
        case CopyCompositeOp:
        case ReplaceCompositeOp:
        {
          composite=source;
          break;
        }
        case NoCompositeOp:
        case DstCompositeOp:
          break;
        case OverCompositeOp:
        case SrcOverCompositeOp:
        {
          MagickPixelCompositeOver(&source,source.opacity,&destination,
            destination.opacity,&composite);
          break;
        }
        case DstOverCompositeOp:
        {
          MagickPixelCompositeOver(&destination,destination.opacity,&source,
            source.opacity,&composite);
          break;
        }
        case SrcInCompositeOp:
        case InCompositeOp:
        {
          CompositeIn(&source,&destination,&composite);
          break;
        }
        case DstInCompositeOp:
        {
          CompositeIn(&destination,&source,&composite);
          break;
        }
        case OutCompositeOp:
        case SrcOutCompositeOp:
        {
          CompositeOut(&source,&destination,&composite);
          break;
        }
        case DstOutCompositeOp:
        {
          CompositeOut(&destination,&source,&composite);
          break;
        }
        case AtopCompositeOp:
        case SrcAtopCompositeOp:
        {
          CompositeAtop(&source,&destination,&composite);
          break;
        }
        case DstAtopCompositeOp:
        {
          CompositeAtop(&destination,&source,&composite);
          break;
        }
        case XorCompositeOp:
        {
          CompositeXor(&source,&destination,&composite);
          break;
        }
        /* Mathematical Compositions */
        case PlusCompositeOp:
        {
          CompositePlus(&source,&destination,channel,&composite);
          break;
        }
        case MinusDstCompositeOp:
        {
          CompositeMinus(&source,&destination,channel,&composite);
          break;
        }
        case MinusSrcCompositeOp:
        {
          CompositeMinus(&destination,&source,channel,&composite);
          break;
        }
        case ModulusAddCompositeOp:
        {
          CompositeModulusAdd(&source,&destination,channel,&composite);
          break;
        }
        case ModulusSubtractCompositeOp:
        {
          CompositeModulusSubtract(&source,&destination,channel,&composite);
          break;
        }
        case DifferenceCompositeOp:
        {
          CompositeDifference(&source,&destination,channel,&composite);
          break;
        }
        case ExclusionCompositeOp:
        {
          CompositeExclusion(&source,&destination,channel,&composite);
          break;
        }
        case MultiplyCompositeOp:
        {
          CompositeMultiply(&source,&destination,channel,&composite);
          break;
        }
        case ScreenCompositeOp:
        {
          CompositeScreen(&source,&destination,channel,&composite);
          break;
        }
        case DivideDstCompositeOp:
        {
          CompositeDivide(&source,&destination,channel,&composite);
          break;
        }
        case DivideSrcCompositeOp:
        {
          CompositeDivide(&destination,&source,channel,&composite);
          break;
        }
        case DarkenCompositeOp:
        {
          CompositeDarken(&source,&destination,channel,&composite);
          break;
        }
        case LightenCompositeOp:
        {
          CompositeLighten(&source,&destination,channel,&composite);
          break;
        }
        case DarkenIntensityCompositeOp:
        {
          CompositeDarkenIntensity(&source,&destination,channel,&composite);
          break;
        }
        case LightenIntensityCompositeOp:
        {
          CompositeLightenIntensity(&source,&destination,channel,&composite);
          break;
        }
        case MathematicsCompositeOp:
        {
          CompositeMathematics(&source,&destination,channel,&geometry_info,
            &composite);
          break;
        }
        /* Lighting Compositions */
        case ColorDodgeCompositeOp:
        {
          CompositeColorDodge(&source,&destination,&composite);
          break;
        }
        case ColorBurnCompositeOp:
        {
          CompositeColorBurn(&source,&destination,&composite);
          break;
        }
        case LinearDodgeCompositeOp:
        {
          CompositeLinearDodge(&source,&destination,&composite);
          break;
        }
        case LinearBurnCompositeOp:
        {
          CompositeLinearBurn(&source,&destination,&composite);
          break;
        }
        case HardLightCompositeOp:
        {
          CompositeHardLight(&source,&destination,&composite);
          break;
        }
        case OverlayCompositeOp:
        {
          /* Overlay = Reversed HardLight. */
          CompositeHardLight(&destination,&source,&composite);
          break;
        }
        case SoftLightCompositeOp:
        {
          CompositeSoftLight(&source,&destination,&composite);
          break;
        }
        case LinearLightCompositeOp:
        {
          CompositeLinearLight(&source,&destination,&composite);
          break;
        }
        case PegtopLightCompositeOp:
        {
          CompositePegtopLight(&source,&destination,&composite);
          break;
        }
        case VividLightCompositeOp:
        {
          CompositeVividLight(&source,&destination,&composite);
          break;
        }
        case PinLightCompositeOp:
        {
          CompositePinLight(&source,&destination,&composite);
          break;
        }
        /* Other Composition */
        case ChangeMaskCompositeOp:
        {
          if ((composite.opacity > ((MagickRealType) QuantumRange/2.0)) ||
              (IsMagickColorSimilar(&source,&destination) != MagickFalse))
            composite.opacity=(MagickRealType) TransparentOpacity;
          else
            composite.opacity=(MagickRealType) OpaqueOpacity;
          break;
        }
        case BumpmapCompositeOp:
        {
          if (source.opacity == TransparentOpacity)
            break;
          CompositeBumpmap(&source,&destination,&composite);
          break;
        }
        case DissolveCompositeOp:
        {
          MagickPixelCompositeOver(&source,(MagickRealType) (QuantumRange-
            source_dissolve*(QuantumRange-source.opacity)),&destination,
            (MagickRealType) (QuantumRange-destination_dissolve*(QuantumRange-
            destination.opacity)),&composite);
          break;
        }
        case BlendCompositeOp:
        {
          MagickPixelCompositeBlend(&source,source_dissolve,&destination,
            destination_dissolve,&composite);
          break;
        }
        case ThresholdCompositeOp:
        {
          CompositeThreshold(&source,&destination,threshold,amount,&composite);
          break;
        }
        case ModulateCompositeOp:
        {
          ssize_t
            offset;

          if (source.opacity == TransparentOpacity)
            break;
          /* overlay intensity relative to mid-quantum drives brightness */
          offset=(ssize_t) (MagickPixelIntensityToQuantum(&source)-midpoint);
          if (offset == 0)
            break;
          CompositeHSB(destination.red,destination.green,destination.blue,&hue,
            &saturation,&brightness);
          brightness+=(0.01*percent_brightness*offset)/midpoint;
          saturation*=0.01*percent_saturation;
          HSBComposite(hue,saturation,brightness,&composite.red,
            &composite.green,&composite.blue);
          break;
        }
        case HueCompositeOp:
        {
          if (source.opacity == TransparentOpacity)
            break;
          if (destination.opacity == TransparentOpacity)
            {
              composite=source;
              break;
            }
          /* keep destination S/B; take H from the source */
          CompositeHSB(destination.red,destination.green,destination.blue,&hue,
            &saturation,&brightness);
          CompositeHSB(source.red,source.green,source.blue,&hue,&sans,&sans);
          HSBComposite(hue,saturation,brightness,&composite.red,
            &composite.green,&composite.blue);
          if (source.opacity < destination.opacity)
            composite.opacity=source.opacity;
          break;
        }
        case SaturateCompositeOp:
        {
          if (source.opacity == TransparentOpacity)
            break;
          if (destination.opacity == TransparentOpacity)
            {
              composite=source;
              break;
            }
          /* keep destination H/B; take S from the source */
          CompositeHSB(destination.red,destination.green,destination.blue,&hue,
            &saturation,&brightness);
          CompositeHSB(source.red,source.green,source.blue,&sans,&saturation,
            &sans);
          HSBComposite(hue,saturation,brightness,&composite.red,
            &composite.green,&composite.blue);
          if (source.opacity < destination.opacity)
            composite.opacity=source.opacity;
          break;
        }
        case LuminizeCompositeOp:
        {
          if (source.opacity == TransparentOpacity)
            break;
          if (destination.opacity == TransparentOpacity)
            {
              composite=source;
              break;
            }
          /* keep destination H/S; take B from the source */
          CompositeHSB(destination.red,destination.green,destination.blue,&hue,
            &saturation,&brightness);
          CompositeHSB(source.red,source.green,source.blue,&sans,&sans,
            &brightness);
          HSBComposite(hue,saturation,brightness,&composite.red,
            &composite.green,&composite.blue);
          if (source.opacity < destination.opacity)
            composite.opacity=source.opacity;
          break;
        }
        case ColorizeCompositeOp:
        {
          if (source.opacity == TransparentOpacity)
            break;
          if (destination.opacity == TransparentOpacity)
            {
              composite=source;
              break;
            }
          /* keep destination B; take H/S from the source */
          CompositeHSB(destination.red,destination.green,destination.blue,&sans,
            &sans,&brightness);
          CompositeHSB(source.red,source.green,source.blue,&hue,&saturation,
            &sans);
          HSBComposite(hue,saturation,brightness,&composite.red,
            &composite.green,&composite.blue);
          if (source.opacity < destination.opacity)
            composite.opacity=source.opacity;
          break;
        }
        case CopyRedCompositeOp:
        case CopyCyanCompositeOp:
        {
          composite.red=source.red;
          break;
        }
        case CopyGreenCompositeOp:
        case CopyMagentaCompositeOp:
        {
          composite.green=source.green;
          break;
        }
        case CopyBlueCompositeOp:
        case CopyYellowCompositeOp:
        {
          composite.blue=source.blue;
          break;
        }
        case CopyOpacityCompositeOp:
        {
          if (source.matte == MagickFalse)
            {
              /* no alpha on the source: derive opacity from its intensity */
              composite.opacity=(MagickRealType) (QuantumRange-
                MagickPixelIntensityToQuantum(&source));
              break;
            }
          composite.opacity=source.opacity;
          break;
        }
        case CopyBlackCompositeOp:
        {
          if (source.colorspace != CMYKColorspace)
            ConvertRGBToCMYK(&source);
          composite.index=source.index;
          break;
        }
        /* compose methods that are already handled */
        case BlurCompositeOp:
        case DisplaceCompositeOp:
        case DistortCompositeOp:
        {
          /* the derived overlay was pre-rendered above; just copy it */
          composite=source;
          break;
        }
        default:
          break;
      }
      if (image->colorspace == CMYKColorspace)
        {
          /* undo the RGB-sense inversion before storing CMYK */
          composite.red=(MagickRealType) QuantumRange-composite.red;
          composite.green=(MagickRealType) QuantumRange-composite.green;
          composite.blue=(MagickRealType) QuantumRange-composite.blue;
          composite.index=(MagickRealType) QuantumRange-composite.index;
        }
      SetPixelRed(q,ClampToQuantum(composite.red));
      SetPixelGreen(q,ClampToQuantum(composite.green));
      SetPixelBlue(q,ClampToQuantum(composite.blue));
      SetPixelOpacity(q,ClampToQuantum(composite.opacity));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(indexes+x,ClampToQuantum(composite.index));
      p++;
      /* defensive: keep p within the overlay row buffer -- TODO confirm
         whether this wrap can actually trigger given the guards above */
      if (p >= (pixels+composite_image->columns))
        p=pixels;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_CompositeImageChannel)
#endif
        proceed=SetImageProgress(image,CompositeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  composite_view=DestroyCacheView(composite_view);
  image_view=DestroyCacheView(image_view);
  /* release the derived overlay created for Blur/Displace/Distort */
  if (destination_image != (Image * ) NULL)
    destination_image=DestroyImage(destination_image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e x t u r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TextureImage() repeatedly tiles the texture image across and down the image
% canvas.
%
% The format of the TextureImage method is:
%
% MagickBooleanType TextureImage(Image *image,const Image *texture)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o texture: This image is the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture)
{
#define TextureImageTag "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Repeatedly tile the texture image across and down the image canvas,
    modifying image in place.  Returns MagickTrue on success.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  /*
    Tiling reads past the texture edges; wrap those reads with tiled
    virtual pixels.
  */
  (void) SetImageVirtualPixelMethod(texture,TileVirtualPixelMethod);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) || (image->matte != MagickFalse) ||
       (texture->matte != MagickFalse)))
    {
      /*
        Tile texture onto the image background.  General path: a non-copy
        compose operator or transparency requires full per-tile
        compositing via CompositeImage().
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status) omp_throttle(1)
#endif
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture->columns)
        {
          MagickBooleanType
            thread_status;

          /* tile_offset shifts the origin of the tiling pattern */
          thread_status=CompositeImage(image,image->compose,texture,x+
            texture->tile_offset.x,y+texture->tile_offset.y);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_TextureImage)
#endif
            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType)
              y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized).  Fast path: texture
    rows are copied directly into the image with CopyMagickMemory, no
    per-pixel compositing.
  */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  texture_view=AcquireCacheView(texture);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status) omp_throttle(1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *texture_indexes;

    register const PixelPacket
      *p;

    register IndexPacket
      *indexes;

    register ssize_t
      x;

    register PixelPacket
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /*
      Read one texture row (shifted by tile_offset, wrapped modulo the
      texture height) and queue the matching image row for writing.
    */
    p=GetCacheViewVirtualPixels(texture_view,texture->tile_offset.x,(y+
      texture->tile_offset.y) % texture->rows,texture->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    texture_indexes=GetCacheViewVirtualIndexQueue(texture_view);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture->columns)
    {
      width=texture->columns;
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;  /* clip the final partial tile */
      (void) CopyMagickMemory(q,p,width*sizeof(*p));
      if ((image->colorspace == CMYKColorspace) &&
          (texture->colorspace == CMYKColorspace))
        {
          /* both CMYK: also copy the index channel row */
          (void) CopyMagickMemory(indexes,texture_indexes,width*
            sizeof(*indexes));
          indexes+=width;
        }
      q+=width;
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_TextureImage)
#endif
        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
3d25pt_var.c | /*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /*
     * Compute *result = *x - *y with a non-negative tv_usec field.
     * NOTE: *y is normalized in place as part of the borrow, matching the
     * classic GNU libc manual example.
     *
     * Returns 1 when the difference is negative (x precedes y), else 0.
     */
    long carry;

    /* Borrow whole seconds into y's microseconds if x's fraction is smaller. */
    if (x->tv_usec < y->tv_usec) {
        carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }

    /* Fold any excess microseconds back into y's seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* tv_usec is certainly positive here. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  /*
   * Benchmark driver for the 25-point axis-symmetric variable-coefficient
   * 3D stencil (radius-4 star).
   *
   * Usage: prog Nx Ny Nz [Nt]
   *   Nx,Ny,Nz  interior problem size (8 halo cells are added per axis)
   *   Nt        number of time steps
   *
   * Returns 0 on success.
   */
  int t, i, j, k, m, test;

  /* Defaults keep the run well-defined when arguments are missing; the
     original read Nx/Ny/Nz/Nt uninitialized (undefined behavior). */
  int Nx = 8+8, Ny = 8+8, Nz = 8+8, Nt = 2;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Allocate the two time planes A[0], A[1], each an Nz x Ny x Nx grid. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for (m=0; m<2; m++) {
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for (i=0; i<Nz; i++) {
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for (j=0; j<Ny; j++) {
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* 13 coefficient grids: 1 center tap + 12 axis-symmetric pairs. */
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for (m=0; m<13; m++) {
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for (i=0; i<Nz; i++) {
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for (j=0; j<Ny; j++) {
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* Tile size information, including extra element to decide the list
     length.  The list is modified here before source-to-source
     transformations. */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 32;
  tile_size[3] = 512;
  tile_size[4] = -1;

  /* For timekeeping. */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;

  /*
   * Initialize every cell of both time planes and all coefficient grids.
   * Bug fix: the original loops started at index 1 and filled only A[0],
   * leaving plane 0 of each axis -- and all of A[1]'s halo -- undefined.
   * The stencil reads i-4..i+4 (etc.) starting at i==4, and at t==1 it
   * reads A[1]'s halo, so those uninitialized cells were actually read
   * (undefined behavior).
   */
  srand(42);
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Run the kernel TESTS times; report each run and keep the fastest. */
  for (test=0; test<TESTS; test++) {
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i  ][j  ][k  ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j  ][k  ] + A[(t)%2][i+1][j  ][k  ]) +
              coef[2][i][j][k] * (A[(t)%2][i  ][j-1][k  ] + A[(t)%2][i  ][j+1][k  ]) +
              coef[3][i][j][k] * (A[(t)%2][i  ][j  ][k-1] + A[(t)%2][i  ][j  ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j  ][k  ] + A[(t)%2][i+2][j  ][k  ]) +
              coef[5][i][j][k] * (A[(t)%2][i  ][j-2][k  ] + A[(t)%2][i  ][j+2][k  ]) +
              coef[6][i][j][k] * (A[(t)%2][i  ][j  ][k-2] + A[(t)%2][i  ][j  ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j  ][k  ] + A[(t)%2][i+3][j  ][k  ]) +
              coef[8][i][j][k] * (A[(t)%2][i  ][j-3][k  ] + A[(t)%2][i  ][j+3][k  ]) +
              coef[9][i][j][k] * (A[(t)%2][i  ][j  ][k-3] + A[(t)%2][i  ][j  ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j  ][k  ] + A[(t)%2][i+4][j  ][k  ]) +
              coef[11][i][j][k]* (A[(t)%2][i  ][j-4][k  ] + A[(t)%2][i  ][j+4][k  ]) +
              coef[12][i][j][k]* (A[(t)%2][i  ][j  ][k-4] + A[(t)%2][i  ][j  ][k+4]) ;
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Bug fix: the original called lowercase min(), which is undeclared;
       the macro defined in this file is MIN. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays.  Bug fix: the original leaked the top-level
     A, coef, and tile_size pointers. */
  for (m=0; m<2; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        free(A[m][i][j]);
      }
      free(A[m][i]);
    }
    free(A[m]);
  }
  free(A);
  for (m=0; m<13; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);

  return 0;
}
|
GrB_Matrix_wait.c | //------------------------------------------------------------------------------
// GrB_Matrix_wait: wait for a matrix to complete
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Finishes all work on a matrix, followed by an OpenMP flush.
#include "GB.h"
#define GB_FREE_ALL ;
GrB_Info GrB_Matrix_wait            // finish all work on a matrix
(
    GrB_Matrix *A                   // handle of the matrix to finish
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    // Flush so this thread observes other threads' prior writes to *A.
    #pragma omp flush
    // NOTE(review): GB_WHERE evaluates (*A) before GB_RETURN_IF_NULL (A)
    // runs; presumably GB_WHERE tolerates a NULL handle -- confirm against
    // the GB_WHERE definition in GB.h.
    GB_WHERE ((*A), "GrB_Matrix_wait (&A)") ;
    GB_RETURN_IF_NULL (A) ;
    GB_RETURN_IF_NULL_OR_FAULTY (*A) ;

    //--------------------------------------------------------------------------
    // finish all pending work on the matrix
    //--------------------------------------------------------------------------

    // Only do the (possibly expensive) wait when work is actually pending.
    if (GB_ANY_PENDING_WORK (*A))
    {
        GrB_Info info ;
        GB_BURBLE_START ("GrB_Matrix_wait") ;
        GB_OK (GB_Matrix_wait (*A, Context)) ;
        GB_BURBLE_END ;
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // Flush again so the finished matrix is visible to other threads.
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
|
transform.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/draw.h"
#include "magick/effect.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/layer.h"
#include "magick/list.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/resource_.h"
#include "magick/resize.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
%      Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
  ExceptionInfo *exception)
{
#define ChopImageTag "Chop/Image"

  CacheView
    *chop_view,
    *image_view;

  Image
    *chop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    extent;

  ssize_t
    y;

  /*
    Remove the chop_info region from the image and collapse the remaining
    pixels to occupy the removed portion; a new, smaller image is returned.

    Check chop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  assert(chop_info != (RectangleInfo *) NULL);
  if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
      ((chop_info->y+(ssize_t) chop_info->height) < 0) ||
      (chop_info->x > (ssize_t) image->columns) ||
      (chop_info->y > (ssize_t) image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Clip the chop rectangle to the image bounds; a negative offset shrinks
    the region by the overhang and is then clamped to the origin.
  */
  extent=(*chop_info);
  if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
    extent.width=(size_t) ((ssize_t) image->columns-extent.x);
  if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
    extent.height=(size_t) ((ssize_t) image->rows-extent.y);
  if (extent.x < 0)
    {
      extent.width-=(size_t) (-extent.x);
      extent.x=0;
    }
  if (extent.y < 0)
    {
      extent.height-=(size_t) (-extent.y);
      extent.y=0;
    }
  /* The result is the original size minus the chopped width and height. */
  chop_image=CloneImage(image,image->columns-extent.width,image->rows-
    extent.height,MagickTrue,exception);
  if (chop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Extract chop image: first copy the rows above the chopped band,
    skipping the chopped column range within each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  chop_view=AcquireCacheView(chop_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) extent.y; y++)
  {
    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict chop_indexes,
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* keep only columns outside [extent.x, extent.x+extent.width) */
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          *q=(*p);
          if (indexes != (IndexPacket *) NULL)
            {
              if (chop_indexes != (IndexPacket *) NULL)
                *chop_indexes++=GetPixelIndex(indexes+x);
            }
          q++;
        }
      p++;
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ChopImage)
#endif
        proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Extract chop image: then copy the rows below the chopped band,
    shifted up by the chopped height in the destination.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
  {
    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict chop_indexes,
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
      1,exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* same column filter as the first pass */
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          *q=(*p);
          if (indexes != (IndexPacket *) NULL)
            {
              if (chop_indexes != (IndexPacket *) NULL)
                *chop_indexes++=GetPixelIndex(indexes+x);
            }
          q++;
        }
      p++;
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ChopImage)
#endif
        proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  chop_view=DestroyCacheView(chop_view);
  image_view=DestroyCacheView(image_view);
  chop_image->type=image->type;
  return(chop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
%  single image.
%
%  The format of the ConsolidateCMYKImages method is:
%
%      Image *ConsolidateCMYKImages(const Image *images,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
  ExceptionInfo *exception)
{
  CacheView
    *cmyk_view,
    *image_view;

  Image
    *cmyk_image,
    *cmyk_images;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Consolidate separate C, M, Y, and K planes into a single image: each
    group of four consecutive images in the list becomes one CMYK image in
    the returned list.  Channel values are stored as QuantumRange minus the
    plane's pixel intensity.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  cmyk_images=NewImageList();
  for (i=0; i < (ssize_t) GetImageListLength(images); i+=4)
  {
    /*
      Pass 1 (cyan): clone the first plane and write its inverted
      intensity into the red channel, which holds cyan for CMYK.
    */
    cmyk_image=CloneImage(images,images->columns,images->rows,MagickTrue,
      exception);
    if (cmyk_image == (Image *) NULL)
      break;
    if (SetImageStorageClass(cmyk_image,DirectClass) == MagickFalse)
      break;
    (void) SetImageColorspace(cmyk_image,CMYKColorspace);
    image_view=AcquireCacheView(images);
    cmyk_view=AcquireCacheView(cmyk_image);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *restrict p;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        SetPixelRed(q,QuantumRange-PixelIntensityToQuantum(p));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    /*
      Pass 2 (magenta): read-modify-write the green channel from the next
      plane.  NOTE(review): if the list length is not a multiple of 4, the
      partially built cmyk_image is never appended and is not freed here.
    */
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    image_view=AcquireCacheView(images);
    cmyk_view=AcquireCacheView(cmyk_image);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *restrict p;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        q->green=(Quantum) (QuantumRange-PixelIntensityToQuantum(p));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    /*
      Pass 3 (yellow): same read-modify-write, into the blue channel.
    */
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    image_view=AcquireCacheView(images);
    cmyk_view=AcquireCacheView(cmyk_image);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *restrict p;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        q->blue=(Quantum) (QuantumRange-PixelIntensityToQuantum(p));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    /*
      Pass 4 (black): the fourth plane goes into the index channel.
    */
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    image_view=AcquireCacheView(images);
    cmyk_view=AcquireCacheView(cmyk_image);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *restrict p;

      register IndexPacket
        *restrict indexes;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      indexes=GetCacheViewAuthenticIndexQueue(cmyk_view);
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        SetPixelIndex(indexes+x,QuantumRange-
          PixelIntensityToQuantum(p));
        p++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    AppendImageToList(&cmyk_images,cmyk_image);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
  }
  return(cmyk_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
  ExceptionInfo *exception)
{
#define CropImageTag "Crop/Image"

  CacheView
    *crop_view,
    *image_view;

  Image
    *crop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    bounding_box,
    page;

  ssize_t
    y;

  /*
    Extract the region of the image defined by geometry.  The region must
    be fully defined; no special handling of geometry flags is performed.

    Check crop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    The crop is interpreted relative to the virtual canvas (image->page);
    fall back to the pixel dimensions when no page geometry is set.
  */
  bounding_box=image->page;
  if ((bounding_box.width == 0) || (bounding_box.height == 0))
    {
      bounding_box.width=image->columns;
      bounding_box.height=image->rows;
    }
  page=(*geometry);
  if (page.width == 0)
    page.width=bounding_box.width;
  if (page.height == 0)
    page.height=bounding_box.height;
  if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
      ((bounding_box.y-page.y) >= (ssize_t) page.height) ||
      ((page.x-bounding_box.x) > (ssize_t) image->columns) ||
      ((page.y-bounding_box.y) > (ssize_t) image->rows))
    {
      /*
        Crop is not within virtual canvas, return 1 pixel transparent image.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.opacity=(Quantum) TransparentOpacity;
      (void) SetImageBackgroundColor(crop_image);
      crop_image->page=bounding_box;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      if (crop_image->dispose == BackgroundDispose)
        crop_image->dispose=NoneDispose;
      return(crop_image);
    }
  /*
    Translate the requested page rectangle into pixel coordinates,
    clamping each axis to the actual pixel data.
  */
  if ((page.x < 0) && (bounding_box.x >= 0))
    {
      page.width+=page.x-bounding_box.x;
      page.x=0;
    }
  else
    {
      page.width-=bounding_box.x-page.x;
      page.x-=bounding_box.x;
      if (page.x < 0)
        page.x=0;
    }
  if ((page.y < 0) && (bounding_box.y >= 0))
    {
      page.height+=page.y-bounding_box.y;
      page.y=0;
    }
  else
    {
      page.height-=bounding_box.y-page.y;
      page.y-=bounding_box.y;
      if (page.y < 0)
        page.y=0;
    }
  if ((size_t) (page.x+page.width) > image->columns)
    page.width=image->columns-page.x;
  if ((geometry->width != 0) && (page.width > geometry->width))
    page.width=geometry->width;
  if ((size_t) (page.y+page.height) > image->rows)
    page.height=image->rows-page.y;
  if ((geometry->height != 0) && (page.height > geometry->height))
    page.height=geometry->height;
  bounding_box.x+=page.x;
  bounding_box.y+=page.y;
  if ((page.width == 0) || (page.height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Initialize crop image attributes.
  */
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  if (((ssize_t) (bounding_box.x+bounding_box.width) > (ssize_t) image->page.width) ||
      ((ssize_t) (bounding_box.y+bounding_box.height) > (ssize_t) image->page.height))
    {
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image: copy each row of the region (and its index channel, when
    present) with a straight memory copy.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  crop_view=AcquireCacheView(crop_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict crop_indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    crop_indexes=GetCacheViewAuthenticIndexQueue(crop_view);
    (void) CopyMagickMemory(q,p,(size_t) crop_image->columns*sizeof(*p));
    if ((indexes != (IndexPacket *) NULL) &&
        (crop_indexes != (IndexPacket *) NULL))
      (void) CopyMagickMemory(crop_indexes,indexes,(size_t) crop_image->columns*
        sizeof(*crop_indexes));
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_CropImage)
#endif
        proceed=SetImageProgress(image,CropImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToTiles() crops a single image, into a possible list of tiles.
% This may include a single sub-region of the image. This basically applies
% all the normal geometry flags for Crop.
%
%      Image *CropImageToTiles(const Image *image,
%        const char *crop_geometry, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t MagickRound(MagickRealType x)
{
  /*
    Round to the nearest integer, with ties rounded away from zero.
  */
  return((ssize_t) (x >= 0.0 ? x+0.5 : x-0.5));
}
MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  /*
    Crop a single image into a (possibly single-element) list of tiles as
    directed by the crop geometry string; all the normal crop geometry
    flags are honored.  Returns NULL only if CropImage()/CloneImage() fail.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag): the geometry gives the tile grid
        dimensions, not a fixed tile size.
      */
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      if ((flags & AspectValue) == 0)
        {
          /* without '!': offsets shrink the area to be tiled */
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          /* with '!': offsets expand the area to be tiled */
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      delta.x=(double) width/geometry.width;    /* nominal tile width */
      delta.y=(double) height/geometry.height;  /* nominal tile height */
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        /*
          Compute this tile row's y extent; tile edges are rounded so the
          tiles exactly cover the area without gaps or overlaps.
        */
        if ((flags & AspectValue) == 0)
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? geometry.y : 0)));
          }
        crop.height-=crop.y;     /* convert bottom edge into a height */
        crop.y+=image->page.y;   /* express in virtual-canvas coordinates */
        for (offset.x=0; offset.x < (double) width; )
        {
          /*
            Compute this tile's x extent the same way.
          */
          if ((flags & AspectValue) == 0)
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x; /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x; /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          crop.width-=crop.x;    /* convert right edge into a width */
          crop.x+=image->page.x; /* express in virtual-canvas coordinates */
          next=CropImage(image,&crop,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      /* tiling may legitimately stop early; drop any pending exception */
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          /* '!' flag: keep the crop's page geometry relative to the crop */
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH; a zero dimension defaults to the
        full page size.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /* geometry contains the whole image: return an unmodified copy */
  return(CloneImage(image,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExcerptImage() returns a excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.  Unlike CropImage(), the region is taken as-is
    with no page/virtual-canvas adjustment.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.  Rows are independent, so the loop is parallelized;
    on any failure 'status' is cleared and remaining iterations are no-ops.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  excerpt_view=AcquireCacheView(excerpt_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict excerpt_indexes,
      *restrict indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    /* source row offset by the requested region origin */
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) CopyMagickMemory(q,p,(size_t) excerpt_image->columns*sizeof(*q));
    /*
      NOTE(review): the source indexes are fetched with the authentic-queue
      accessor on a view over a const image — presumably equivalent to the
      virtual index queue here; confirm against the cache-view API.
    */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (indexes != (IndexPacket *) NULL)
      {
        excerpt_indexes=GetCacheViewAuthenticIndexQueue(excerpt_view);
        if (excerpt_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(excerpt_indexes,indexes,(size_t)
            excerpt_image->columns*sizeof(*excerpt_indexes));
      }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ExcerptImage)
#endif
        proceed=SetImageProgress(image,ExcerptImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  /* on failure, destroy the partial result and return NULL */
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *canvas;

  /*
    Build a background-colored canvas of the requested size, then composite
    the source image onto it shifted by the (negated) geometry offset.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  canvas=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas,DirectClass) == MagickFalse)
    {
      InheritException(exception,&canvas->exception);
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  /* a non-opaque background requires an alpha channel */
  if (canvas->background_color.opacity != OpaqueOpacity)
    canvas->matte=MagickTrue;
  (void) SetImageBackgroundColor(canvas);
  (void) CompositeImage(canvas,image->compose,image,-geometry->x,
    -geometry->y);
  return(canvas);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  /*
    Vertical mirror: copy each source row y to destination row rows-y-1.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  flip_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image.  Rows are independent so the loop is parallelized; any
    failure clears 'status' and turns remaining iterations into no-ops.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireCacheView(image);
  flip_view=AcquireCacheView(flip_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict flip_indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    /* destination row is the vertical mirror of the source row */
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q));
    /* colormap/CMYK index channel, when present, is mirrored the same way */
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if (indexes != (const IndexPacket *) NULL)
      {
        flip_indexes=GetCacheViewAuthenticIndexQueue(flip_view);
        if (flip_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(flip_indexes,indexes,(size_t) image->columns*
            sizeof(*flip_indexes));
      }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_FlipImage)
#endif
        proceed=SetImageProgress(image,FlipImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /* mirror the virtual canvas offset as well */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  /*
    Horizontal mirror: each row is copied with its pixels reversed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  flop_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row.  Rows are independent so the loop is parallelized.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireCacheView(image);
  flop_view=AcquireCacheView(flop_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict flop_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* walk q backwards from one-past-the-end while p walks forwards */
    q+=flop_image->columns;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    flop_indexes=GetCacheViewAuthenticIndexQueue(flop_view);
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      (*--q)=(*p++);
      /* mirror the index channel (colormap/CMYK), when present */
      if ((indexes != (const IndexPacket *) NULL) &&
          (flop_indexes != (IndexPacket *) NULL))
        SetPixelIndex(flop_indexes+flop_image->columns-x-1,
          GetPixelIndex( indexes+x));
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_FlopImage)
#endif
        proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /* mirror the virtual canvas offset as well */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType CopyImageRegion(Image *destination,
  const Image *source,const size_t columns,const size_t rows,
  const ssize_t sx,const ssize_t sy,const ssize_t dx,const ssize_t dy,
  ExceptionInfo *exception)
{
  /*
    Copy a columns-by-rows region from (sx,sy) of 'source' to (dx,dy) of
    'destination', one scanline at a time.  Returns MagickFalse if any
    scanline could not be read, written, or synced.
  */
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  status=MagickTrue;
  source_view=AcquireCacheView(source);
  destination_view=AcquireCacheView(destination);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict destination_indexes;

    register PixelPacket
      *restrict q;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source_view);
    (void) CopyMagickMemory(q,p,(size_t) columns*sizeof(*p));
    /* copy the index channel (colormap/CMYK), when present */
    if (indexes != (IndexPacket *) NULL)
      {
        destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view);
        if (destination_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(destination_indexes,indexes,(size_t)
            columns*sizeof(*indexes));
      }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    offset;

  /*
    Roll (circularly shift) the image by (x_offset,y_offset): pixels that
    fall off one edge re-enter on the opposite edge.  Implemented as four
    wrapped-quadrant copies; returns NULL if any copy fails.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  roll_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Normalize the offsets into [0,columns) and [0,rows).
  */
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image: copy the four quadrants.  All four copies must succeed, so
    accumulate the status with &= — the former |= (seeded by the first
    copy's result) masked a failure of any later copy whenever an earlier
    one had succeeded.
  */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;

  RectangleInfo
    geometry;

  /*
    Shave a border from the image: shave_info->width columns are removed
    from the left and right, shave_info->height rows from top and bottom.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Express the shave as a centered crop rectangle in page coordinates.
  */
  SetGeometry(image,&geometry);
  geometry.x=(ssize_t) shave_info->width+image->page.x;
  geometry.y=(ssize_t) shave_info->height+image->page.y;
  geometry.width-=2*shave_info->width;
  geometry.height-=2*shave_info->height;
  shave_image=CropImage(image,&geometry,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Shrink the virtual canvas to account for the removed border.
  */
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  shave_image->page.width-=2*shave_info->width;
  shave_image->page.height-=2*shave_info->height;
  return(shave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"

  CacheView
    *image_view,
    *splice_view;

  Image
    *splice_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    splice_geometry;

  ssize_t
    y;

  /*
    Allocate splice image: the result is the source enlarged by the splice
    width/height, with a background-colored band inserted at the splice
    position.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  splice_geometry=(*geometry);
  splice_image=CloneImage(image,image->columns+splice_geometry.width,
    image->rows+splice_geometry.height,MagickTrue,exception);
  if (splice_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(splice_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&splice_image->exception);
      splice_image=DestroyImage(splice_image);
      return((Image *) NULL);
    }
  (void) SetImageBackgroundColor(splice_image);
  /*
    Respect image geometry: shift the splice origin according to the
    image's gravity.  Horizontal gravities move x by width or width/2;
    vertical gravities move y by height or height/2.
  */
  switch (image->gravity)
  {
    default:
    case UndefinedGravity:
    case NorthWestGravity:
      break;
    case NorthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      break;
    }
    case NorthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      break;
    }
    case WestGravity:
    {
      /*
        Bug fix: West gravity centers vertically, so the y origin moves by
        half the splice HEIGHT (the original code added width/2 to y).
      */
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case StaticGravity:
    case CenterGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case EastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case SouthWestGravity:
    {
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
  }
  /*
    Splice image.  First loop: the rows above the spliced band.  Within
    each row, columns left of the splice are copied, the spliced columns
    are skipped (left as background), and the remaining source columns are
    copied to the right of the splice.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  splice_view=AcquireCacheView(splice_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) splice_geometry.y; y++)
  {
    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict indexes,
      *restrict splice_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* NOTE(review): source indexes fetched via the authentic accessor on a
       read-only view — presumably equivalent to the virtual queue here */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
    for (x=0; x < splice_geometry.x; x++)
    {
      SetPixelRed(q,GetPixelRed(p));
      SetPixelGreen(q,GetPixelGreen(p));
      SetPixelBlue(q,GetPixelBlue(p));
      SetPixelOpacity(q,OpaqueOpacity);
      if (image->matte != MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(splice_indexes+x,
          GetPixelIndex(indexes));
      indexes++;
      p++;
      q++;
    }
    /* skip the spliced columns: they keep the background color */
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q++;
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      SetPixelRed(q,GetPixelRed(p));
      SetPixelGreen(q,GetPixelGreen(p));
      SetPixelBlue(q,GetPixelBlue(p));
      SetPixelOpacity(q,OpaqueOpacity);
      if (image->matte != MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(splice_indexes+x,
          GetPixelIndex(indexes));
      indexes++;
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SpliceImage)
#endif
        proceed=SetImageProgress(image,SpliceImageTag,progress++,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Second loop: the rows below the spliced band; the source row is the
    destination row shifted up by the splice height.  The rows inside the
    band keep the background color.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
       y < (ssize_t) splice_image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict indexes,
      *restrict splice_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
      image->columns,1,exception);
    if ((y < 0) || (y >= (ssize_t) splice_image->rows))
      continue;
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
    for (x=0; x < splice_geometry.x; x++)
    {
      SetPixelRed(q,GetPixelRed(p));
      SetPixelGreen(q,GetPixelGreen(p));
      SetPixelBlue(q,GetPixelBlue(p));
      SetPixelOpacity(q,OpaqueOpacity);
      if (image->matte != MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(splice_indexes+x,
          GetPixelIndex(indexes));
      indexes++;
      p++;
      q++;
    }
    /* skip the spliced columns: they keep the background color */
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q++;
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      SetPixelRed(q,GetPixelRed(p));
      SetPixelGreen(q,GetPixelGreen(p));
      SetPixelBlue(q,GetPixelBlue(p));
      SetPixelOpacity(q,OpaqueOpacity);
      if (image->matte != MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(splice_indexes+x,
          GetPixelIndex(indexes));
      indexes++;
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SpliceImage)
#endif
        proceed=SetImageProgress(image,SpliceImageTag,progress++,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  splice_view=DestroyCacheView(splice_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    splice_image=DestroyImage(splice_image);
  return(splice_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
/*
DANGER: This function destroys what it assumes to be a single image list.
If the input image is part of a larger list, all other images in that list
will be simply 'lost', not destroyed.
Also if the crop generates a list of images only the first image is resized.
And finally if the crop succeeds and the resize failed, you will get a
cropped image, as well as a 'false' or 'failed' report.
This function and should probably be depreciated in favor of direct calls
to CropImageToTiles() or ResizeImage(), as appropriate.
*/
MagickExport MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry)
{
  Image
    *resize_image,
    *transform_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  /*
    Crop and/or resize *image in place (the pointer is replaced with the
    transformed image).  See the DANGER note above: this assumes a
    single-image list and resizes only the first crop tile.
  */
  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  transform_image=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *crop_image;

      /*
        Crop image to a user specified size.
      */
      crop_image=CropImageToTiles(*image,crop_geometry,&(*image)->exception);
      if (crop_image == (Image *) NULL)
        transform_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception);
      else
        {
          /* replace the input with the (first) crop result */
          transform_image=DestroyImage(transform_image);
          transform_image=GetFirstImageInList(crop_image);
        }
      *image=transform_image;
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user specified size.
  */
  flags=ParseRegionGeometry(transform_image,image_geometry,&geometry,
    &(*image)->exception);
  (void) flags;
  /* already the requested size: nothing to do */
  if ((transform_image->columns == geometry.width) &&
      (transform_image->rows == geometry.height))
    return(MagickTrue);
  resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
    transform_image->filter,transform_image->blur,&(*image)->exception);
  if (resize_image == (Image *) NULL)
    return(MagickFalse);
  transform_image=DestroyImage(transform_image);
  transform_image=resize_image;
  *image=transform_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImages() calls TransformImage() on each image of a sequence.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImages(Image **image,
% const char *crop_geometry,const char *image_geometry)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
MagickExport MagickBooleanType TransformImages(Image **images,
  const char *crop_geometry,const char *image_geometry)
{
  Image
    *image,
    **image_list,
    *transform_images;

  MagickStatusType
    status;

  register ssize_t
    i;

  /*
    Apply TransformImage() to every image of the sequence, rebuilding the
    list from the individual results.  Returns MagickFalse if any
    transform fails.
  */
  assert(images != (Image **) NULL);
  assert((*images)->signature == MagickSignature);
  if ((*images)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      (*images)->filename);
  image_list=ImageListToArray(*images,&(*images)->exception);
  if (image_list == (Image **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  transform_images=NewImageList();
  for (i=0; image_list[i] != (Image *) NULL; i++)
  {
    image=image_list[i];
    /*
      Accumulate with &= so a single failure is reported; the former |=
      (with status seeded to MagickTrue) made this function always report
      success.
    */
    status&=TransformImage(&image,crop_geometry,image_geometry);
    AppendImageToList(&transform_images,image);
  }
  *images=transform_images;
  image_list=(Image **) RelinquishMagickMemory(image_list);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"
CacheView
*image_view,
*transpose_view;
Image
*transpose_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/*
  Destination is allocated with swapped dimensions (rows x columns).
*/
transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transpose_image == (Image *) NULL)
return((Image *) NULL);
/*
  Transpose image: source row (rows-y-1) is copied verbatim into the
  destination column at x=(rows-y-1), one row per loop iteration.
*/
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
transpose_view=AcquireCacheView(transpose_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register IndexPacket
*restrict transpose_indexes,
*restrict indexes;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
/*
  p: source row; q: a 1-pixel-wide, full-height destination column.
*/
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
0,1,transpose_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
(void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q));
/*
  Mirror the colormap/index channel when present.  NOTE(review): the
  source pixels were fetched via GetCacheViewVirtualPixels but the
  indexes come from the authentic queue -- confirm the virtual index
  queue is not intended here.
*/
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (indexes != (IndexPacket *) NULL)
{
transpose_indexes=GetCacheViewAuthenticIndexQueue(transpose_view);
if (transpose_indexes != (IndexPacket *) NULL)
(void) CopyMagickMemory(transpose_indexes,indexes,(size_t)
image->columns*sizeof(*transpose_indexes));
}
if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* Progress counter is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransposeImage)
#endif
proceed=SetImageProgress(image,TransposeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transpose_view=DestroyCacheView(transpose_view);
image_view=DestroyCacheView(image_view);
transpose_image->type=image->type;
/*
  Swap the page geometry so the virtual canvas follows the rotation.
*/
page=transpose_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
transpose_image->page=page;
if (status == MagickFalse)
transpose_image=DestroyImage(transpose_image);
return(transpose_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag "Transverse/Image"
CacheView
*image_view,
*transverse_view;
Image
*transverse_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/*
  Destination is allocated with swapped dimensions (rows x columns).
*/
transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transverse_image == (Image *) NULL)
return((Image *) NULL);
/*
  Transverse image: source row y is written, reversed, into the
  destination column at x=(rows-y-1).
*/
status=MagickTrue;
progress=0;
image_view=AcquireCacheView(image);
transverse_view=AcquireCacheView(transverse_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict transverse_indexes,
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
/*
  p: source row; q: a 1-pixel-wide, full-height destination column.
*/
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-
1),0,1,transverse_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
/*
  Copy the row in reverse: advance q past the end, then fill backwards.
*/
q+=image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
*--q=(*p++);
/*
  Mirror the colormap/index channel when present.  NOTE(review): the
  source pixels were fetched via GetCacheViewVirtualPixels but the
  indexes come from the authentic queue -- confirm the virtual index
  queue is not intended here.
*/
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (indexes != (IndexPacket *) NULL)
{
transverse_indexes=GetCacheViewAuthenticIndexQueue(transverse_view);
if (transverse_indexes != (IndexPacket *) NULL)
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(transverse_indexes+image->columns-x-1,
GetPixelIndex(indexes+x));
}
sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* Progress counter is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransverseImage)
#endif
proceed=SetImageProgress(image,TransverseImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transverse_view=DestroyCacheView(transverse_view);
image_view=DestroyCacheView(image_view);
transverse_image->type=image->type;
/*
  Swap the page geometry and mirror the offsets so the virtual canvas
  follows the 270-degree rotation plus reflection.
*/
page=transverse_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
transverse_image->page=page;
if (status == MagickFalse)
transverse_image=DestroyImage(transverse_image);
return(transverse_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  TrimImage() crops the image to its bounding box (the smallest rectangle
  enclosing the non-border pixels).  If the bounding box is empty, a 1x1
  transparent image carrying the original page geometry (offset -1,-1) is
  returned instead.  The result is a newly allocated image or NULL on error.
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
RectangleInfo
bounds;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
bounds=GetImageBoundingBox(image,exception);
if ((bounds.width != 0) && (bounds.height != 0))
{
/*
  Translate the bounding box into page coordinates and crop.
*/
bounds.x+=image->page.x;
bounds.y+=image->page.y;
return(CropImage(image,&bounds,exception));
}
{
Image
*empty_image;
/*
  Nothing to trim: hand back a 1x1 transparent canvas.
*/
empty_image=CloneImage(image,1,1,MagickTrue,exception);
if (empty_image == (Image *) NULL)
return((Image *) NULL);
empty_image->background_color.opacity=(Quantum) TransparentOpacity;
(void) SetImageBackgroundColor(empty_image);
empty_image->page=image->page;
empty_image->page.x=(-1);
empty_image->page.y=(-1);
return(empty_image);
}
}
|
GB_unaryop__lnot_int8_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int8_fp32
// op(A') function: GB_tran__lnot_int8_fp32
// C type: int8_t
// A type: float
// cast: int8_t cij ; GB_CAST_SIGNED(cij,aij,8)
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
float
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int8_t z ; GB_CAST_SIGNED(z,x,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT8 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/*
  Apply the unary operator element-wise: Cx [p] = !(cast_to_int8 (Ax [p])
  != 0).  Generated code: the per-element work is the GB_CAST_OP macro
  defined above.  Parallelized with a static OpenMP schedule.
*/
GrB_Info GB_unop__lnot_int8_fp32
(
int8_t *restrict Cx,
const float *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
/* Operator compiled out via GxB_NO_* controls; caller falls back to the
   generic kernel. */
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/*
  C = op (cast (A')): transpose A, typecast float -> int8, and apply the
  logical-not operator.  The loop body lives in the shared template
  GB_unaryop_transpose.c, specialized here by the GB_* macros above;
  A_slice/naslice describe the parallel partition of A.
*/
GrB_Info GB_tran__lnot_int8_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
/* Operator compiled out via GxB_NO_* controls; caller falls back to the
   generic kernel. */
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_3x3_pack1to8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Convolution 3x3, stride 1, pack1 -> pack8 layout (AVX/FMA).
// bottom_blob: pack1 float input (already padded so each output row reads
// outw+2 input columns); top_blob: pack8 output (8 floats per element).
// kernel holds 9*8 floats per (output-channel, input-channel) pair.
// Output channels are processed two at a time to reuse the broadcast input
// values, then a scalar-channel remainder pass handles the odd channel.
//
// Fix: in the remainder pass, the 2-pixel inner-loop tail previously stored
// _sum0 to outptr0 + 8, discarding the freshly accumulated _sum1 (the second
// output pixel kept only its bias/partial value).  It now stores _sum1,
// matching the paired-channel path and the 4-pixel tail.
static void conv3x3s1_pack1to8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* bias = _bias;

    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = outch >> 1;
    remain_outch_start = nn_outch << 1;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 2;

        Mat out0 = top_blob.channel(p);
        Mat out1 = top_blob.channel(p + 1);

        // Seed the accumulators with the bias (or zero).
        __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
        __m256 _bias1 = bias ? _mm256_loadu_ps((const float*)bias + (p + 1) * 8) : _mm256_set1_ps(0.f);
        out0.fill(_bias0);
        out1.fill(_bias1);

        const float* k0 = kernel.channel(p);
        const float* k1 = kernel.channel(p + 1);

        for (int q = 0; q < inch; q++)
        {
            float* outptr0 = out0;
            float* outptr1 = out1;

            const Mat img0 = bottom_blob.channel(q);

            const float* r0 = img0.row(0);
            const float* r1 = img0.row(1);
            const float* r2 = img0.row(2);

            // 3x3 kernel taps for both output channels, 8 floats each.
            __m256 _k00_0 = _mm256_loadu_ps(k0);
            __m256 _k01_0 = _mm256_loadu_ps(k0 + 8);
            __m256 _k02_0 = _mm256_loadu_ps(k0 + 16);
            __m256 _k10_0 = _mm256_loadu_ps(k0 + 24);
            __m256 _k11_0 = _mm256_loadu_ps(k0 + 32);
            __m256 _k12_0 = _mm256_loadu_ps(k0 + 40);
            __m256 _k20_0 = _mm256_loadu_ps(k0 + 48);
            __m256 _k21_0 = _mm256_loadu_ps(k0 + 56);
            __m256 _k22_0 = _mm256_loadu_ps(k0 + 64);
            __m256 _k00_1 = _mm256_loadu_ps(k1);
            __m256 _k01_1 = _mm256_loadu_ps(k1 + 8);
            __m256 _k02_1 = _mm256_loadu_ps(k1 + 16);
            __m256 _k10_1 = _mm256_loadu_ps(k1 + 24);
            __m256 _k11_1 = _mm256_loadu_ps(k1 + 32);
            __m256 _k12_1 = _mm256_loadu_ps(k1 + 40);
            __m256 _k20_1 = _mm256_loadu_ps(k1 + 48);
            __m256 _k21_1 = _mm256_loadu_ps(k1 + 56);
            __m256 _k22_1 = _mm256_loadu_ps(k1 + 64);

            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                // 4 output pixels per iteration.
                for (; j + 3 < outw; j += 4)
                {
                    __m256 _sum00 = _mm256_loadu_ps(outptr0);
                    __m256 _sum10 = _mm256_loadu_ps(outptr1);
                    __m256 _r01 = _mm256_broadcast_ss(r0);
                    __m256 _r02 = _mm256_broadcast_ss(r0 + 1);
                    __m256 _r03 = _mm256_broadcast_ss(r0 + 2);
                    __m256 _r11 = _mm256_broadcast_ss(r1);
                    __m256 _r12 = _mm256_broadcast_ss(r1 + 1);
                    __m256 _r13 = _mm256_broadcast_ss(r1 + 2);
                    __m256 _r21 = _mm256_broadcast_ss(r2);
                    __m256 _r22 = _mm256_broadcast_ss(r2 + 1);
                    __m256 _r23 = _mm256_broadcast_ss(r2 + 2);
                    _sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00);
                    _sum10 = _mm256_fmadd_ps(_r01, _k00_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r02, _k01_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r03, _k02_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r11, _k10_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r12, _k11_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r13, _k12_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r21, _k20_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r22, _k21_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r23, _k22_1, _sum10);
                    _mm256_storeu_ps(outptr0, _sum00);
                    _mm256_storeu_ps(outptr1, _sum10);
                    __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8);
                    __m256 _sum11 = _mm256_loadu_ps(outptr1 + 8);
                    __m256 _r04 = _mm256_broadcast_ss(r0 + 3);
                    __m256 _r14 = _mm256_broadcast_ss(r1 + 3);
                    __m256 _r24 = _mm256_broadcast_ss(r2 + 3);
                    _sum01 = _mm256_fmadd_ps(_r02, _k00_0, _sum01);
                    _sum01 = _mm256_fmadd_ps(_r03, _k01_0, _sum01);
                    _sum01 = _mm256_fmadd_ps(_r04, _k02_0, _sum01);
                    _sum01 = _mm256_fmadd_ps(_r12, _k10_0, _sum01);
                    _sum01 = _mm256_fmadd_ps(_r13, _k11_0, _sum01);
                    _sum01 = _mm256_fmadd_ps(_r14, _k12_0, _sum01);
                    _sum01 = _mm256_fmadd_ps(_r22, _k20_0, _sum01);
                    _sum01 = _mm256_fmadd_ps(_r23, _k21_0, _sum01);
                    _sum01 = _mm256_fmadd_ps(_r24, _k22_0, _sum01);
                    _sum11 = _mm256_fmadd_ps(_r02, _k00_1, _sum11);
                    _sum11 = _mm256_fmadd_ps(_r03, _k01_1, _sum11);
                    _sum11 = _mm256_fmadd_ps(_r04, _k02_1, _sum11);
                    _sum11 = _mm256_fmadd_ps(_r12, _k10_1, _sum11);
                    _sum11 = _mm256_fmadd_ps(_r13, _k11_1, _sum11);
                    _sum11 = _mm256_fmadd_ps(_r14, _k12_1, _sum11);
                    _sum11 = _mm256_fmadd_ps(_r22, _k20_1, _sum11);
                    _sum11 = _mm256_fmadd_ps(_r23, _k21_1, _sum11);
                    _sum11 = _mm256_fmadd_ps(_r24, _k22_1, _sum11);
                    _mm256_storeu_ps(outptr0 + 8, _sum01);
                    _mm256_storeu_ps(outptr1 + 8, _sum11);
                    __m256 _sum02 = _mm256_loadu_ps(outptr0 + 16);
                    __m256 _sum12 = _mm256_loadu_ps(outptr1 + 16);
                    __m256 _r05 = _mm256_broadcast_ss(r0 + 4);
                    __m256 _r15 = _mm256_broadcast_ss(r1 + 4);
                    __m256 _r25 = _mm256_broadcast_ss(r2 + 4);
                    _sum02 = _mm256_fmadd_ps(_r03, _k00_0, _sum02);
                    _sum02 = _mm256_fmadd_ps(_r04, _k01_0, _sum02);
                    _sum02 = _mm256_fmadd_ps(_r05, _k02_0, _sum02);
                    _sum02 = _mm256_fmadd_ps(_r13, _k10_0, _sum02);
                    _sum02 = _mm256_fmadd_ps(_r14, _k11_0, _sum02);
                    _sum02 = _mm256_fmadd_ps(_r15, _k12_0, _sum02);
                    _sum02 = _mm256_fmadd_ps(_r23, _k20_0, _sum02);
                    _sum02 = _mm256_fmadd_ps(_r24, _k21_0, _sum02);
                    _sum02 = _mm256_fmadd_ps(_r25, _k22_0, _sum02);
                    _sum12 = _mm256_fmadd_ps(_r03, _k00_1, _sum12);
                    _sum12 = _mm256_fmadd_ps(_r04, _k01_1, _sum12);
                    _sum12 = _mm256_fmadd_ps(_r05, _k02_1, _sum12);
                    _sum12 = _mm256_fmadd_ps(_r13, _k10_1, _sum12);
                    _sum12 = _mm256_fmadd_ps(_r14, _k11_1, _sum12);
                    _sum12 = _mm256_fmadd_ps(_r15, _k12_1, _sum12);
                    _sum12 = _mm256_fmadd_ps(_r23, _k20_1, _sum12);
                    _sum12 = _mm256_fmadd_ps(_r24, _k21_1, _sum12);
                    _sum12 = _mm256_fmadd_ps(_r25, _k22_1, _sum12);
                    _mm256_storeu_ps(outptr0 + 16, _sum02);
                    _mm256_storeu_ps(outptr1 + 16, _sum12);
                    __m256 _r06 = _mm256_broadcast_ss(r0 + 5);
                    __m256 _r16 = _mm256_broadcast_ss(r1 + 5);
                    __m256 _r26 = _mm256_broadcast_ss(r2 + 5);
                    __m256 _sum03 = _mm256_loadu_ps(outptr0 + 24);
                    __m256 _sum13 = _mm256_loadu_ps(outptr1 + 24);
                    _sum03 = _mm256_fmadd_ps(_r04, _k00_0, _sum03);
                    _sum03 = _mm256_fmadd_ps(_r05, _k01_0, _sum03);
                    _sum03 = _mm256_fmadd_ps(_r06, _k02_0, _sum03);
                    _sum03 = _mm256_fmadd_ps(_r14, _k10_0, _sum03);
                    _sum03 = _mm256_fmadd_ps(_r15, _k11_0, _sum03);
                    _sum03 = _mm256_fmadd_ps(_r16, _k12_0, _sum03);
                    _sum03 = _mm256_fmadd_ps(_r24, _k20_0, _sum03);
                    _sum03 = _mm256_fmadd_ps(_r25, _k21_0, _sum03);
                    _sum03 = _mm256_fmadd_ps(_r26, _k22_0, _sum03);
                    _sum13 = _mm256_fmadd_ps(_r04, _k00_1, _sum13);
                    _sum13 = _mm256_fmadd_ps(_r05, _k01_1, _sum13);
                    _sum13 = _mm256_fmadd_ps(_r06, _k02_1, _sum13);
                    _sum13 = _mm256_fmadd_ps(_r14, _k10_1, _sum13);
                    _sum13 = _mm256_fmadd_ps(_r15, _k11_1, _sum13);
                    _sum13 = _mm256_fmadd_ps(_r16, _k12_1, _sum13);
                    _sum13 = _mm256_fmadd_ps(_r24, _k20_1, _sum13);
                    _sum13 = _mm256_fmadd_ps(_r25, _k21_1, _sum13);
                    _sum13 = _mm256_fmadd_ps(_r26, _k22_1, _sum13);
                    _mm256_storeu_ps(outptr0 + 24, _sum03);
                    _mm256_storeu_ps(outptr1 + 24, _sum13);
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    outptr0 += 32;
                    outptr1 += 32;
                }
                // 2-pixel tail.
                for (; j + 1 < outw; j += 2)
                {
                    __m256 _sum00 = _mm256_loadu_ps(outptr0);
                    __m256 _sum10 = _mm256_loadu_ps(outptr1);
                    __m256 _r01 = _mm256_broadcast_ss(r0);
                    __m256 _r02 = _mm256_broadcast_ss(r0 + 1);
                    __m256 _r03 = _mm256_broadcast_ss(r0 + 2);
                    __m256 _r11 = _mm256_broadcast_ss(r1);
                    __m256 _r12 = _mm256_broadcast_ss(r1 + 1);
                    __m256 _r13 = _mm256_broadcast_ss(r1 + 2);
                    __m256 _r21 = _mm256_broadcast_ss(r2);
                    __m256 _r22 = _mm256_broadcast_ss(r2 + 1);
                    __m256 _r23 = _mm256_broadcast_ss(r2 + 2);
                    _sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00);
                    _sum10 = _mm256_fmadd_ps(_r01, _k00_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r02, _k01_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r03, _k02_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r11, _k10_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r12, _k11_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r13, _k12_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r21, _k20_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r22, _k21_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r23, _k22_1, _sum10);
                    _mm256_storeu_ps(outptr0, _sum00);
                    _mm256_storeu_ps(outptr1, _sum10);
                    __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8);
                    __m256 _sum11 = _mm256_loadu_ps(outptr1 + 8);
                    __m256 _r04 = _mm256_broadcast_ss(r0 + 3);
                    __m256 _r14 = _mm256_broadcast_ss(r1 + 3);
                    __m256 _r24 = _mm256_broadcast_ss(r2 + 3);
                    _sum01 = _mm256_fmadd_ps(_r02, _k00_0, _sum01);
                    _sum01 = _mm256_fmadd_ps(_r03, _k01_0, _sum01);
                    _sum01 = _mm256_fmadd_ps(_r04, _k02_0, _sum01);
                    _sum01 = _mm256_fmadd_ps(_r12, _k10_0, _sum01);
                    _sum01 = _mm256_fmadd_ps(_r13, _k11_0, _sum01);
                    _sum01 = _mm256_fmadd_ps(_r14, _k12_0, _sum01);
                    _sum01 = _mm256_fmadd_ps(_r22, _k20_0, _sum01);
                    _sum01 = _mm256_fmadd_ps(_r23, _k21_0, _sum01);
                    _sum01 = _mm256_fmadd_ps(_r24, _k22_0, _sum01);
                    _sum11 = _mm256_fmadd_ps(_r02, _k00_1, _sum11);
                    _sum11 = _mm256_fmadd_ps(_r03, _k01_1, _sum11);
                    _sum11 = _mm256_fmadd_ps(_r04, _k02_1, _sum11);
                    _sum11 = _mm256_fmadd_ps(_r12, _k10_1, _sum11);
                    _sum11 = _mm256_fmadd_ps(_r13, _k11_1, _sum11);
                    _sum11 = _mm256_fmadd_ps(_r14, _k12_1, _sum11);
                    _sum11 = _mm256_fmadd_ps(_r22, _k20_1, _sum11);
                    _sum11 = _mm256_fmadd_ps(_r23, _k21_1, _sum11);
                    _sum11 = _mm256_fmadd_ps(_r24, _k22_1, _sum11);
                    _mm256_storeu_ps(outptr0 + 8, _sum01);
                    _mm256_storeu_ps(outptr1 + 8, _sum11);
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0 += 16;
                    outptr1 += 16;
                }
                // 1-pixel tail.
                for (; j < outw; j++)
                {
                    __m256 _sum00 = _mm256_loadu_ps(outptr0);
                    __m256 _sum10 = _mm256_loadu_ps(outptr1);
                    __m256 _r01 = _mm256_broadcast_ss(r0);
                    __m256 _r02 = _mm256_broadcast_ss(r0 + 1);
                    __m256 _r03 = _mm256_broadcast_ss(r0 + 2);
                    __m256 _r11 = _mm256_broadcast_ss(r1);
                    __m256 _r12 = _mm256_broadcast_ss(r1 + 1);
                    __m256 _r13 = _mm256_broadcast_ss(r1 + 2);
                    __m256 _r21 = _mm256_broadcast_ss(r2);
                    __m256 _r22 = _mm256_broadcast_ss(r2 + 1);
                    __m256 _r23 = _mm256_broadcast_ss(r2 + 2);
                    _sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00);
                    _sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00);
                    _sum10 = _mm256_fmadd_ps(_r01, _k00_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r02, _k01_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r03, _k02_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r11, _k10_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r12, _k11_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r13, _k12_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r21, _k20_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r22, _k21_1, _sum10);
                    _sum10 = _mm256_fmadd_ps(_r23, _k22_1, _sum10);
                    _mm256_storeu_ps(outptr0, _sum00);
                    _mm256_storeu_ps(outptr1, _sum10);
                    r0 += 1;
                    r1 += 1;
                    r2 += 1;
                    outptr0 += 8;
                    outptr1 += 8;
                }
                // Skip the 2-column right border of the padded input row.
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
            k0 += 9 * 8;
            k1 += 9 * 8;
        }
    }

    // Remainder pass: one output channel at a time.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
        out0.fill(_bias0);

        const float* k0 = kernel.channel(p);

        for (int q = 0; q < inch; q++)
        {
            float* outptr0 = out0.row(0);

            const Mat img0 = bottom_blob.channel(q);

            const float* r0 = img0.row(0);
            const float* r1 = img0.row(1);
            const float* r2 = img0.row(2);

            __m256 _k00 = _mm256_loadu_ps(k0);
            __m256 _k01 = _mm256_loadu_ps(k0 + 8);
            __m256 _k02 = _mm256_loadu_ps(k0 + 16);
            __m256 _k10 = _mm256_loadu_ps(k0 + 24);
            __m256 _k11 = _mm256_loadu_ps(k0 + 32);
            __m256 _k12 = _mm256_loadu_ps(k0 + 40);
            __m256 _k20 = _mm256_loadu_ps(k0 + 48);
            __m256 _k21 = _mm256_loadu_ps(k0 + 56);
            __m256 _k22 = _mm256_loadu_ps(k0 + 64);

            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                for (; j + 3 < outw; j += 4)
                {
                    __m256 _sum0 = _mm256_loadu_ps(outptr0);
                    __m256 _r01 = _mm256_broadcast_ss(r0);
                    __m256 _r02 = _mm256_broadcast_ss(r0 + 1);
                    __m256 _r03 = _mm256_broadcast_ss(r0 + 2);
                    __m256 _r11 = _mm256_broadcast_ss(r1);
                    __m256 _r12 = _mm256_broadcast_ss(r1 + 1);
                    __m256 _r13 = _mm256_broadcast_ss(r1 + 2);
                    __m256 _r21 = _mm256_broadcast_ss(r2);
                    __m256 _r22 = _mm256_broadcast_ss(r2 + 1);
                    __m256 _r23 = _mm256_broadcast_ss(r2 + 2);
                    _sum0 = _mm256_fmadd_ps(_r01, _k00, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r02, _k01, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r03, _k02, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r11, _k10, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r12, _k11, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r13, _k12, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r21, _k20, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r22, _k21, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r23, _k22, _sum0);
                    __m256 _sum1 = _mm256_loadu_ps(outptr0 + 8);
                    __m256 _r04 = _mm256_broadcast_ss(r0 + 3);
                    __m256 _r14 = _mm256_broadcast_ss(r1 + 3);
                    __m256 _r24 = _mm256_broadcast_ss(r2 + 3);
                    _mm256_storeu_ps(outptr0, _sum0);
                    _sum1 = _mm256_fmadd_ps(_r02, _k00, _sum1);
                    _sum1 = _mm256_fmadd_ps(_r03, _k01, _sum1);
                    _sum1 = _mm256_fmadd_ps(_r04, _k02, _sum1);
                    _sum1 = _mm256_fmadd_ps(_r12, _k10, _sum1);
                    _sum1 = _mm256_fmadd_ps(_r13, _k11, _sum1);
                    _sum1 = _mm256_fmadd_ps(_r14, _k12, _sum1);
                    _sum1 = _mm256_fmadd_ps(_r22, _k20, _sum1);
                    _sum1 = _mm256_fmadd_ps(_r23, _k21, _sum1);
                    _sum1 = _mm256_fmadd_ps(_r24, _k22, _sum1);
                    __m256 _sum2 = _mm256_loadu_ps(outptr0 + 16);
                    __m256 _r05 = _mm256_broadcast_ss(r0 + 4);
                    __m256 _r15 = _mm256_broadcast_ss(r1 + 4);
                    __m256 _r25 = _mm256_broadcast_ss(r2 + 4);
                    _mm256_storeu_ps(outptr0 + 8, _sum1);
                    _sum2 = _mm256_fmadd_ps(_r03, _k00, _sum2);
                    _sum2 = _mm256_fmadd_ps(_r04, _k01, _sum2);
                    _sum2 = _mm256_fmadd_ps(_r05, _k02, _sum2);
                    _sum2 = _mm256_fmadd_ps(_r13, _k10, _sum2);
                    _sum2 = _mm256_fmadd_ps(_r14, _k11, _sum2);
                    _sum2 = _mm256_fmadd_ps(_r15, _k12, _sum2);
                    _sum2 = _mm256_fmadd_ps(_r23, _k20, _sum2);
                    _sum2 = _mm256_fmadd_ps(_r24, _k21, _sum2);
                    _sum2 = _mm256_fmadd_ps(_r25, _k22, _sum2);
                    __m256 _sum3 = _mm256_loadu_ps(outptr0 + 24);
                    __m256 _r06 = _mm256_broadcast_ss(r0 + 5);
                    __m256 _r16 = _mm256_broadcast_ss(r1 + 5);
                    __m256 _r26 = _mm256_broadcast_ss(r2 + 5);
                    _mm256_storeu_ps(outptr0 + 16, _sum2);
                    _sum3 = _mm256_fmadd_ps(_r04, _k00, _sum3);
                    _sum3 = _mm256_fmadd_ps(_r05, _k01, _sum3);
                    _sum3 = _mm256_fmadd_ps(_r06, _k02, _sum3);
                    _sum3 = _mm256_fmadd_ps(_r14, _k10, _sum3);
                    _sum3 = _mm256_fmadd_ps(_r15, _k11, _sum3);
                    _sum3 = _mm256_fmadd_ps(_r16, _k12, _sum3);
                    _sum3 = _mm256_fmadd_ps(_r24, _k20, _sum3);
                    _sum3 = _mm256_fmadd_ps(_r25, _k21, _sum3);
                    _sum3 = _mm256_fmadd_ps(_r26, _k22, _sum3);
                    _mm256_storeu_ps(outptr0 + 24, _sum3);
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    outptr0 += 32;
                }
                for (; j + 1 < outw; j += 2)
                {
                    __m256 _sum0 = _mm256_loadu_ps(outptr0);
                    __m256 _r01 = _mm256_broadcast_ss(r0);
                    __m256 _r02 = _mm256_broadcast_ss(r0 + 1);
                    __m256 _r03 = _mm256_broadcast_ss(r0 + 2);
                    __m256 _r11 = _mm256_broadcast_ss(r1);
                    __m256 _r12 = _mm256_broadcast_ss(r1 + 1);
                    __m256 _r13 = _mm256_broadcast_ss(r1 + 2);
                    __m256 _r21 = _mm256_broadcast_ss(r2);
                    __m256 _r22 = _mm256_broadcast_ss(r2 + 1);
                    __m256 _r23 = _mm256_broadcast_ss(r2 + 2);
                    _sum0 = _mm256_fmadd_ps(_r01, _k00, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r02, _k01, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r03, _k02, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r11, _k10, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r12, _k11, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r13, _k12, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r21, _k20, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r22, _k21, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r23, _k22, _sum0);
                    __m256 _sum1 = _mm256_loadu_ps(outptr0 + 8);
                    __m256 _r04 = _mm256_broadcast_ss(r0 + 3);
                    __m256 _r14 = _mm256_broadcast_ss(r1 + 3);
                    __m256 _r24 = _mm256_broadcast_ss(r2 + 3);
                    _mm256_storeu_ps(outptr0, _sum0);
                    _sum1 = _mm256_fmadd_ps(_r02, _k00, _sum1);
                    _sum1 = _mm256_fmadd_ps(_r03, _k01, _sum1);
                    _sum1 = _mm256_fmadd_ps(_r04, _k02, _sum1);
                    _sum1 = _mm256_fmadd_ps(_r12, _k10, _sum1);
                    _sum1 = _mm256_fmadd_ps(_r13, _k11, _sum1);
                    _sum1 = _mm256_fmadd_ps(_r14, _k12, _sum1);
                    _sum1 = _mm256_fmadd_ps(_r22, _k20, _sum1);
                    _sum1 = _mm256_fmadd_ps(_r23, _k21, _sum1);
                    _sum1 = _mm256_fmadd_ps(_r24, _k22, _sum1);
                    // Bug fix: store _sum1 (second pixel's accumulator);
                    // the original stored _sum0 here, dropping the result.
                    _mm256_storeu_ps(outptr0 + 8, _sum1);
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0 += 16;
                }
                for (; j < outw; j++)
                {
                    __m256 _sum0 = _mm256_loadu_ps(outptr0);
                    __m256 _r01 = _mm256_broadcast_ss(r0);
                    __m256 _r02 = _mm256_broadcast_ss(r0 + 1);
                    __m256 _r03 = _mm256_broadcast_ss(r0 + 2);
                    __m256 _r11 = _mm256_broadcast_ss(r1);
                    __m256 _r12 = _mm256_broadcast_ss(r1 + 1);
                    __m256 _r13 = _mm256_broadcast_ss(r1 + 2);
                    __m256 _r21 = _mm256_broadcast_ss(r2);
                    __m256 _r22 = _mm256_broadcast_ss(r2 + 1);
                    __m256 _r23 = _mm256_broadcast_ss(r2 + 2);
                    _sum0 = _mm256_fmadd_ps(_r01, _k00, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r02, _k01, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r03, _k02, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r11, _k10, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r12, _k11, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r13, _k12, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r21, _k20, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r22, _k21, _sum0);
                    _sum0 = _mm256_fmadd_ps(_r23, _k22, _sum0);
                    _mm256_storeu_ps(outptr0, _sum0);
                    r0 += 1;
                    r1 += 1;
                    r2 += 1;
                    outptr0 += 8;
                }
                // Skip the 2-column right border of the padded input row.
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
            k0 += 9 * 8;
        }
    }
}
// 3x3 convolution, stride 2, "pack1to8": input channels are plain floats,
// output channels are packed 8-wide (one __m256 per output pixel).
// kernel holds repacked weights: 9 taps x 8 output lanes = 72 floats per
// (output-channel-group, input-channel) pair. _bias may be empty (NULL bias).
static void conv3x3s2_pack1to8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// Amount to advance the input row pointers at the end of each output row:
// skip the remainder of the current row plus one full row (stride 2 vertically).
const int tailstep = w - 2 * outw + w;
const float* bias = _bias;
// Process output channels two at a time; the leftover (odd) channel is
// handled by the scalar-channel loop below starting at remain_outch_start.
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p + 1);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
__m256 _bias1 = bias ? _mm256_loadu_ps((const float*)bias + (p + 1) * 8) : _mm256_set1_ps(0.f);
// Seed the accumulators with the bias; the channel loops below add into it.
out0.fill(_bias0);
out1.fill(_bias1);
const float* k0 = kernel.channel(p);
const float* k1 = kernel.channel(p + 1);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
const Mat img0 = bottom_blob.channel(q);
// Three consecutive input rows feeding one output row (3x3 window).
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
// 9 kernel taps x 8 output lanes for each of the two output channels.
__m256 _k00_0 = _mm256_loadu_ps(k0);
__m256 _k01_0 = _mm256_loadu_ps(k0 + 8);
__m256 _k02_0 = _mm256_loadu_ps(k0 + 16);
__m256 _k10_0 = _mm256_loadu_ps(k0 + 24);
__m256 _k11_0 = _mm256_loadu_ps(k0 + 32);
__m256 _k12_0 = _mm256_loadu_ps(k0 + 40);
__m256 _k20_0 = _mm256_loadu_ps(k0 + 48);
__m256 _k21_0 = _mm256_loadu_ps(k0 + 56);
__m256 _k22_0 = _mm256_loadu_ps(k0 + 64);
__m256 _k00_1 = _mm256_loadu_ps(k1);
__m256 _k01_1 = _mm256_loadu_ps(k1 + 8);
__m256 _k02_1 = _mm256_loadu_ps(k1 + 16);
__m256 _k10_1 = _mm256_loadu_ps(k1 + 24);
__m256 _k11_1 = _mm256_loadu_ps(k1 + 32);
__m256 _k12_1 = _mm256_loadu_ps(k1 + 40);
__m256 _k20_1 = _mm256_loadu_ps(k1 + 48);
__m256 _k21_1 = _mm256_loadu_ps(k1 + 56);
__m256 _k22_1 = _mm256_loadu_ps(k1 + 64);
int i = 0;
for (; i < outh; i++)
{
// 4 output pixels per iteration in the unrolled loop, then a scalar tail.
int nn = outw >> 2;
int remain = outw & 3;
for (; nn > 0; nn--)
{
__m256 _sum00 = _mm256_loadu_ps(outptr0);
__m256 _sum10 = _mm256_loadu_ps(outptr1);
// Broadcast each scalar input tap across all 8 output lanes.
// Stride 2: output pixel t reads input columns 2t .. 2t+2.
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00);
__m256 _sum01 = _mm256_loadu_ps(outptr0 + 8);
__m256 _sum11 = _mm256_loadu_ps(outptr1 + 8);
_sum10 = _mm256_fmadd_ps(_r01, _k00_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r02, _k01_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r03, _k02_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r11, _k10_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r12, _k11_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r13, _k12_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r21, _k20_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r22, _k21_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r23, _k22_1, _sum10);
// Second output pixel: reuses column 2 (_r03/_r13/_r23) as its left tap.
__m256 _r04 = _mm256_broadcast_ss(r0 + 3);
__m256 _r05 = _mm256_broadcast_ss(r0 + 4);
__m256 _r14 = _mm256_broadcast_ss(r1 + 3);
__m256 _r15 = _mm256_broadcast_ss(r1 + 4);
__m256 _r24 = _mm256_broadcast_ss(r2 + 3);
__m256 _r25 = _mm256_broadcast_ss(r2 + 4);
_mm256_storeu_ps(outptr0, _sum00);
_mm256_storeu_ps(outptr1, _sum10);
_sum01 = _mm256_fmadd_ps(_r03, _k00_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r04, _k01_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r05, _k02_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r13, _k10_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r14, _k11_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r15, _k12_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r23, _k20_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r24, _k21_0, _sum01);
_sum01 = _mm256_fmadd_ps(_r25, _k22_0, _sum01);
__m256 _sum02 = _mm256_loadu_ps(outptr0 + 16);
__m256 _sum12 = _mm256_loadu_ps(outptr1 + 16);
_sum11 = _mm256_fmadd_ps(_r03, _k00_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r04, _k01_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r05, _k02_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r13, _k10_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r14, _k11_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r15, _k12_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r23, _k20_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r24, _k21_1, _sum11);
_sum11 = _mm256_fmadd_ps(_r25, _k22_1, _sum11);
// Third output pixel (input columns 4..6).
__m256 _r06 = _mm256_broadcast_ss(r0 + 5);
__m256 _r07 = _mm256_broadcast_ss(r0 + 6);
__m256 _r16 = _mm256_broadcast_ss(r1 + 5);
__m256 _r17 = _mm256_broadcast_ss(r1 + 6);
__m256 _r26 = _mm256_broadcast_ss(r2 + 5);
__m256 _r27 = _mm256_broadcast_ss(r2 + 6);
_mm256_storeu_ps(outptr0 + 8, _sum01);
_mm256_storeu_ps(outptr1 + 8, _sum11);
_sum02 = _mm256_fmadd_ps(_r05, _k00_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r06, _k01_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r07, _k02_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r15, _k10_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r16, _k11_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r17, _k12_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r25, _k20_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r26, _k21_0, _sum02);
_sum02 = _mm256_fmadd_ps(_r27, _k22_0, _sum02);
__m256 _sum03 = _mm256_loadu_ps(outptr0 + 24);
__m256 _sum13 = _mm256_loadu_ps(outptr1 + 24);
_sum12 = _mm256_fmadd_ps(_r05, _k00_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r06, _k01_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r07, _k02_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r15, _k10_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r16, _k11_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r17, _k12_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r25, _k20_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r26, _k21_1, _sum12);
_sum12 = _mm256_fmadd_ps(_r27, _k22_1, _sum12);
// Fourth output pixel (input columns 6..8).
__m256 _r08 = _mm256_broadcast_ss(r0 + 7);
__m256 _r09 = _mm256_broadcast_ss(r0 + 8);
__m256 _r18 = _mm256_broadcast_ss(r1 + 7);
__m256 _r19 = _mm256_broadcast_ss(r1 + 8);
__m256 _r28 = _mm256_broadcast_ss(r2 + 7);
__m256 _r29 = _mm256_broadcast_ss(r2 + 8);
_mm256_storeu_ps(outptr0 + 16, _sum02);
_mm256_storeu_ps(outptr1 + 16, _sum12);
_sum03 = _mm256_fmadd_ps(_r07, _k00_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r08, _k01_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r09, _k02_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r17, _k10_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r18, _k11_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r19, _k12_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r27, _k20_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r28, _k21_0, _sum03);
_sum03 = _mm256_fmadd_ps(_r29, _k22_0, _sum03);
_sum13 = _mm256_fmadd_ps(_r07, _k00_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r08, _k01_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r09, _k02_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r17, _k10_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r18, _k11_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r19, _k12_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r27, _k20_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r28, _k21_1, _sum13);
_sum13 = _mm256_fmadd_ps(_r29, _k22_1, _sum13);
_mm256_storeu_ps(outptr0 + 24, _sum03);
_mm256_storeu_ps(outptr1 + 24, _sum13);
// 4 output pixels consumed 8 input columns (stride 2).
r0 += 8;
r1 += 8;
r2 += 8;
outptr0 += 32;
outptr1 += 32;
}
// Scalar tail: one output pixel at a time.
for (; remain > 0; remain--)
{
__m256 _sum00 = _mm256_loadu_ps(outptr0);
__m256 _sum10 = _mm256_loadu_ps(outptr1);
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum00 = _mm256_fmadd_ps(_r01, _k00_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r02, _k01_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r03, _k02_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r11, _k10_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r12, _k11_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r13, _k12_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r21, _k20_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r22, _k21_0, _sum00);
_sum00 = _mm256_fmadd_ps(_r23, _k22_0, _sum00);
_sum10 = _mm256_fmadd_ps(_r01, _k00_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r02, _k01_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r03, _k02_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r11, _k10_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r12, _k11_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r13, _k12_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r21, _k20_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r22, _k21_1, _sum10);
_sum10 = _mm256_fmadd_ps(_r23, _k22_1, _sum10);
_mm256_storeu_ps(outptr0, _sum00);
_mm256_storeu_ps(outptr1, _sum10);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 8;
outptr1 += 8;
}
// Advance to the next pair of input rows (vertical stride 2).
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
// Next input channel's weights.
k0 += 9 * 8;
k1 += 9 * 8;
}
}
// Leftover single output channel when outch is odd.
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
out0.fill(_bias0);
const float* k0 = kernel.channel(p);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
__m256 _k00 = _mm256_loadu_ps(k0);
__m256 _k01 = _mm256_loadu_ps(k0 + 8);
__m256 _k02 = _mm256_loadu_ps(k0 + 16);
__m256 _k10 = _mm256_loadu_ps(k0 + 24);
__m256 _k11 = _mm256_loadu_ps(k0 + 32);
__m256 _k12 = _mm256_loadu_ps(k0 + 40);
__m256 _k20 = _mm256_loadu_ps(k0 + 48);
__m256 _k21 = _mm256_loadu_ps(k0 + 56);
__m256 _k22 = _mm256_loadu_ps(k0 + 64);
int i = 0;
for (; i < outh; i++)
{
int nn = outw >> 2;
int remain = outw & 3;
for (; nn > 0; nn--)
{
__m256 _sum00 = _mm256_loadu_ps(outptr0);
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum00 = _mm256_fmadd_ps(_r01, _k00, _sum00);
_sum00 = _mm256_fmadd_ps(_r02, _k01, _sum00);
_sum00 = _mm256_fmadd_ps(_r03, _k02, _sum00);
_sum00 = _mm256_fmadd_ps(_r11, _k10, _sum00);
_sum00 = _mm256_fmadd_ps(_r12, _k11, _sum00);
_sum00 = _mm256_fmadd_ps(_r13, _k12, _sum00);
_sum00 = _mm256_fmadd_ps(_r21, _k20, _sum00);
_sum00 = _mm256_fmadd_ps(_r22, _k21, _sum00);
_sum00 = _mm256_fmadd_ps(_r23, _k22, _sum00);
__m256 _sum01 = _mm256_loadu_ps(outptr0 + 8);
__m256 _r04 = _mm256_broadcast_ss(r0 + 3);
__m256 _r05 = _mm256_broadcast_ss(r0 + 4);
__m256 _r14 = _mm256_broadcast_ss(r1 + 3);
__m256 _r15 = _mm256_broadcast_ss(r1 + 4);
__m256 _r24 = _mm256_broadcast_ss(r2 + 3);
__m256 _r25 = _mm256_broadcast_ss(r2 + 4);
_mm256_storeu_ps(outptr0, _sum00);
_sum01 = _mm256_fmadd_ps(_r03, _k00, _sum01);
_sum01 = _mm256_fmadd_ps(_r04, _k01, _sum01);
_sum01 = _mm256_fmadd_ps(_r05, _k02, _sum01);
_sum01 = _mm256_fmadd_ps(_r13, _k10, _sum01);
_sum01 = _mm256_fmadd_ps(_r14, _k11, _sum01);
_sum01 = _mm256_fmadd_ps(_r15, _k12, _sum01);
_sum01 = _mm256_fmadd_ps(_r23, _k20, _sum01);
_sum01 = _mm256_fmadd_ps(_r24, _k21, _sum01);
_sum01 = _mm256_fmadd_ps(_r25, _k22, _sum01);
__m256 _sum02 = _mm256_loadu_ps(outptr0 + 16);
__m256 _r06 = _mm256_broadcast_ss(r0 + 5);
__m256 _r07 = _mm256_broadcast_ss(r0 + 6);
__m256 _r16 = _mm256_broadcast_ss(r1 + 5);
__m256 _r17 = _mm256_broadcast_ss(r1 + 6);
__m256 _r26 = _mm256_broadcast_ss(r2 + 5);
__m256 _r27 = _mm256_broadcast_ss(r2 + 6);
_mm256_storeu_ps(outptr0 + 8, _sum01);
_sum02 = _mm256_fmadd_ps(_r05, _k00, _sum02);
_sum02 = _mm256_fmadd_ps(_r06, _k01, _sum02);
_sum02 = _mm256_fmadd_ps(_r07, _k02, _sum02);
_sum02 = _mm256_fmadd_ps(_r15, _k10, _sum02);
_sum02 = _mm256_fmadd_ps(_r16, _k11, _sum02);
_sum02 = _mm256_fmadd_ps(_r17, _k12, _sum02);
_sum02 = _mm256_fmadd_ps(_r25, _k20, _sum02);
_sum02 = _mm256_fmadd_ps(_r26, _k21, _sum02);
_sum02 = _mm256_fmadd_ps(_r27, _k22, _sum02);
__m256 _sum03 = _mm256_loadu_ps(outptr0 + 24);
__m256 _r08 = _mm256_broadcast_ss(r0 + 7);
__m256 _r09 = _mm256_broadcast_ss(r0 + 8);
__m256 _r18 = _mm256_broadcast_ss(r1 + 7);
__m256 _r19 = _mm256_broadcast_ss(r1 + 8);
__m256 _r28 = _mm256_broadcast_ss(r2 + 7);
__m256 _r29 = _mm256_broadcast_ss(r2 + 8);
_mm256_storeu_ps(outptr0 + 16, _sum02);
_sum03 = _mm256_fmadd_ps(_r07, _k00, _sum03);
_sum03 = _mm256_fmadd_ps(_r08, _k01, _sum03);
_sum03 = _mm256_fmadd_ps(_r09, _k02, _sum03);
_sum03 = _mm256_fmadd_ps(_r17, _k10, _sum03);
_sum03 = _mm256_fmadd_ps(_r18, _k11, _sum03);
_sum03 = _mm256_fmadd_ps(_r19, _k12, _sum03);
_sum03 = _mm256_fmadd_ps(_r27, _k20, _sum03);
_sum03 = _mm256_fmadd_ps(_r28, _k21, _sum03);
_sum03 = _mm256_fmadd_ps(_r29, _k22, _sum03);
_mm256_storeu_ps(outptr0 + 24, _sum03);
r0 += 8;
r1 += 8;
r2 += 8;
outptr0 += 32;
}
for (; remain > 0; remain--)
{
__m256 _sum00 = _mm256_loadu_ps(outptr0);
__m256 _r01 = _mm256_broadcast_ss(r0);
__m256 _r02 = _mm256_broadcast_ss(r0 + 1);
__m256 _r03 = _mm256_broadcast_ss(r0 + 2);
__m256 _r11 = _mm256_broadcast_ss(r1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 1);
__m256 _r13 = _mm256_broadcast_ss(r1 + 2);
__m256 _r21 = _mm256_broadcast_ss(r2);
__m256 _r22 = _mm256_broadcast_ss(r2 + 1);
__m256 _r23 = _mm256_broadcast_ss(r2 + 2);
_sum00 = _mm256_fmadd_ps(_r01, _k00, _sum00);
_sum00 = _mm256_fmadd_ps(_r02, _k01, _sum00);
_sum00 = _mm256_fmadd_ps(_r03, _k02, _sum00);
_sum00 = _mm256_fmadd_ps(_r11, _k10, _sum00);
_sum00 = _mm256_fmadd_ps(_r12, _k11, _sum00);
_sum00 = _mm256_fmadd_ps(_r13, _k12, _sum00);
_sum00 = _mm256_fmadd_ps(_r21, _k20, _sum00);
_sum00 = _mm256_fmadd_ps(_r22, _k21, _sum00);
_sum00 = _mm256_fmadd_ps(_r23, _k22, _sum00);
_mm256_storeu_ps(outptr0, _sum00);
r0 += 2;
r1 += 2;
r2 += 2;
outptr0 += 8;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9 * 8;
}
}
}
#include "blas.h"
#include "utils.h"
#include <math.h>
#include <assert.h>
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
void reorg_cpu(float *x, int out_w, int out_h, int out_c, int batch, int stride, int forward, float *out)
{
    /* Space-to-depth / depth-to-space shuffle between an (out_c, out_h, out_w)
       layout and an (in_c, out_h*stride, out_w*stride) layout, where
       in_c = out_c / (stride*stride).
       forward != 0 copies x -> out; otherwise the mapping is inverted. */
    const int in_c = out_c / (stride * stride);
    for (int b = 0; b < batch; ++b) {
        for (int k = 0; k < out_c; ++k) {
            /* Each output channel maps to a source channel plus a sub-pixel offset. */
            const int c2 = k % in_c;
            const int offset = k / in_c;
            for (int j = 0; j < out_h; ++j) {
                const int h2 = j * stride + offset / stride;
                for (int i = 0; i < out_w; ++i) {
                    const int w2 = i * stride + offset % stride;
                    const int in_index = i + out_w * (j + out_h * (k + out_c * b));
                    const int out_index = w2 + out_w * stride * (h2 + out_h * stride * (c2 + in_c * b));
                    if (forward)
                        out[out_index] = x[in_index];
                    else
                        out[in_index] = x[out_index];
                }
            }
        }
    }
}
void flatten(float *x, int size, int layers, int batch, int forward)
{
    /* In-place transpose of each batch slice between (layers, size) planar
       order and (size, layers) interleaved order, via a temporary buffer.
       forward != 0 goes planar -> interleaved; otherwise the reverse. */
    float *tmp = (float*)xcalloc(size * layers * batch, sizeof(float));
    for (int b = 0; b < batch; ++b) {
        const int base = b * layers * size;
        for (int c = 0; c < layers; ++c) {
            for (int i = 0; i < size; ++i) {
                const int planar = base + c * size + i;
                const int interleaved = base + i * layers + c;
                if (forward)
                    tmp[interleaved] = x[planar];
                else
                    tmp[planar] = x[interleaved];
            }
        }
    }
    memcpy(x, tmp, size * layers * batch * sizeof(float));
    free(tmp);
}
void weighted_sum_cpu(float *a, float *b, float *s, int n, float *c)
{
    /* Convex combination per element: c = s*a + (1-s)*b.
       b may be NULL, in which case it is treated as all zeros. */
    for (int i = 0; i < n; ++i) {
        const float bv = b ? b[i] : 0;
        c[i] = s[i] * a[i] + (1 - s[i]) * bv;
    }
}
void weighted_delta_cpu(float *a, float *b, float *s, float *da, float *db, float *ds, int n, float *dc)
{
    /* Backward pass of weighted_sum: da += dc*s, db += dc*(1-s),
       ds += dc*(a-b). da and db may be NULL to skip those gradients. */
    for (int i = 0; i < n; ++i) {
        const float g = dc[i];
        if (da) da[i] += g * s[i];
        if (db) db[i] += g * (1 - s[i]);
        ds[i] += g * (a[i] - b[i]);
    }
}
/* Rectified linear unit: max(src, 0). */
static float relu(float src) {
    return (src > 0) ? src : 0;
}
// Forward pass of a multi-input shortcut (residual) layer: for each output
// element, accumulates the main input plus every contributing layer's output,
// each optionally scaled by a (possibly normalized) per-weight factor.
// size         - total number of output elements (batch * src_outputs)
// src_outputs  - elements per batch item of the main input
// n            - number of extra input layers
// weights      - optional per-layer (or per-channel/per-element) mix weights; may be NULL
// weights_normalization - NONE / RELU / SOFTMAX normalization across the n+1 inputs
void shortcut_multilayer_cpu(int size, int src_outputs, int batch, int n, int *outputs_of_layers, float **layers_output, float *out, float *in, float *weights, int nweights, WEIGHTS_NORMALIZATION_T weights_normalization)
{
// nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w)
const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w)
int step = 0;
if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1
int id;
#pragma omp parallel for
for (id = 0; id < size; ++id) {
// Decompose flat id into (batch index src_b, within-batch index src_i).
int src_id = id;
const int src_i = src_id % src_outputs;
src_id /= src_outputs;
int src_b = src_id;
float sum = 1, max_val = -FLT_MAX;
int i;
// Pre-compute the normalization denominator over all n+1 inputs.
if (weights && weights_normalization) {
if (weights_normalization == SOFTMAX_NORMALIZATION) {
// Max-subtraction for a numerically stable softmax.
for (i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
float w = weights[weights_index];
if (max_val < w) max_val = w;
}
}
const float eps = 0.0001;
sum = eps;
for (i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
const float w = weights[weights_index];
if (weights_normalization == RELU_NORMALIZATION) sum += relu(w);
else if (weights_normalization == SOFTMAX_NORMALIZATION) sum += expf(w - max_val);
}
}
// Main input contribution (weight index 0).
if (weights) {
float w = weights[src_i / step];
if (weights_normalization == RELU_NORMALIZATION) w = relu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;
out[id] = in[id] * w; // [0 or c or (c, h ,w)]
}
else out[id] = in[id];
// layers
// Accumulate each extra layer's output (weight indices 1..n); layers whose
// output is smaller than src_outputs only contribute where src_i fits.
for (i = 0; i < n; ++i) {
int add_outputs = outputs_of_layers[i];
if (src_i < add_outputs) {
int add_index = add_outputs*src_b + src_i;
int out_index = id;
float *add = layers_output[i];
if (weights) {
const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)]
float w = weights[weights_index];
if (weights_normalization == RELU_NORMALIZATION) w = relu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;
out[out_index] += add[add_index] * w; // [0 or c or (c, h ,w)]
}
else out[out_index] += add[add_index];
}
}
}
}
// Backward pass of the multi-input shortcut layer: propagates delta_in to the
// main input (delta_out) and to each contributing layer (layers_delta), and
// accumulates weight gradients into weight_updates when weights are present.
// Mirrors the weight lookup / normalization scheme of shortcut_multilayer_cpu.
void backward_shortcut_multilayer_cpu(int size, int src_outputs, int batch, int n, int *outputs_of_layers,
float **layers_delta, float *delta_out, float *delta_in, float *weights, float *weight_updates, int nweights, float *in, float **layers_output, WEIGHTS_NORMALIZATION_T weights_normalization)
{
// nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w)
const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w)
int step = 0;
if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1
int id;
#pragma omp parallel for
for (id = 0; id < size; ++id) {
// Decompose flat id into (batch index src_b, within-batch index src_i).
int src_id = id;
int src_i = src_id % src_outputs;
src_id /= src_outputs;
int src_b = src_id;
// NOTE(review): grad stays 1 here; the exact normalization gradient below
// is commented out upstream, so weight_updates use a straight-through grad.
float grad = 1, sum = 1, max_val = -FLT_MAX;;
int i;
// Recompute the same normalization denominator as the forward pass.
if (weights && weights_normalization) {
if (weights_normalization == SOFTMAX_NORMALIZATION) {
for (i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
float w = weights[weights_index];
if (max_val < w) max_val = w;
}
}
const float eps = 0.0001;
sum = eps;
for (i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
const float w = weights[weights_index];
if (weights_normalization == RELU_NORMALIZATION) sum += relu(w);
else if (weights_normalization == SOFTMAX_NORMALIZATION) sum += expf(w - max_val);
}
/*
grad = 0;
for (i = 0; i < (n + 1); ++i) {
const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
const float delta_w = delta_in[id] * in[id];
const float w = weights[weights_index];
if (weights_normalization == RELU_NORMALIZATION) grad += delta_w * relu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) grad += delta_w * expf(w - max_val) / sum;
}
*/
}
// Gradient w.r.t. the main input and its weight (index 0).
if (weights) {
float w = weights[src_i / step];
if (weights_normalization == RELU_NORMALIZATION) w = relu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;
delta_out[id] += delta_in[id] * w; // [0 or c or (c, h ,w)]
weight_updates[src_i / step] += delta_in[id] * in[id] * grad;
}
else delta_out[id] += delta_in[id];
// layers
// Gradients w.r.t. each contributing layer and its weight (indices 1..n).
for (i = 0; i < n; ++i) {
int add_outputs = outputs_of_layers[i];
if (src_i < add_outputs) {
int add_index = add_outputs*src_b + src_i;
int out_index = id;
float *layer_delta = layers_delta[i];
if (weights) {
float *add = layers_output[i];
const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)]
float w = weights[weights_index];
if (weights_normalization == RELU_NORMALIZATION) w = relu(w) / sum;
else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;
layer_delta[add_index] += delta_in[id] * w; // [0 or c or (c, h ,w)]
weight_updates[weights_index] += delta_in[id] * add[add_index] * grad;
}
else layer_delta[add_index] += delta_in[id];
}
}
}
}
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
    /* out += add with nearest-neighbour index mapping between possibly
       mismatched shapes: 'stride' subsamples the source when it is larger,
       'sample' spreads writes when the destination is larger. */
    int stride = w1 / w2;
    int sample = w2 / w1;
    assert(stride == h1 / h2);
    assert(sample == h2 / h1);
    if (stride < 1) stride = 1;
    if (sample < 1) sample = 1;
    const int minw = (w1 < w2) ? w1 : w2;
    const int minh = (h1 < h2) ? h1 : h2;
    const int minc = (c1 < c2) ? c1 : c2;
    for (int b = 0; b < batch; ++b) {
        for (int k = 0; k < minc; ++k) {
            for (int j = 0; j < minh; ++j) {
                for (int i = 0; i < minw; ++i) {
                    const int out_index = i * sample + w2 * (j * sample + h2 * (k + c2 * b));
                    const int add_index = i * stride + w1 * (j * stride + h1 * (k + c1 * b));
                    out[out_index] += add[add_index];
                }
            }
        }
    }
}
void mean_cpu(float *x, int batch, int filters, int spatial, float *mean)
{
    /* Per-filter mean over batch and spatial dimensions.
       x is laid out as (batch, filters, spatial). */
    const float scale = 1. / (batch * spatial);
    for (int i = 0; i < filters; ++i) {
        float acc = 0;
        for (int j = 0; j < batch; ++j) {
            const float *row = x + j * filters * spatial + i * spatial;
            for (int k = 0; k < spatial; ++k) {
                acc += row[k];
            }
        }
        mean[i] = acc * scale;
    }
}
void variance_cpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    /* Per-filter sample variance (Bessel-corrected: divides by N-1) over the
       batch and spatial dimensions; x is laid out (batch, filters, spatial)
       and mean[] holds the per-filter means. */
    float scale = 1./(batch * spatial - 1);
    int i,j,k;
    for(i = 0; i < filters; ++i){
        variance[i] = 0;
        for(j = 0; j < batch; ++j){
            for(k = 0; k < spatial; ++k){
                int index = j*filters*spatial + i*spatial + k;
                /* Square by multiplication instead of pow(): avoids a libm
                   call and a float->double->float round trip per element. */
                const float d = x[index] - mean[i];
                variance[i] += d * d;
            }
        }
        variance[i] *= scale;
    }
}
void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int b, f, i;
for(b = 0; b < batch; ++b){
for(f = 0; f < filters; ++f){
for(i = 0; i < spatial; ++i){
int index = b*filters*spatial + f*spatial + i;
x[index] = (x[index] - mean[f])/(sqrt(variance[f] + .00001f));
}
}
}
}
void const_cpu(int N, float ALPHA, float *X, int INCX)
{
    /* Set every INCX-th element of X to ALPHA. */
    for (int i = 0; i < N; ++i) {
        X[i * INCX] = ALPHA;
    }
}
void mul_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    /* Elementwise strided product: Y[i*INCY] *= X[i*INCX]. */
    for (int i = 0; i < N; ++i) {
        Y[i * INCY] *= X[i * INCX];
    }
}
void pow_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
int i;
for(i = 0; i < N; ++i) Y[i*INCY] = pow(X[i*INCX], ALPHA);
}
void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
    /* BLAS saxpy: Y += ALPHA * X over strided elements. */
    for (int i = 0; i < N; ++i) {
        Y[i * INCY] += ALPHA * X[i * INCX];
    }
}
void scal_cpu(int N, float ALPHA, float *X, int INCX)
{
    /* BLAS sscal: scale strided elements of X by ALPHA. */
    for (int i = 0; i < N; ++i) {
        X[i * INCX] *= ALPHA;
    }
}
void scal_add_cpu(int N, float ALPHA, float BETA, float *X, int INCX)
{
    /* Affine in-place update: X = X*ALPHA + BETA over strided elements. */
    for (int i = 0; i < N; ++i) {
        X[i * INCX] = X[i * INCX] * ALPHA + BETA;
    }
}
void fill_cpu(int N, float ALPHA, float *X, int INCX)
{
    /* Fill every INCX-th element of X with ALPHA. The contiguous zero case
       uses memset (the IEEE-754 bit pattern of +0.0f is all zero bytes). */
    if (INCX == 1 && ALPHA == 0) {
        memset(X, 0, N * sizeof(float));
        return;
    }
    for (int i = 0; i < N; ++i) {
        X[i * INCX] = ALPHA;
    }
}
void deinter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
    /* De-interleave OUT back into X (first NX values per batch) and Y (next NY),
       ACCUMULATING (+=) into the destinations. X or Y may be NULL to skip. */
    const float *src = OUT;
    for (int j = 0; j < B; ++j) {
        for (int i = 0; i < NX; ++i, ++src) {
            if (X) X[j * NX + i] += *src;
        }
        for (int i = 0; i < NY; ++i, ++src) {
            if (Y) Y[j * NY + i] += *src;
        }
    }
}
void inter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
    /* Interleave: per batch item, copy NX values from X then NY values from Y
       into consecutive positions of OUT. */
    float *dst = OUT;
    for (int j = 0; j < B; ++j) {
        const float *xr = X + j * NX;
        for (int i = 0; i < NX; ++i) {
            *dst++ = xr[i];
        }
        const float *yr = Y + j * NY;
        for (int i = 0; i < NY; ++i) {
            *dst++ = yr[i];
        }
    }
}
void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    /* BLAS scopy: strided copy Y[i*INCY] = X[i*INCX]. */
    for (int i = 0; i < N; ++i) {
        Y[i * INCY] = X[i * INCX];
    }
}
void mult_add_into_cpu(int N, float *X, float *Y, float *Z)
{
    /* Elementwise multiply-accumulate: Z[i] += X[i] * Y[i]. */
    for (int i = 0; i < N; ++i) {
        Z[i] += X[i] * Y[i];
    }
}
void smooth_l1_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    /* Smooth-L1 (Huber-like) loss per element: quadratic for |diff| < 1,
       linear (2|diff| - 1) otherwise. delta holds the gradient direction. */
    for (int i = 0; i < n; ++i) {
        const float diff = truth[i] - pred[i];
        const float mag = fabs(diff);
        if (mag < 1) {
            error[i] = diff * diff;
            delta[i] = diff;
        } else {
            error[i] = 2 * mag - 1;
            delta[i] = (diff > 0) ? 1 : -1;
        }
    }
}
void l1_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    /* L1 loss per element: error = |truth - pred|, delta = sign(truth - pred). */
    for (int i = 0; i < n; ++i) {
        const float diff = truth[i] - pred[i];
        error[i] = fabs(diff);
        delta[i] = (diff > 0) ? 1 : -1;
    }
}
void softmax_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
int i;
for(i = 0; i < n; ++i){
float t = truth[i];
float p = pred[i];
error[i] = (t) ? -log(p) : 0;
delta[i] = t-p;
}
}
void logistic_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
int i;
for(i = 0; i < n; ++i){
float t = truth[i];
float p = pred[i];
error[i] = -t*log(p) - (1-t)*log(1-p);
delta[i] = t-p;
}
}
void l2_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    /* Squared-error loss per element: error = (truth-pred)^2, delta = truth-pred. */
    for (int i = 0; i < n; ++i) {
        const float diff = truth[i] - pred[i];
        error[i] = diff * diff;
        delta[i] = diff;
    }
}
float dot_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    /* BLAS sdot: strided dot product of X and Y. */
    float acc = 0;
    for (int i = 0; i < N; ++i) {
        acc += X[i * INCX] * Y[i * INCY];
    }
    return acc;
}
void softmax(float *input, int n, float temp, float *output, int stride)
{
int i;
float sum = 0;
float largest = -FLT_MAX;
for(i = 0; i < n; ++i){
if(input[i*stride] > largest) largest = input[i*stride];
}
for(i = 0; i < n; ++i){
float e = exp(input[i*stride]/temp - largest/temp);
sum += e;
output[i*stride] = e;
}
for(i = 0; i < n; ++i){
output[i*stride] /= sum;
}
}
void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
    /* Applies softmax independently to every (batch, group) slice. */
    for (int b = 0; b < batch; ++b) {
        for (int g = 0; g < groups; ++g) {
            const int off = b * batch_offset + g * group_offset;
            softmax(input + off, n, temp, output + off, stride);
        }
    }
}
void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    /* Nearest-neighbour resampling by factor 'stride'.
       forward: out[b,k,j,i] = scale * in[b,k,j/stride,i/stride] (upsample);
       backward: in accumulates scale * out over each stride x stride window. */
    const int out_w = w * stride;
    const int out_h = h * stride;
    for (int b = 0; b < batch; ++b) {
        for (int k = 0; k < c; ++k) {
            float *in_plane = in + b * w * h * c + k * w * h;
            float *out_plane = out + b * out_w * out_h * c + k * out_w * out_h;
            for (int j = 0; j < out_h; ++j) {
                for (int i = 0; i < out_w; ++i) {
                    const int src = (j / stride) * w + i / stride;
                    const int dst = j * out_w + i;
                    if (forward) out_plane[dst] = scale * in_plane[src];
                    else in_plane[src] += scale * out_plane[dst];
                }
            }
        }
    }
}
void constrain_cpu(int size, float ALPHA, float *X)
{
int i;
for (i = 0; i < size; ++i) {
X[i] = fminf(ALPHA, fmaxf(-ALPHA, X[i]));
}
}
void fix_nan_and_inf_cpu(float *input, size_t size)
{
    /* Replaces every NaN or infinity in input[] with a small, deterministic
       ("pseudo random") finite value so downstream arithmetic stays finite. */
    size_t i;  /* size_t matches 'size': no signed/unsigned mismatch, works past INT_MAX */
    for (i = 0; i < size; ++i) {
        float val = input[i];
        if (isnan(val) || isinf(val)) {
            /* 1.0f / (i + 1): the previous 1.0f / i divided by zero at i == 0,
               writing +inf back and thereby failing to remove the infinity. */
            input[i] = 1.0f / (float)(i + 1); // pseudo random value
        }
    }
}
void get_embedding(float *src, int src_w, int src_h, int src_c, int embedding_size, int cur_w, int cur_h, int cur_n, int cur_b, float *dst)
{
    /* Copies one embedding vector (one value per feature plane) for spatial
       location (cur_w, cur_h) of anchor cur_n in batch item cur_b into dst. */
    const int plane = src_h * src_w;
    const int base = cur_b * (src_c * plane) + cur_n * (embedding_size * plane) + cur_h * src_w + cur_w;
    for (int i = 0; i < embedding_size; ++i) {
        dst[i] = src[base + i * plane];
    }
}
// Euclidean_norm
float math_vector_length(float *A, unsigned int feature_size)
{
float sum = 0;
int i;
for (i = 0; i < feature_size; ++i)
{
sum += A[i] * A[i];
}
float vector_length = sqrtf(sum);
return vector_length;
}
float cosine_similarity(float *A, float *B, unsigned int feature_size)
{
float mul = 0.0, d_a = 0.0, d_b = 0.0;
int i;
for(i = 0; i < feature_size; ++i)
{
mul += A[i] * B[i];
d_a += A[i] * A[i];
d_b += B[i] * B[i];
}
float similarity;
float divider = sqrtf(d_a) * sqrtf(d_b);
if (divider > 0) similarity = mul / divider;
else similarity = 0;
return similarity;
}
int get_sim_P_index(size_t i, size_t j, contrastive_params *contrast_p, int contrast_p_size)
{
    /* Linear search for the entry with pair indices (i, j).
       Returns its position in contrast_p, or -1 when absent. */
    size_t z = 0;
    while (z < contrast_p_size && !(contrast_p[z].i == i && contrast_p[z].j == j)) {
        ++z;
    }
    if (z == contrast_p_size) {
        return -1; // not found
    }
    return z; // found
}
int check_sim(size_t i, size_t j, contrastive_params *contrast_p, int contrast_p_size)
{
    /* Returns 1 if an entry with pair indices (i, j) exists in contrast_p, else 0. */
    size_t z;
    for (z = 0; z < contrast_p_size; ++z) {
        if (contrast_p[z].i == i && contrast_p[z].j == j) {
            return 1; // found
        }
    }
    return 0; // not found
}
float find_sim(size_t i, size_t j, contrastive_params *contrast_p, int contrast_p_size)
{
    /* Returns the cached cosine similarity for the pair (i, j).
       When the pair is missing this reports the error and waits for input;
       NOTE(review): it then still reads contrast_p[contrast_p_size], one past
       the end — pre-existing behavior, flagged for upstream attention. */
    size_t z;
    for (z = 0; z < contrast_p_size; ++z) {
        if (contrast_p[z].i == i && contrast_p[z].j == j) break;
    }
    if (z == contrast_p_size) {
        /* %zu: i, j and z are size_t; printing size_t with %d is undefined behavior. */
        printf(" Error: find_sim(): sim isn't found: i = %zu, j = %zu, z = %zu \n", i, j, z);
        getchar();
    }
    return contrast_p[z].sim;
}
float find_P_constrastive(size_t i, size_t j, contrastive_params *contrast_p, int contrast_p_size)
{
    /* Returns the cached contrastive probability P for the pair (i, j).
       When the pair is missing this reports the error and waits for input;
       NOTE(review): it then still reads contrast_p[contrast_p_size], one past
       the end — pre-existing behavior, flagged for upstream attention. */
    size_t z;
    for (z = 0; z < contrast_p_size; ++z) {
        if (contrast_p[z].i == i && contrast_p[z].j == j) break;
    }
    if (z == contrast_p_size) {
        /* %zu: i, j and z are size_t; printing size_t with %d is undefined behavior. */
        printf(" Error: find_P_constrastive(): P isn't found: i = %zu, j = %zu, z = %zu \n", i, j, z);
        getchar();
    }
    return contrast_p[z].P;
}
// num_of_samples = 2 * loaded_images = mini_batch_size
float P_constrastive_f_det(size_t il, int *labels, float **z, unsigned int feature_size, float temperature, contrastive_params *contrast_p, int contrast_p_size)
{
    /* Contrastive probability for the precomputed pair entry at index il:
       P = exp(sim/T) / sum over entries with (entry.i != i, entry.j == j) of
       their cached exp(sim/T). The result is clamped to at most 0.9999, and
       0.9999 is also returned when the denominator is empty.
       labels/z/feature_size are unused here (similarities are precomputed)
       but kept for API compatibility with P_constrastive_f. */
    const float sim = contrast_p[il].sim;
    const size_t i = contrast_p[il].i;
    const size_t j = contrast_p[il].j;
    const float numerator = expf(sim / temperature);
    float denominator = 0;
    for (int k = 0; k < contrast_p_size; ++k) {
        const contrastive_params cp = contrast_p[k];
        if (cp.i != i && cp.j == j) {
            denominator += cp.exp_sim;  /* exp(sim/T) cached when the table was built */
        }
    }
    float result = 0.9999;
    if (denominator != 0) result = numerator / denominator;
    if (result > 1) result = 0.9999;
    return result;
}
// num_of_samples = 2 * loaded_images = mini_batch_size
float P_constrastive_f(size_t i, size_t l, int *labels, float **z, unsigned int feature_size, float temperature, contrastive_params *contrast_p, int contrast_p_size)
{
    /* Contrastive probability for the sample pair (i, l):
       P = exp(sim(i,l)/T) / sum over entries with (entry.i != i, entry.j == l)
       of their cached exp(sim/T); clamped to at most 0.9999, and 0.9999 is
       returned when the denominator is empty. i == l is a caller bug and is
       reported interactively. labels/z/feature_size are unused here
       (similarities come precomputed via find_sim) but kept for API parity. */
    if (i == l) {
        /* %zu: i and l are size_t; printing size_t with %d is undefined behavior. */
        fprintf(stderr, " Error: in P_constrastive must be i != l, while i = %zu, l = %zu \n", i, l);
        getchar();
    }
    const float sim = find_sim(i, l, contrast_p, contrast_p_size); // cosine_similarity(z[i], z[l], feature_size);
    const float numerator = expf(sim / temperature);
    float denominator = 0;
    int k;
    for (k = 0; k < contrast_p_size; ++k) {
        contrastive_params cp = contrast_p[k];
        if (cp.i != i && cp.j == l) {
            denominator += cp.exp_sim;  /* exp(sim/T) cached when the table was built */
        }
    }
    float result = 0.9999;
    if (denominator != 0) result = numerator / denominator;
    if (result > 1) result = 0.9999;
    return result;
}
// Accumulates the positive-pair term of the contrastive-loss gradient for
// sample i into delta (stride wh between feature components). Pairs are the
// samples j sharing label with i; similarities and probabilities come from
// the precomputed contrast_p table. class_ids is unused in this variant.
void grad_contrastive_loss_positive_f(size_t i, int *class_ids, int *labels, size_t num_of_samples, float **z, unsigned int feature_size, float temperature, float *delta, int wh, contrastive_params *contrast_p, int contrast_p_size)
{
const float vec_len = math_vector_length(z[i], feature_size);
size_t j;
// N = number of samples with the same (non-negative) label as i, including i itself.
float N = 0;
for (j = 0; j < num_of_samples; ++j) {
if (labels[i] == labels[j] && labels[i] >= 0) N++;
}
// Degenerate cases would divide by zero below; report and bail out.
if (N == 0 || temperature == 0 || vec_len == 0) {
fprintf(stderr, " Error: N == 0 || temperature == 0 || vec_len == 0. N=%f, temperature=%f, vec_len=%f, labels[i] = %d \n",
N, temperature, vec_len, labels[i]);
getchar();
return;
}
const float mult = 1 / ((N - 1) * temperature * vec_len);
for (j = 0; j < num_of_samples; ++j) {
//if (i != j && (i/2) == (j/2)) {
// Positive pairs: same label, distinct samples.
if (i != j && labels[i] == labels[j] && labels[i] >= 0) {
//printf(" i = %d, j = %d, num_of_samples = %d, labels[i] = %d, labels[j] = %d \n",
// i, j, num_of_samples, labels[i], labels[j]);
const int sim_P_i = get_sim_P_index(i, j, contrast_p, contrast_p_size);
if (sim_P_i < 0) continue;
const float sim = contrast_p[sim_P_i].sim;
const float P = contrast_p[sim_P_i].P;
//if (!check_sim(i, j, contrast_p, contrast_p_size)) continue;
//const float sim = find_sim(i, j, contrast_p, contrast_p_size); //cos_sim[i*num_of_samples + j]; // cosine_similarity(z[i], z[j], feature_size);
//const float P = find_P_constrastive(i, j, contrast_p, contrast_p_size); //p_constrastive[i*num_of_samples + j]; // P_constrastive(i, j, labels, num_of_samples, z, feature_size, temperature, cos_sim);
//const float custom_pos_mult = 1 - sim;
int m;
//const float d = mult*(sim * z[i][m] - z[j][m]) * (1 - P); // 1
// Subtract the per-component gradient from delta at stride wh.
for (m = 0; m < feature_size; ++m) {
//const float d = mult*(sim * z[j][m] - z[j][m]) * (1 - P); // my
//const float d = mult*(sim * z[i][m] + sim * z[j][m] - z[j][m]) *(1 - P); // 1+2
const float d = mult*(sim * z[i][m] - z[j][m]) *(1 - P); // 1 (70%)
//const float d = mult*(sim * z[j][m] - z[j][m]) * (1 - P); // 2
// printf(" pos: z[j][m] = %f, z[i][m] = %f, d = %f, sim = %f \n", z[j][m], z[i][m], d, sim);
const int out_i = m * wh;
delta[out_i] -= d;
}
}
}
}
// Accumulates the negative-pair term of the contrastive-loss gradient for
// sample i into delta (stride wh between feature components). Negatives are
// samples k with a different label but the same class id as i's positive
// partner j; at most neg_max negatives are processed in total.
void grad_contrastive_loss_negative_f(size_t i, int *class_ids, int *labels, size_t num_of_samples, float **z, unsigned int feature_size, float temperature, float *delta, int wh, contrastive_params *contrast_p, int contrast_p_size, int neg_max)
{
const float vec_len = math_vector_length(z[i], feature_size);
size_t j;
// N = number of samples with the same (non-negative) label as i, including i itself.
float N = 0;
for (j = 0; j < num_of_samples; ++j) {
if (labels[i] == labels[j] && labels[i] >= 0) N++;
}
// Degenerate cases would divide by zero below; report and bail out.
if (N == 0 || temperature == 0 || vec_len == 0) {
fprintf(stderr, " Error: N == 0 || temperature == 0 || vec_len == 0. N=%f, temperature=%f, vec_len=%f, labels[i] = %d \n",
N, temperature, vec_len, labels[i]);
getchar();
return;
}
const float mult = 1 / ((N - 1) * temperature * vec_len);
int neg_counter = 0;
for (j = 0; j < num_of_samples; ++j) {
//if (i != j && (i/2) == (j/2)) {
// Outer loop over positive partners j of sample i.
if (labels[i] >= 0 && labels[i] == labels[j] && i != j) {
size_t k;
for (k = 0; k < num_of_samples; ++k) {
//if (k != i && k != j && labels[k] != labels[i]) {
// Negative: different label from i, same class id as j.
if (k != i && k != j && labels[k] != labels[i] && class_ids[j] == class_ids[k]) {
neg_counter++;
const int sim_P_i = get_sim_P_index(i, k, contrast_p, contrast_p_size);
if (sim_P_i < 0) continue;
const float sim = contrast_p[sim_P_i].sim;
const float P = contrast_p[sim_P_i].P;
//if (!check_sim(i, k, contrast_p, contrast_p_size)) continue;
//const float sim = find_sim(i, k, contrast_p, contrast_p_size); //cos_sim[i*num_of_samples + k]; // cosine_similarity(z[i], z[k], feature_size);
//const float P = find_P_constrastive(i, k, contrast_p, contrast_p_size); //p_constrastive[i*num_of_samples + k]; // P_constrastive(i, k, labels, num_of_samples, z, feature_size, temperature, cos_sim);
//const float custom_pos_mult = 1 + sim;
int m;
//const float d = mult*(z[k][m] + sim * z[i][m]) * P; // my1
// Subtract the per-component gradient from delta at stride wh.
for (m = 0; m < feature_size; ++m) {
//const float d = mult*(z[k][m] + sim * z[i][m]) * P; // 1 (70%)
//const float d = mult*(z[k][m] - sim * z[k][m] - sim * z[i][m]) * P; // 1+2
const float d = mult*(z[k][m] - sim * z[i][m]) * P; // 1 (70%)
//const float d = mult*(z[k][m] - sim * z[k][m]) * P; // 2
//printf(" neg: z[k][m] = %f, z[i][m] = %f, d = %f, sim = %f \n", z[k][m], z[i][m], d, sim);
const int out_i = m * wh;
delta[out_i] -= d;
}
// Stop once the global budget of negatives is exhausted.
if (neg_counter >= neg_max) return;
}
}
}
}
}
// num_of_samples = 2 * loaded_images = mini_batch_size
// P(i,l) = exp(sim(i,l)/T) / sum_{k != i} exp(sim(k,l)/T), read from the
// precomputed table exp_cos_sim[row*num_of_samples + col].
// labels, z, feature_size, temperature and cos_sim are unused here (the
// exponentials are precomputed) but kept in the signature for API
// compatibility with existing callers.
float P_constrastive(size_t i, size_t l, int *labels, size_t num_of_samples, float **z, unsigned int feature_size, float temperature, float *cos_sim, float *exp_cos_sim)
{
    if (i == l) {
        // i == l would use the self-similarity term as numerator; flag it loudly.
        // %zu is the correct conversion for size_t (the old %d was undefined behavior).
        fprintf(stderr, " Error: in P_constrastive must be i != l, while i = %zu, l = %zu \n", i, l);
        getchar();
    }
    const float numerator = exp_cos_sim[i*num_of_samples + l];
    float denominator = 0;
    size_t k;  // size_t matches num_of_samples; avoids signed/unsigned comparison
    for (k = 0; k < num_of_samples; ++k) {
        if (k != i) {
            denominator += exp_cos_sim[k*num_of_samples + l];
        }
    }
    float result = numerator / denominator;
    return result;
}
// i - id of the current sample in mini_batch
// labels[num_of_samples] - array with class_id for each sample in the current mini_batch
// z[feature_size][num_of_samples] - array of arrays with contrastive features (output of conv-layer, f.e. 128 floats for each sample)
// delta[feature_size] - array with deltas for backpropagation
// temperature - scalar temperature param (temperature > 0), f.e. temperature = 0.07: Supervised Contrastive Learning
// Backward pass of the attraction (positive) term of the supervised contrastive
// loss: for every positive pair (i, j) sharing a label, accumulate the gradient
// pulling z[j] towards z[i], into delta with spatial stride wh.
void grad_contrastive_loss_positive(size_t i, int *labels, size_t num_of_samples, float **z, unsigned int feature_size, float temperature, float *cos_sim, float *p_constrastive, float *delta, int wh)
{
    const float vec_len = math_vector_length(z[i], feature_size);
    size_t j;
    // N = number of samples sharing label[i] (includes i itself)
    float N = 0;
    for (j = 0; j < num_of_samples; ++j) {
        if (labels[i] == labels[j]) N++;
    }
    if (N == 0 || temperature == 0 || vec_len == 0) {
        fprintf(stderr, " Error: N == 0 || temperature == 0 || vec_len == 0. N=%f, temperature=%f, vec_len=%f \n", N, temperature, vec_len);
        getchar();
        // Bail out instead of computing mult with a zero divisor below
        // (inf/nan would be written into delta). This matches the behavior
        // of grad_contrastive_loss_positive_f.
        return;
    }
    // Common scale factor 1 / ((N-1) * T * ||z[i]||)
    const float mult = 1 / ((N - 1) * temperature * vec_len);
    for (j = 0; j < num_of_samples; ++j) {
        // (i, j) is a positive pair: same label, different samples
        if (i != j && labels[i] == labels[j]) {
            const float sim = cos_sim[i*num_of_samples + j]; // cosine_similarity(z[i], z[j], feature_size)
            const float P = p_constrastive[i*num_of_samples + j]; // P_constrastive(i, j, ...)
            int m;
            for (m = 0; m < feature_size; ++m) {
                // dL/dz for the positive pair, down-weighted by (1 - P)
                const float d = mult*(sim * z[i][m] - z[j][m]) * (1 - P);
                // delta is laid out channel-major with spatial stride wh
                const int out_i = m * wh;
                delta[out_i] -= d;
            }
        }
    }
}
// i - id of the current sample in mini_batch
// labels[num_of_samples] - array with class_id for each sample in the current mini_batch
// z[feature_size][num_of_samples] - array of arrays with contrastive features (output of conv-layer, f.e. 128 floats for each sample)
// delta[feature_size] - array with deltas for backpropagation
// temperature - scalar temperature param (temperature > 0), f.e. temperature = 0.07: Supervised Contrastive Learning
// Backward pass of the repulsion (negative) term of the supervised contrastive
// loss: for each positive pair (i, j), push delta away from every other sample k
// with a valid label, weighted by its contrastive probability P.
void grad_contrastive_loss_negative(size_t i, int *labels, size_t num_of_samples, float **z, unsigned int feature_size, float temperature, float *cos_sim, float *p_constrastive, float *delta, int wh)
{
    const float vec_len = math_vector_length(z[i], feature_size);
    size_t j;
    // N = number of samples sharing label[i] (includes i itself)
    float N = 0;
    for (j = 0; j < num_of_samples; ++j) {
        if (labels[i] == labels[j]) N++;
    }
    if (N == 0 || temperature == 0 || vec_len == 0) {
        fprintf(stderr, " Error: N == 0 || temperature == 0 || vec_len == 0. N=%f, temperature=%f, vec_len=%f \n", N, temperature, vec_len);
        getchar();
        // Bail out instead of computing mult with a zero divisor below
        // (inf/nan would be written into delta). This matches the behavior
        // of grad_contrastive_loss_negative_f.
        return;
    }
    // Common scale factor 1 / ((N-1) * T * ||z[i]||)
    const float mult = 1 / ((N - 1) * temperature * vec_len);
    for (j = 0; j < num_of_samples; ++j) {
        // (i, j) is a positive pair: same label, different samples
        if (i != j && labels[i] == labels[j]) {
            size_t k;
            for (k = 0; k < num_of_samples; ++k) {
                // every other labeled sample acts as a negative
                if (k != i && k != j && labels[k] >= 0) {
                    const float sim = cos_sim[i*num_of_samples + k]; // cosine_similarity(z[i], z[k], feature_size)
                    const float P = p_constrastive[i*num_of_samples + k]; // P_constrastive(i, k, ...)
                    int m;
                    for (m = 0; m < feature_size; ++m) {
                        // dL/dz for the negative sample, weighted by P
                        const float d = mult*(z[k][m] - sim * z[i][m]) * P;
                        // delta is laid out channel-major with spatial stride wh
                        const int out_i = m * wh;
                        delta[out_i] -= d;
                    }
                }
            }
        }
    }
}
GB_binop__bget_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bget_int16)
// A.*B function (eWiseMult): GB (_AemultB_01__bget_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__bget_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__bget_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bget_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__bget_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_int16)
// C=scalar+B GB (_bind1st__bget_int16)
// C=scalar+B' GB (_bind1st_tran__bget_int16)
// C=A+scalar GB (_bind2nd__bget_int16)
// C=A'+scalar GB (_bind2nd_tran__bget_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = GB_BITGET (aij, bij, int16_t, 16)
#define GB_ATYPE \
    int16_t
#define GB_BTYPE \
    int16_t
#define GB_CTYPE \
    int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = GB_BITGET (x, y, int16_t, 16) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BGET || GxB_NO_INT16 || GxB_NO_BGET_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled: this kernel is only generated for accumulable operators.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop body comes from the
// included template, specialized by the GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__bget_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse matrix B into dense C, using the pre-sliced
// tasks over B (B_ek_slicing / B_ntasks / B_nthreads).
GrB_Info GB (_Cdense_accumB__bget_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of dense C.
GrB_Info GB (_Cdense_accumb__bget_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // the unreachable duplicate "return (GrB_SUCCESS) ;" that followed the
    // inner block has been removed
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: no colscale kernel is generated for the BGET operator.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: no rowscale kernel is generated for the BGET operator.
// (fixed typo: "(node)" -> "(none)", matching the colscale stub above)
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (or masked variants); the numeric work is done by the
// included template, specialized by the GB_* macros defined above.
GrB_Info GB (_AaddB__bget_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace for slicing M, A, and B; released by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B, general sparse case.
GrB_Info GB (_AemultB_01__bget_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP is 1 for BGET, so the flipxy case is handled
// by instantiating the template with GB_FLIPPED set.
GrB_Info GB (_AemultB_02__bget_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B with M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_03__bget_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as bitmap.
GrB_Info GB (_AemultB_bitmap__bget_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the operator with the scalar bound as the first
// argument, element-wise over anz entries.
GrB_Info GB (_bind1st__bget_int16)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap (GBB is always 1 for full matrices)
        if (!GBB (Bb, p)) continue ;
        int16_t bij = Bx [p] ;
        Cx [p] = GB_BITGET (x, bij, int16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the operator with the scalar bound as the second
// argument, element-wise over anz entries.
GrB_Info GB (_bind2nd__bget_int16)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap (GBB is always 1 for full matrices)
        if (!GBB (Ab, p)) continue ;
        int16_t aij = Ax [p] ;
        Cx [p] = GB_BITGET (aij, y, int16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = Ax [pA] ; \
    Cx [pC] = GB_BITGET (x, aij, int16_t, 16) ; \
}
// C = op (x, A'): transpose A and apply the operator with scalar x bound first.
GrB_Info GB (_bind1st_tran__bget_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any kernels that follow
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = Ax [pA] ; \
    Cx [pC] = GB_BITGET (aij, y, int16_t, 16) ; \
}
// C = op (A', y): transpose A and apply the operator with scalar y bound second.
GrB_Info GB (_bind2nd_tran__bget_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
hello.c | #include <stdio.h>
#include<omp.h>
// Each OpenMP thread prints a single greeting line with its thread id.
int main ()
{
    #pragma omp parallel
    {
        int i = omp_get_thread_num();
        // One printf per thread: the original pair of printf calls could
        // interleave between threads, splitting "Hello i" from "world! j".
        printf("Hello %d world! %d \n", i, i);
    }
    return 0;
}
|
residualbased_newton_raphson_contact_strategy.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY)
#define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY
/* System Includes */
/* External Includes */
/* Project includes */
#include "contact_structural_mechanics_application_variables.h"
#include "includes/kratos_parameters.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/variables.h"
// Strategies
#include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h"
// Utilities
#include "utilities/variable_utils.h"
#include "utilities/color_utilities.h"
#include "utilities/math_utils.h"
#include "custom_python/process_factory_utility.h"
#include "custom_utilities/contact_utilities.h"
namespace Kratos {
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedNewtonRaphsonContactStrategy
* @ingroup ContactStructuralMechanicsApplication
* @brief Contact Newton Raphson class
* @details This class is a specialization of the Newton Raphson strategy with some custom modifications for contact problems
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace,
class TDenseSpace, // = DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedNewtonRaphsonContactStrategy :
public ResidualBasedNewtonRaphsonStrategy< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/** Counted pointer of ClassName */
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedNewtonRaphsonContactStrategy );
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType;
typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType;
typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;
typedef typename BaseType::TDataType TDataType;
typedef TSparseSpace SparseSpaceType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef ProcessFactoryUtility::Pointer ProcessesListType;
typedef std::size_t IndexType;
    /**
     * @brief Default constructor
     * @param rModelPart The model part of the problem
     * @param p_scheme The integration scheme
     * @param pNewLinearSolver The linear solver employed
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param MaxIterations The maximum number of iterations
     * @param CalculateReactions The flag for the reaction calculation
     * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag The flag that allows to move the mesh
     * @param ThisParameters The configuration parameters (validated against GetDefaultParameters())
     * @param pMyProcesses The list of processes to execute during the solution loop
     * @param pPostProcesses The list of post-processing processes
     */
    ResidualBasedNewtonRaphsonContactStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer p_scheme,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        IndexType MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false,
        Parameters ThisParameters = Parameters(R"({})"),
        ProcessesListType pMyProcesses = nullptr,
        ProcessesListType pPostProcesses = nullptr
    )
    : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, p_scheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag),
    mThisParameters(ThisParameters),
    mpMyProcesses(pMyProcesses),
    mpPostProcesses(pPostProcesses)
    {
        KRATOS_TRY;
        // Cache the criteria echo level so it can be silenced and restored later
        mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel();
        Parameters default_parameters = GetDefaultParameters();
        mThisParameters.ValidateAndAssignDefaults(default_parameters);
        KRATOS_CATCH("");
    }
    /**
     * @brief Constructor with an explicit builder-and-solver
     * @param rModelPart The model part of the problem
     * @param p_scheme The integration scheme
     * @param pNewLinearSolver The linear solver employed
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param pNewBuilderAndSolver The builder and solver employed
     * @param MaxIterations The maximum number of iterations
     * @param CalculateReactions The flag for the reaction calculation
     * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag The flag that allows to move the mesh
     * @param ThisParameters The configuration parameters (validated against GetDefaultParameters())
     * @param pMyProcesses The list of processes to execute during the solution loop
     * @param pPostProcesses The list of post-processing processes
     */
    ResidualBasedNewtonRaphsonContactStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer p_scheme,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
        IndexType MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false,
        Parameters ThisParameters = Parameters(R"({})"),
        ProcessesListType pMyProcesses = nullptr,
        ProcessesListType pPostProcesses = nullptr
    )
    : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, p_scheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ),
    mThisParameters(ThisParameters),
    mpMyProcesses(pMyProcesses),
    mpPostProcesses(pPostProcesses)
    {
        KRATOS_TRY;
        // Cache the criteria echo level so it can be silenced and restored later
        mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel();
        Parameters default_parameters = GetDefaultParameters();
        mThisParameters.ValidateAndAssignDefaults(default_parameters);
        KRATOS_CATCH("");
    }
    /**
     * Destructor.
     */
    ~ResidualBasedNewtonRaphsonContactStrategy() override
    = default;
//******************** OPERATIONS ACCESSIBLE FROM THE INPUT: ************************//
//***********************************************************************************//
    /**
     * @brief Operation to predict the solution ... if it is not called a trivial predictor is used in which the
     * values of the solution step of interest are assumed equal to the old values
     * @details For contact problems the weighted gap is reset, the current gap is
     * recomputed, and the node coordinates are advanced by the displacement
     * increment so the contact search starts from the predicted configuration.
     */
    void Predict() override
    {
        KRATOS_TRY
        // Auxiliar zero array
        const array_1d<double, 3> zero_array = ZeroVector(3);
        // Set to zero the weighted gap
        ModelPart& r_model_part = StrategyBaseType::GetModelPart();
        NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes();
        const bool frictional = r_model_part.Is(SLIP);
        // We predict contact pressure in case of contact problem
        if (nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) {
            VariableUtils().SetVariable(WEIGHTED_GAP, 0.0, nodes_array);
            if (frictional) {
                // Frictional problems also track the accumulated weighted slip
                VariableUtils().SetVariable(WEIGHTED_SLIP, zero_array, nodes_array);
            }
            // Compute the current gap
            ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact"));
            // We predict a contact pressure
            ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
            const std::size_t step = r_process_info[STEP];
            if (step == 1) {
                // First step: no previous displacement, advance by the full value
                #pragma omp parallel for
                for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
                    auto it_node = nodes_array.begin() + i;
                    noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT);
                }
            } else {
                // Later steps: advance by the displacement increment of this step
                #pragma omp parallel for
                for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
                    auto it_node = nodes_array.begin() + i;
                    noalias(it_node->Coordinates()) += (it_node->FastGetSolutionStepValue(DISPLACEMENT) - it_node->FastGetSolutionStepValue(DISPLACEMENT, 1));
                }
            }
        }
        // NOTE: BaseType::Predict() is deliberately not called (may cause problems
        // in dynamics). A previously considered alternative predictor, which set
        // ACTIVE nodes and seeded the Lagrange-multiplier contact pressure from
        // the penalty parameter (INITIAL_PENALTY * current_gap), was removed here;
        // see the repository history if that variant needs to be recovered.
        KRATOS_CATCH("")
    }
/**
* @brief Initialization of member variables and prior operations
*/
void Initialize() override
{
KRATOS_TRY;
BaseType::Initialize();
mFinalizeWasPerformed = false;
// Initializing NL_ITERATION_NUMBER
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
r_process_info[NL_ITERATION_NUMBER] = 1;
KRATOS_CATCH("");
}
    /**
     * @brief The problem of interest is solved.
     * @details This function calls sequentially: Initialize(), InitializeSolutionStep(), Predict(),
     * SolveSolutionStep() and FinalizeSolutionStep().
     * All those functions can otherwise be called separately.
     * @return Always 0.0; the solution residual/norm is not propagated here.
     */
    double Solve() override
    {
        // The call order below is mandatory; do not reorder.
        this->Initialize();
        this->InitializeSolutionStep();
        this->Predict();
        this->SolveSolutionStep();
        this->FinalizeSolutionStep();
        // TODO: Add something if necessary
        return 0.0;
    }
/**
* @brief Performs all the required operations that should be done (for each step)
* before solving the solution step.
* @details A member variable should be used as a flag to make sure this function is called only once per step.
*/
void InitializeSolutionStep() override
{
BaseType::mpConvergenceCriteria->SetEchoLevel(0);
BaseType::InitializeSolutionStep();
BaseType::mpConvergenceCriteria->SetEchoLevel(mConvergenceCriteriaEchoLevel);
mFinalizeWasPerformed = false;
}
/**
* @brief Performs all the required operations that should be done (for each step)
* after solving the solution step.
*/
void FinalizeSolutionStep() override
{
KRATOS_TRY;
if (mFinalizeWasPerformed == false) {
BaseType::FinalizeSolutionStep();
// To avoid compute twice the FinalizeSolutionStep
mFinalizeWasPerformed = true;
}
KRATOS_CATCH("");
}
    /**
     * @brief Solves the current step.
     * @details This function returns true if a solution has been found, false otherwise.
     * Without the INTERACTION flag, a simplified semi-smooth strategy is used: the
     * Newton-Raphson loop is repeated up to "inner_loop_iterations" times until the
     * convergence criteria are met. If "adaptative_strategy" is enabled, a failed
     * step falls back to AdaptativeStep().
     */
    bool SolveSolutionStep() override
    {
        KRATOS_TRY;
        // bool is_converged = BaseType::SolveSolutionStep(); // FIXME: Requires to separate the non linear iterations
        // bool is_converged = BaseSolveSolutionStep(); // Direct solution
        bool is_converged = false;
        // Getting model part
        ModelPart& r_model_part = StrategyBaseType::GetModelPart();
        if (r_model_part.IsNot(INTERACTION)) {
            // We get the system
            TSystemMatrixType& A = *BaseType::mpA;
            TSystemVectorType& Dx = *BaseType::mpDx;
            TSystemVectorType& b = *BaseType::mpb;
            // We get the process info
            ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
            int inner_iteration = 0;
            // Outer (inner-loop) iterations of the simplified semi-smooth strategy
            while (!is_converged && inner_iteration < mThisParameters["inner_loop_iterations"].GetInt()) {
                ++inner_iteration;
                if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
                    std::cout << std::endl << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << inner_iteration;;
                }
                // We solve one loop
                r_process_info[NL_ITERATION_NUMBER] = 1;
                r_process_info[INNER_LOOP_ITERATION] = inner_iteration;
                is_converged = BaseSolveSolutionStep();
                // We check the convergence (criteria muted, then echo level restored)
                BaseType::mpConvergenceCriteria->SetEchoLevel(0);
                is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), A, Dx, b);
                BaseType::mpConvergenceCriteria->SetEchoLevel(mConvergenceCriteriaEchoLevel);
                if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
                    if (is_converged) std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FGRN("CONVERGED")) << std::endl;
                    else std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FRED("NOT CONVERGED")) << std::endl;
                }
            }
        } else {
            // We compute the base loop
            r_model_part.GetProcessInfo()[INNER_LOOP_ITERATION] = 1;
            is_converged = BaseSolveSolutionStep();
        }
        // Optional fallback: retry with the adaptive sub-stepping strategy
        if (mThisParameters["adaptative_strategy"].GetBool()) {
            if (!is_converged) {
                is_converged = AdaptativeStep();
            }
        }
        return is_converged;
        KRATOS_CATCH("");
    }
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
Parameters mThisParameters; /// The configuration parameters
// ADAPTATIVE STRATEGY PARAMETERS
bool mFinalizeWasPerformed; /// If the FinalizeSolutionStep has already been performed for the current step
ProcessesListType mpMyProcesses; /// The processes list executed around each (sub-)step
ProcessesListType mpPostProcesses; /// The post processes list (output/printing)
// OTHER PARAMETERS
int mConvergenceCriteriaEchoLevel; /// The echo level of the convergence criteria
///@}
///@name Protected Operators
///@{
/**
* @brief Solves the current step with one full Newton-Raphson cycle.
* @details Performs the predictor build-and-solve, then iterates
* build/solve/update until the convergence criteria are satisfied or
* BaseType::mMaxIterationNumber is reached. When the "adaptative_strategy"
* option is active, the geometry is checked for inverted elements before the
* first solve and after each database update; on detection the STEP counter is
* reverted and false is returned so the caller can split the time step.
* @return true if a solution has been found, false otherwise
*/
bool BaseSolveSolutionStep()
{
KRATOS_TRY;
// Pointers needed in the solution
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
typename TSchemeType::Pointer p_scheme = BaseType::GetScheme();
typename TBuilderAndSolverType::Pointer p_builder_and_solver = BaseType::GetBuilderAndSolver();
auto& r_dof_set = p_builder_and_solver->GetDofSet();
TSystemMatrixType& rA = *BaseType::mpA;
TSystemVectorType& rDx = *BaseType::mpDx;
TSystemVectorType& rb = *BaseType::mpb;
// Initializing the parameters of the Newton-Raphson cycle
IndexType iteration_number = 1;
r_process_info[NL_ITERATION_NUMBER] = iteration_number;
bool is_converged = false;
bool residual_is_updated = false;
p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb);
// We do a geometry check before we solve the system for the first time
if (mThisParameters["adaptative_strategy"].GetBool()) {
if (CheckGeometryInverted()) {
KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT BEFORE FIRST SOLVE" << std::endl;
r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing
return false;
}
}
// Function to perform the building and the solving phase.
// Full build-and-solve only when the LHS must be rebuilt; otherwise reuse rA
if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false) {
TSparseSpace::SetToZero(rA);
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
} else {
TSparseSpace::SetToZero(rDx); //Dx=0.00;
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
// Debugging info
BaseType::EchoInfo(iteration_number);
// Updating the results stored in the database
UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());
// We now check the geometry after the predictor update
if (mThisParameters["adaptative_strategy"].GetBool()) {
if (CheckGeometryInverted()) {
KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl;
r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing
return false;
}
}
p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
// Post-check only runs when the pre-check accepted; it may refresh the RHS first
if (is_converged) {
// Initialisation of the convergence criteria
BaseType::mpConvergenceCriteria->InitializeSolutionStep(r_model_part, r_dof_set, rA, rDx, rb);
if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
}
is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb);
}
// Iteration cycle... performed only for NonLinearProblems
while (is_converged == false && iteration_number++<BaseType::mMaxIterationNumber) {
//setting the number of iteration
r_process_info[NL_ITERATION_NUMBER] = iteration_number;
p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb);
//call the linear system solver to find the correction mDx for the
//it is not called if there is no system to solve
if (SparseSpaceType::Size(rDx) != 0) {
if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false ) {
if( BaseType::GetKeepSystemConstantDuringIterations() == false) {
//A = 0.00;
TSparseSpace::SetToZero(rA);
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
else {
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
}
else {
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
}
} else {
KRATOS_WARNING("No DoFs") << "ATTENTION: no free DOFs!! " << std::endl;
}
// Debugging info
BaseType::EchoInfo(iteration_number);
// Updating the results stored in the database
UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());
// We now check the geometry
if (mThisParameters["adaptative_strategy"].GetBool()) {
if (CheckGeometryInverted()) {
KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl;
r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing
return false;
}
}
p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
residual_is_updated = false;
if (is_converged) {
if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
residual_is_updated = true;
//std::cout << "mb is calculated" << std::endl;
}
is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb);
}
}
// Plots a warning if the maximum number of iterations is exceeded (rank 0 only)
if (iteration_number >= BaseType::mMaxIterationNumber && r_model_part.GetCommunicator().MyPID() == 0)
MaxIterationsExceeded();
// Recalculate residual if needed
// (note that some convergence criteria need it to be recalculated)
if (residual_is_updated == false) {
// NOTE:
// The following part will be commented because it is time consuming
// and there is no obvious reason to be here. If someone need this
// part please notify the community via mailing list before uncommenting it.
// Pooyan.
// TSparseSpace::SetToZero(mb);
// p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb);
}
// Calculate reactions if required
if (BaseType::mCalculateReactionsFlag)
p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);
return is_converged;
KRATOS_CATCH("");
}
/**
* @brief This method performs the adaptative step.
* @details The current time step is repeatedly split by "split_factor" (at most
* "max_number_splits" times) and the step is re-solved over the resulting
* sub-steps. The solution-step data of the failed attempt is overwritten/cloned
* so every sub-step starts from the last converged state. DELTA_TIME is restored
* to its original value before returning.
* @return true if one of the split attempts converged, false otherwise
*/
bool AdaptativeStep()
{
KRATOS_TRY;
bool is_converged = false;
// Plots a warning if the required python processes were not supplied
if (mpMyProcesses == nullptr && StrategyBaseType::mEchoLevel > 0)
KRATOS_WARNING("No python processes") << "If you have not implemented any method to recalculate BC or loads in function of time, this strategy will be USELESS" << std::endl;
if (mpPostProcesses == nullptr && StrategyBaseType::mEchoLevel > 0)
KRATOS_WARNING("No python post processes") << "If you don't add the postprocesses and the time step if splitted you won't postprocess that steps" << std::endl;
ModelPart& r_model_part = StrategyBaseType::GetModelPart();
ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
const double original_delta_time = r_process_info[DELTA_TIME]; // We save the delta time to restore later
int split_number = 0;
// We iterate until we reach the convergence or we split more than desired
while (is_converged == false && split_number <= mThisParameters["max_number_splits"].GetInt()) {
// Splitting the time step as a way to try to improve the convergence
split_number += 1;
double aux_delta_time, current_time;
const double aux_time = SplitTimeStep(aux_delta_time, current_time);
current_time += aux_delta_time;
bool inside_the_split_is_converged = false;
IndexType inner_iteration = 0;
// Advance through the sub-steps until the original target time is reached
while (current_time <= aux_time) {
inner_iteration += 1;
r_process_info[STEP] += 1;
if (inner_iteration == 1) {
// First sub-step: discard the failed attempt by overwriting the
// current solution-step data with the previous (converged) one
if (StrategyBaseType::MoveMeshFlag())
UnMoveMesh();
NodesArrayType& nodes_array = r_model_part.Nodes();
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
auto it_node = nodes_array.begin() + i;
it_node->OverwriteSolutionStepData(1, 0);
// it_node->OverwriteSolutionStepData(2, 1);
}
r_process_info.SetCurrentTime(current_time); // Reduces the time step
FinalizeSolutionStep();
} else {
// Subsequent sub-steps: clone the database forward as in a normal advance
NodesArrayType& nodes_array = r_model_part.Nodes();
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i)
(nodes_array.begin() + i)->CloneSolutionStepData();
r_process_info.CloneSolutionStepInfo();
r_process_info.ClearHistory(r_model_part.GetBufferSize());
r_process_info.SetAsTimeStepInfo(current_time); // Sets the new time step
}
// We execute the processes before the non-linear iteration
if (mpMyProcesses != nullptr)
mpMyProcesses->ExecuteInitializeSolutionStep();
if (mpPostProcesses != nullptr)
mpPostProcesses->ExecuteInitializeSolutionStep();
// In order to initialize again everything
BaseType::mInitializeWasPerformed = false;
mFinalizeWasPerformed = false;
// We repeat the solve with the new DELTA_TIME
this->Initialize();
this->InitializeSolutionStep();
this->Predict();
inside_the_split_is_converged = BaseType::SolveSolutionStep();
this->FinalizeSolutionStep();
// We execute the processes after the non-linear iteration
if (mpMyProcesses != nullptr)
mpMyProcesses->ExecuteFinalizeSolutionStep();
if (mpPostProcesses != nullptr)
mpPostProcesses->ExecuteFinalizeSolutionStep();
if (mpMyProcesses != nullptr)
mpMyProcesses->ExecuteBeforeOutputStep();
if (mpPostProcesses != nullptr)
mpPostProcesses->PrintOutput();
if (mpMyProcesses != nullptr)
mpMyProcesses->ExecuteAfterOutputStep();
current_time += aux_delta_time;
}
// NOTE: only the convergence of the last sub-step is recorded here
if (inside_the_split_is_converged)
is_converged = true;
}
// Plots a warning if the maximum number of iterations and splits are exceeded
if (is_converged == false)
MaxIterationsAndSplitsExceeded();
// Restoring original DELTA_TIME
r_process_info[DELTA_TIME] = original_delta_time;
return is_converged;
KRATOS_CATCH("");
}
/**
* @brief Here the database is updated.
* @details Currently delegates directly to the base implementation; kept as an
* override as the extension point for contact-specific updates.
* @param A The LHS matrix
* @param Dx The increment of solution after solving system
* @param b The RHS vector
* @param MoveMesh The flag that tells if the mesh should be moved
*/
void UpdateDatabase(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b,
const bool MoveMesh
) override
{
BaseType::UpdateDatabase(A,Dx,b,MoveMesh);
// TODO: Add something if necessary
}
/**
 * @brief This method checks that no element of the model part is inverted.
 * @details An element counts as inverted when the determinant of its Jacobian at
 * integration point 0 is negative, or when the determinant of its deformation
 * gradient at any integration point is negative. The scan stops at the first
 * inverted element found (hence no OMP parallelization).
 * @return true if an inverted element was found, false otherwise
 */
bool CheckGeometryInverted()
{
    ModelPart& r_model_part = StrategyBaseType::GetModelPart();
    ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
    ElementsArrayType& elements_array = r_model_part.Elements();
    // NOT OMP: we return as soon as the first inverted element is detected
    for (int i = 0; i < static_cast<int>(elements_array.size()); ++i) {
        auto it_elem = elements_array.begin() + i;
        auto& r_geometry = it_elem->GetGeometry();
        // Jacobian check at the first integration point
        if (r_geometry.DeterminantOfJacobian(0) < 0.0) {
            if (mConvergenceCriteriaEchoLevel > 0) {
                KRATOS_WATCH(it_elem->Id())
                KRATOS_WATCH(r_geometry.DeterminantOfJacobian(0))
            }
            return true;
        }
        // We check now the deformation gradient determinant at every GP
        std::vector<Matrix> deformation_gradient_matrices;
        it_elem->CalculateOnIntegrationPoints( DEFORMATION_GRADIENT, deformation_gradient_matrices, r_process_info);
        for (IndexType i_gp = 0; i_gp < deformation_gradient_matrices.size(); ++i_gp) {
            const double det_f = MathUtils<double>::DetMat(deformation_gradient_matrices[i_gp]);
            if (det_f < 0.0) {
                if (mConvergenceCriteriaEchoLevel > 0) {
                    KRATOS_WATCH(it_elem->Id())
                    KRATOS_WATCH(det_f)
                }
                return true;
            }
        }
    }
    // No inverted element found. (The former "inverted_element" flag was dead:
    // it was never written, every detection path returned early.)
    return false;
}
/**
* @brief Here the time step is splitted
* @param AuxDeltaTime The new delta time to be considered
* @param CurrentTime The current time
* @return The destination time
*/
double SplitTimeStep(
double& AuxDeltaTime,
double& CurrentTime
)
{
KRATOS_TRY;
const double aux_time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME];
AuxDeltaTime = StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME];
CurrentTime = aux_time - AuxDeltaTime;
StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] = CurrentTime; // Restore time to the previous one
AuxDeltaTime /= mThisParameters["split_factor"].GetDouble();
StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] = AuxDeltaTime; // Change delta time
CoutSplittingTime(AuxDeltaTime, aux_time);
return aux_time;
KRATOS_CATCH("");
}
/**
* This method moves bak the mesh to the previous position
*/
void UnMoveMesh()
{
KRATOS_TRY;
if (StrategyBaseType::GetModelPart().NodesBegin()->SolutionStepsDataHas(DISPLACEMENT_X) == false)
KRATOS_ERROR << "It is impossible to move the mesh since the DISPLACEMENT var is not in the model_part. Either use SetMoveMeshFlag(False) or add DISPLACEMENT to the list of variables" << std::endl;
NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes();
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
auto it_node = nodes_array.begin() + i;
noalias(it_node->Coordinates()) = it_node->GetInitialPosition().Coordinates();
noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT, 1);
}
KRATOS_CATCH("");
}
/**
 * @brief This method returns the default parameters in order to avoid code duplication.
 * @return The default configuration of the strategy
 */
Parameters GetDefaultParameters()
{
    // Defaults are built directly from the embedded JSON string
    return Parameters(R"(
{
"adaptative_strategy" : false,
"split_factor" : 10.0,
"max_number_splits" : 3,
"inner_loop_iterations" : 5
})" );
}
/**
* @brief This method prints information after solving the problem
*/
void CoutSolvingProblem()
{
if (mConvergenceCriteriaEchoLevel != 0) {
std::cout << "STEP: " << StrategyBaseType::GetModelPart().GetProcessInfo()[STEP] << "\t NON LINEAR ITERATION: " << StrategyBaseType::GetModelPart().GetProcessInfo()[NL_ITERATION_NUMBER] << "\t TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] << "\t DELTA TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] << std::endl;
}
}
/**
 * @brief This method prints information after splitting the increment of time.
 * @param AuxDeltaTime The new time step to be considered
 * @param AuxTime The destination time
 */
void CoutSplittingTime(
    const double AuxDeltaTime,
    const double AuxTime
    )
{
    // Only rank 0 prints, and only when the criteria echo level is positive
    if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
        const double restored_time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME];
        std::cout.precision(4);
        std::cout << "|----------------------------------------------------|" << std::endl
                  << "| " << BOLDFONT("SPLITTING TIME STEP") << " |" << std::endl
                  << "| " << BOLDFONT("COMING BACK TO TIME: ") << std::scientific << restored_time << " |" << std::endl
                  << "| " << BOLDFONT(" NEW TIME STEP: ") << std::scientific << AuxDeltaTime << " |" << std::endl
                  << "| " << BOLDFONT(" UNTIL TIME: ") << std::scientific << AuxTime << " |" << std::endl
                  << "|----------------------------------------------------|" << std::endl;
    }
}
/**
* @brief This method prints a warning banner after reaching the maximum number of iterations.
*/
void MaxIterationsExceeded() override
{
// Only rank 0 prints, and only when the criteria echo level is positive
if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
std::cout << "|----------------------------------------------------|" << std::endl;
std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl;
std::cout << "|----------------------------------------------------|" << std::endl;
}
}
/**
* @brief This method prints a warning banner after reaching the maximum number of iterations and time-step splits.
*/
void MaxIterationsAndSplitsExceeded()
{
// Only rank 0 prints, and only when the criteria echo level is positive
if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
std::cout << "|----------------------------------------------------|" << std::endl;
std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl;
std::cout << "| " << BOLDFONT(FRED(" Max number of splits exceeded ")) << " |" << std::endl;
std::cout << "|----------------------------------------------------|" << std::endl;
}
}
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@{
/**
* Copy constructor.
* NOTE(review): the body is empty, so no member (mThisParameters, the process
* lists, the flags) is copied from Other — presumably copying is intentionally
* disabled; confirm, and consider "= delete" instead of an empty body.
*/
ResidualBasedNewtonRaphsonContactStrategy(const ResidualBasedNewtonRaphsonContactStrategy& Other)
{
};
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedNewtonRaphsonContactStrategy */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} // namespace Kratos
#endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY */
|
needle.c | #define LIMIT -999
#define TRACE
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "openacc.h"
//#define OPENMP
//#define NUM_THREAD 4
#define DEBUG
#ifndef VERIFICATION
#define VERIFICATION 1
#endif
#ifndef _MAX_ROWS_
#define _MAX_ROWS_ 2049
#ifdef _OPENARC_
#pragma openarc #define _MAX_ROWS_ 2049
#endif
#endif
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
/* Returns the largest of the three integers a, b and c. */
int maximum( int a,
             int b,
             int c){
    /* Take the larger of a and b first, then compare against c. */
    int best = (a <= b) ? b : a;
    return (best <= c) ? c : best;
}
/* BLOSUM62 amino-acid substitution score matrix (24x24, symmetric), used to
 * build the Needleman-Wunsch reference matrix in runTest(). Indexed here by
 * the generated sequence values (1..10), not by amino-acid letters. */
int blosum62[24][24] = {
{ 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4},
{-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4},
{-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4},
{-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{ 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4},
{-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4},
{-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4},
{-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4},
{-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4},
{-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4},
{-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4},
{-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4},
{-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4},
{-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4},
{ 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4},
{ 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4},
{-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4},
{-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4},
{ 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4},
{-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4},
{-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1}
};
int max_rows, max_cols, penalty;
int omp_num_threads;
/* Wall-clock time in seconds (microsecond resolution) via gettimeofday(). */
double gettime() {
    struct timeval now;
    gettimeofday(&now, 0);
    return (double)now.tv_sec + (double)now.tv_usec * 1e-6;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
/* Entry point: runs the benchmark and reports total wall-clock time. */
int
main( int argc, char** argv)
{
double start_time, end_time;
start_time = gettime();
runTest( argc, argv);
end_time = gettime();
printf("Total Execution Time %lf sec. \n", end_time - start_time);
return EXIT_SUCCESS;
}
/* Prints the command-line help to stderr and terminates with status 1. */
void usage(int argc, char **argv)
{
    FILE *out = stderr;
    fprintf(out, "Usage: %s <max_rows/max_cols> <penalty> <num_threads>\n", argv[0]);
    fprintf(out, "\t<dimension> - x and y dimensions\n");
    fprintf(out, "\t<penalty> - penalty(positive integer)\n");
    fprintf(out, "\t<num_threads> - no. of threads\n");
    exit(1);
}
/* Accelerated (OpenACC) Needleman-Wunsch score computation.
 * The score matrix is swept along anti-diagonals: first the top-left triangle,
 * then the bottom-right triangle, since cells on the same anti-diagonal are
 * independent of each other.
 * NOTE(review): a, b, c, k and index are function-scope scalars written inside
 * the "acc kernels loop independent" regions; the OpenACC compiler is expected
 * to privatize them per iteration — confirm with the targeted compiler. */
void mainComp(int input_itemsets[_MAX_ROWS_*_MAX_ROWS_], int referrence[_MAX_ROWS_*_MAX_ROWS_])
{
int i, idx, index;
/////////////////////////////////
// Used for inlining maximum() //
/////////////////////////////////
int a, b, c, k;
long int iSum;
#pragma acc data \
copy(input_itemsets[0:_MAX_ROWS_*_MAX_ROWS_]) \
copyin(referrence[0:_MAX_ROWS_*_MAX_ROWS_])
{
// Top-left triangle: anti-diagonal i has i+1 independent cells
for( i = 0 ; i < max_cols-2 ; i++){
#pragma acc kernels loop gang worker independent
for( idx = 0 ; idx <= i ; idx++){
index = (idx + 1) * max_cols + (i + 1 - idx);
// input_itemsets[index]= maximum( input_itemsets[index-1-max_cols]+ referrence[index],
// input_itemsets[index-1] - penalty,
// input_itemsets[index-max_cols] - penalty);
a = input_itemsets[index-1-max_cols]+ referrence[index];
b = input_itemsets[index-1] - penalty;
c = input_itemsets[index-max_cols] - penalty;
if( a <= b )
k = b;
else
k = a;
if( k <=c )
input_itemsets[index] = c;
else
input_itemsets[index] = k;
}
}
printf("Processing bottom-right matrix\n");
//Compute bottom-right matrix
for( i = max_cols - 4 ; i >= 0 ; i--){
#pragma acc kernels loop gang worker independent
for( idx = 0 ; idx <= i ; idx++){
index = ( max_cols - idx - 2 ) * max_cols + idx + max_cols - i - 2 ;
//input_itemsets[index]= maximum( input_itemsets[index-1-max_cols]+ referrence[index],
// input_itemsets[index-1] - penalty,
// input_itemsets[index-max_cols] - penalty);
a = input_itemsets[index-1-max_cols]+ referrence[index];
b = input_itemsets[index-1] - penalty;
c = input_itemsets[index-max_cols] - penalty;
if( a <= b )
k = b;
else
k = a;
if( k <=c )
input_itemsets[index] = c;
else
input_itemsets[index] = k;
}
}
}
//Fake computation to measure timing of unified memory version.
iSum = 0;
for( i=0; i<_MAX_ROWS_*_MAX_ROWS_; i++ )
iSum += input_itemsets[i];
printf("Sum of input_itemsets: %ld\n", iSum);
}
/* CPU reference implementation of the anti-diagonal Needleman-Wunsch sweeps,
 * used to verify the accelerated version.
 * BUG FIX: a, b, c and k are function-scope scalars written inside the
 * "omp parallel for" loops but were missing from the private(...) clause, so
 * all threads raced on the same four shared variables and could compute wrong
 * scores. They are now listed as private. */
void mainCompCPU(int input_itemsets[_MAX_ROWS_*_MAX_ROWS_], int referrence[_MAX_ROWS_*_MAX_ROWS_])
{
    int i, idx, index;
    /////////////////////////////////
    // Used for inlining maximum() //
    /////////////////////////////////
    int a, b, c, k;
    /* Top-left triangle: cells on one anti-diagonal are independent */
    for( i = 0 ; i < max_cols-2 ; i++){
#ifdef _OPENMP
        //omp_set_num_threads(omp_num_threads);
        #pragma omp parallel for shared(input_itemsets) firstprivate(i,max_cols,penalty) private(idx, index, a, b, c, k)
#endif
        for( idx = 0 ; idx <= i ; idx++){
            index = (idx + 1) * max_cols + (i + 1 - idx);
            /* Inlined maximum() over the three neighbour scores */
            a = input_itemsets[index-1-max_cols]+ referrence[index];
            b = input_itemsets[index-1] - penalty;
            c = input_itemsets[index-max_cols] - penalty;
            if( a <= b )
                k = b;
            else
                k = a;
            if( k <=c )
                input_itemsets[index] = c;
            else
                input_itemsets[index] = k;
        }
    }
    //Compute bottom-right matrix
    for( i = max_cols - 4 ; i >= 0 ; i--){
#ifdef _OPENMP
        //omp_set_num_threads(omp_num_threads);
        #pragma omp parallel for shared(input_itemsets) firstprivate(i,max_cols,penalty) private(idx, index, a, b, c, k)
#endif
        for( idx = 0 ; idx <= i ; idx++){
            index = ( max_cols - idx - 2 ) * max_cols + idx + max_cols - i - 2 ;
            a = input_itemsets[index-1-max_cols]+ referrence[index];
            b = input_itemsets[index-1] - penalty;
            c = input_itemsets[index-max_cols] - penalty;
            if( a <= b )
                k = b;
            else
                k = a;
            if( k <=c )
                input_itemsets[index] = c;
            else
                input_itemsets[index] = k;
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
/* Parses the command line, builds the sequence/reference matrices, runs the
 * accelerated Needleman-Wunsch (mainComp), optionally verifies it against the
 * CPU version, and (with TRACE) dumps the traceback path to nwTrace.txt. */
void
runTest( int argc, char** argv)
{
    int *input_itemsets, *output_itemsets, *referrence;
    int i,j;
#ifdef DEBUG
    double start_time, end_time, init_time;
#endif
#ifdef TRACE
    FILE *fp;
#endif
    // the lengths of the two sequences should be able to divided by 16.
    // And at current stage max_rows needs to equal max_cols
    if (argc == 4)
    {
        max_rows = atoi(argv[1]);
        max_cols = atoi(argv[1]);
        penalty = atoi(argv[2]);
        omp_num_threads = atoi(argv[3]);
        /* The requested size must match the compile-time matrix dimension */
        if( max_rows != (_MAX_ROWS_-1) ) {
            printf("Wrong value (%d) for macro, _MAX_ROWS_!\n", _MAX_ROWS_);
            return;
        }
    }
    else{
        usage(argc, argv);
    }
    /* One extra row/column holds the boundary (gap-penalty) values */
    max_rows = max_rows + 1;
    max_cols = max_cols + 1;
#ifdef DEBUG
    start_time = gettime();
#endif
    /* Unified-memory allocations shared between host and device */
    referrence = (int *)acc_create_unified(NULL, max_rows * max_cols * sizeof(int) );
    input_itemsets = (int *)acc_create_unified(NULL, max_rows * max_cols * sizeof(int) );
#ifdef DEBUG
    init_time = gettime() - start_time;
#endif
    /* Kept for parity with the original benchmark; not used below */
    output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
    if (!input_itemsets)
        fprintf(stderr, "error: can not allocate memory");
    srand ( 7 );
    for (i = 0 ; i < max_cols; i++){
        for (j = 0 ; j < max_rows; j++){
            input_itemsets[i*max_cols+j] = 0;
        }
    }
    printf("Start Needleman-Wunsch\n");
    /* Random sequences along the first column and the first row */
    for( i=1; i< max_rows ; i++){    //please define your own sequence.
        input_itemsets[i*max_cols] = rand() % 10 + 1;
    }
    for( j=1; j< max_cols ; j++){    //please define your own sequence.
        input_itemsets[j] = rand() % 10 + 1;
    }
    /* Substitution scores looked up in the BLOSUM62 table */
    for (i = 1 ; i < max_cols; i++){
        for (j = 1 ; j < max_rows; j++){
            referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]];
        }
    }
    /* Boundary conditions: accumulated gap penalties */
    for( i = 1; i< max_rows ; i++)
        input_itemsets[i*max_cols] = -i * penalty;
    for( j = 1; j< max_cols ; j++)
        input_itemsets[j] = -j * penalty;
    //Compute top-left matrix
    printf("Num of threads: %d\n", omp_num_threads);
    printf("Processing top-left matrix\n");
#ifdef DEBUG
    start_time = gettime();
#endif
    mainComp(input_itemsets, referrence);
#ifdef DEBUG
    end_time = gettime();
    printf("Accelerator Elapsed Time = %lf sec. \n", end_time - start_time + init_time);
#endif
    if(VERIFICATION) {
        /* Rebuild the initial matrix with the same seed, run the CPU version,
         * and compare via a relative L2 norm */
        int *input_itemsets_CPU;
        double deltaL2Norm = 0;
        double nonAccL2Norm = 0;
        double L2Norm;
        input_itemsets_CPU = (int *)malloc( max_rows * max_cols * sizeof(int) );
        srand ( 7 );
        for (i = 0 ; i < max_cols; i++){
            for (j = 0 ; j < max_rows; j++){
                input_itemsets_CPU[i*max_cols+j] = 0;
            }
        }
        for( i=1; i< max_rows ; i++){    //please define your own sequence.
            input_itemsets_CPU[i*max_cols] = rand() % 10 + 1;
        }
        for( j=1; j< max_cols ; j++){    //please define your own sequence.
            input_itemsets_CPU[j] = rand() % 10 + 1;
        }
        for( i = 1; i< max_rows ; i++)
            input_itemsets_CPU[i*max_cols] = -i * penalty;
        for( j = 1; j< max_cols ; j++)
            input_itemsets_CPU[j] = -j * penalty;
#ifdef DEBUG
        start_time = gettime();
#endif
        mainCompCPU(input_itemsets_CPU, referrence);
#ifdef DEBUG
        end_time = gettime();
        printf("Main Comp. Time CPU = %lf sec. \n", end_time - start_time);
#endif
        for (i = 0; i < max_rows * max_cols; ++i) {
            double d = input_itemsets_CPU[i] - input_itemsets[i];
            deltaL2Norm += d * d;
            nonAccL2Norm += input_itemsets_CPU[i] * input_itemsets_CPU[i];
        }
        L2Norm = sqrt(deltaL2Norm / nonAccL2Norm);
        if (L2Norm < 1e-9) {
            printf("Verification: Successful\n");
        } else {
            printf("Verification: Failed\n");
        }
        printf("L2Norm = %lf\n", L2Norm);
        free(input_itemsets_CPU);
    }
#ifdef TRACE
    printf("print traceback value CPU:\n");
    if( (fp = fopen("nwTrace.txt", "w")) == 0 ) {
        printf("Can not open %s\n", "nwTrace.txt");
        return;
    }
    /* BUG FIX: the loop condition used the comma operator ("i>=0, j>=0"),
     * which discards the i>=0 test, so i could walk negative and read out of
     * bounds. Both coordinates are now checked with &&. */
    for (i = j = max_rows - 2; i >= 0 && j >= 0;){
        int nw, n, w, traceback;
        if ( i == max_rows - 2 && j == max_rows - 2 )
            fprintf(fp, "%d ", input_itemsets[ i * max_cols + j]); //print the first element
        if ( i == 0 && j == 0 )
            break;
        if ( i > 0 && j > 0 ){
            nw = input_itemsets[(i - 1) * max_cols + j - 1];
            w = input_itemsets[ i * max_cols + j - 1 ];
            n = input_itemsets[(i - 1) * max_cols + j];
        }
        else if ( i == 0 ){
            nw = n = LIMIT;
            w = input_itemsets[ i * max_cols + j - 1 ];
        }
        else{ /* j == 0 (the former unreachable empty else, which would have
                 left nw/n/w uninitialized, has been removed) */
            nw = w = LIMIT;
            n = input_itemsets[(i - 1) * max_cols + j];
        }
        traceback = maximum(nw, w, n);
        fprintf(fp, "%d ", traceback);
        if(traceback == nw )
        {i--; j--; continue;}
        else if(traceback == w )
        {j--; continue;}
        else if(traceback == n )
        {i--; continue;}
    }
    fprintf(fp, "\n");
    fclose(fp);
#endif
}
|
GB_binop__ne_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_uint16)
// A*D function (colscale): GB (_AxD__ne_uint16)
// D*A function (rowscale): GB (_DxB__ne_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_uint16)
// C=scalar+B GB (_bind1st__ne_uint16)
// C=scalar+B' GB (_bind1st_tran__ne_uint16)
// C=A+scalar GB (_bind2nd__ne_uint16)
// C=A'+scalar GB (_bind2nd_tran__ne_uint16)
// C type: bool
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_UINT16 || GxB_NO_NE_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NE is not in that list, so this C += A+B all-dense kernel is compiled
// out and the "(none)" placeholder name is never emitted by the linker.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C = A+B where C, A, and B are all dense; no mask, no accumulator.
#if GB_DISABLE
// operator disabled via GB_control.h; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
// C += B, accumulating a sparse B into a dense C.  The working template
// is compiled out (#if 0) for this operator — presumably because NE is
// not usable as an accumulator — so this kernel is a no-op that reports
// success.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ne_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
// C += b, accumulating a scalar into a dense C.  Like _Cdense_accumB
// above, the working template is compiled out (#if 0) for this operator,
// so the function is a no-op that reports success.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// C = A*D: column scale, applying z = (aij != djj) for each entry of A.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C holds bool results (the NE operator returns bool, not uint16_t)
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
// C = D*B: row scale, applying z = (dii != bij) for each entry of B.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C holds bool results (the NE operator returns bool, not uint16_t)
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ne_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseAdd: C = A+B over the union of the patterns of A and B, with an
// optional mask M (structural or valued, possibly complemented).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces, released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__ne_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult: C = A.*B over the intersection of the patterns of A and B,
// producing a sparse or hypersparse C (method 08).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// eWiseMult method 02: A is sparse/hypersparse, B is bitmap/full.
// NE is commutative, so GB_BINOP_FLIP is 0 and the second branch below is
// the one actually compiled.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper, A and B bitmap/full.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ne_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult producing a bitmap C, with optional (complemented) mask M.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ne_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
    // apply z = (x != bij) to every entry of B, with the scalar x bound
    // as the first operand of the binary operator
    bool *Cx = (bool *) Cx_output ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only entries present in the bitmap of B are computed
        if (GBB (Bb, p))
        {
            uint16_t bval = GBX (Bx, p, false) ;
            Cx [p] = (x != bval) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ne_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
    // apply z = (aij != y) to every entry of A, with the scalar y bound
    // as the second operand of the binary operator
    bool *Cx = (bool *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in the bitmap of A are computed
        if (GBB (Ab, p))
        {
            uint16_t aval = GBX (Ax, p, false) ;
            Cx [p] = (aval != y) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__ne_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (x, A'): transpose A and apply z = (x != aij) via GB_CAST_OP.
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code after this function (A and B types are
// both uint16_t for this operator, so the definition is unchanged)
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__ne_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (A', y): transpose A and apply z = (aij != y) via GB_CAST_OP.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
general_basis_get_amp.h | #ifndef _GENERAL_BASIS_GET_AMP_H
#define _GENERAL_BASIS_GET_AMP_H
//#include <limits>
#include "general_basis_core.h"
#include "numpy/ndarraytypes.h"
#include "misc.h"
#include "openmp.h"
//#include <complex>
namespace basis_general {
// get_amp_rep: recursively accumulate the symmetry phase factor relating the
// target state s to the representative r.  For each of the nt symmetry
// sectors it applies B->map_state() pers[depth] times while advancing the
// Bloch phase k in steps of q = 2*pi*qs[depth]/pers[depth]; at the deepest
// sector it adds sign * exp(-i k) each time the mapped state equals s.
// Returns 1.0 when there are no symmetries (nt <= 0).
template<class I,class P=signed char>
std::complex<double> get_amp_rep(general_basis_core<I,P> *B,
const int nt,
I r, // start out with representative state and iterate over all transformations.
const I s, // target state to find the amplitude of
double k = 0.0, // Bloch phase accumulated so far
P sign = 1, // sign carried along by map_state (semantics defined by general_basis_core)
const int depth = 0 // current symmetry sector, 0 <= depth < nt
)
{
if(nt<=0){
// no symmetries: amplitude factor is trivially 1
return 1.0;
}
std::complex<double> phase_factor = 0.0;
const int per = B->pers[depth]; // period of this symmetry generator
const double q = (2.0*M_PI*B->qs[depth])/per; // phase increment per application
if(depth < nt-1){
// inner sectors: recurse over the remaining generators for every power j
for(int j=0;j<per;j++){
phase_factor += get_amp_rep(B,nt,r,s,k,sign,depth+1);
k += q;
r = B->map_state(r,depth,sign);
}
}
else{
// deepest sector: compare the fully transformed state with the target
for(int j=0;j<per;j++){
if(r==s){
phase_factor += double(sign)*std::exp(std::complex<double>(0,-k));
}
k += q;
r = B->map_state(r,depth,sign);
}
}
return phase_factor;
}
// get_amp_general: for each full-basis state s[i], find its representative r
// via ref_state, overwrite s[i] with r, and store in out[i] the amplitude
// connecting s[i] to the symmetry-reduced basis vector of r (zero when r is
// not a valid representative).  Returns the first nonzero error code
// produced by type_checks, or 0 on success.  Parallelized with OpenMP.
template<class I,class J,class P=signed char>
int get_amp_general(general_basis_core<I,P> *B,
I s[], // input states in the full basis
J out[], // state amplitudes of state s (full basis)
const npy_intp Ns // length of above arrays (should be the same)
)
{
int err=0;
double per_factor = 1.0; // product of all symmetry periods
int q_sum = 0; // sum of quantum numbers
const int nt = B->get_nt();
for(int i=0;i<nt;i++){
per_factor *= B->pers[i];
q_sum += std::abs(B->qs[i]);
}
const npy_intp chunk = std::max(Ns/(100*omp_get_max_threads()),(npy_intp)1); // check_state has variable workload
if(q_sum > 0 || B->fermionic){ // a non-zero quantum number, or fermionic basis => need a nontrivial phase_factor
#pragma omp parallel for schedule(dynamic,chunk)
for(npy_intp i=0;i<Ns;i++){
if(err == 0){ // soft early-out: remaining iterations skip work after an error
std::complex<double> phase_factor, out_tmp;
int g[__GENERAL_BASIS_CORE__max_nt];
P sign=1;
I ss=s[i];
I r = B->ref_state(ss,g,sign);
double norm_r = B->check_state(r);
s[i] = r; // update state with representative
if(!check_nan(norm_r) && norm_r > 0){ // ref_state is a representative
phase_factor = get_amp_rep(B,nt,r,ss);
out_tmp = phase_factor/std::sqrt(norm_r * per_factor);
}
else{
// state does not belong to the symmetry-reduced basis
out_tmp = 0.0;
}
int local_err = type_checks(out_tmp, &out[i]); // compute and assign amplitude in full basis
if(local_err){
#pragma omp critical
err = local_err;
}
}
}
}
else{
// all quantum numbers zero and bosonic: the phase factor simplifies
// (cf. the commented-out get_amp_rep call) to sqrt(norm/per_factor)
#pragma omp parallel for schedule(dynamic,chunk)
for(npy_intp i=0;i<Ns;i++){
if(err == 0){
std::complex<double> phase_factor, out_tmp;
int g[__GENERAL_BASIS_CORE__max_nt];
P sign=1;
I ss=s[i];
I r = B->ref_state(ss,g,sign);
double norm_r = B->check_state(r);
s[i] = r; // update state with representative
if(!check_nan(norm_r) && norm_r > 0){ // ref_state is a representative
//phase_factor = get_amp_rep(B,nt,r,ss);
out_tmp = std::sqrt(norm_r/per_factor);
}
else{
out_tmp = 0.0;
}
int local_err = type_checks(out_tmp, &out[i]); // compute and assign amplitude in full basis
if(local_err){
#pragma omp critical
err = local_err;
}
}
}
}
return err;
}
// same as get_amp_general, but w/o calling ref_state and w/o validating the norm returned by check_state
// get_amp_general_light: like get_amp_general, but the inputs are assumed to
// already be representatives of the symmetry-reduced basis, so ref_state is
// never called.  NOTE(review): norm_r is used without the NaN/positivity
// check done in get_amp_general — a non-representative input would produce a
// NaN or inf amplitude; confirm that callers guarantee valid representatives.
template<class I,class J,class P=signed char>
int get_amp_general_light(general_basis_core<I,P> *B,
I s[], // input states in the symmetry-reduced basis
J out[], // state amplitudes of state s (symmetry-reduced basis)
const npy_intp Ns // length of above arrays (should be the same)
)
{
int err=0;
double per_factor = 1.0; // product of all symmetry periods
int q_sum = 0; // sum of quantum numbers
const int nt = B->get_nt();
for(int i=0;i<nt;i++){
per_factor *= B->pers[i];
q_sum += std::abs(B->qs[i]);
}
const npy_intp chunk = std::max(Ns/(100*omp_get_max_threads()),(npy_intp)1); // check_state has variable workload
if(q_sum > 0 || B->fermionic){ // a non-zero quantum number, or fermionic basis => need a nontrivial phase_factor
#pragma omp parallel for schedule(dynamic,chunk)
for(npy_intp i=0;i<Ns;i++){
if(err == 0){
std::complex<double> phase_factor, out_tmp;
I ss=s[i];
double norm_r = B->check_state(ss);
phase_factor = get_amp_rep(B,nt,ss,ss);
out_tmp = phase_factor/std::sqrt(norm_r * per_factor);
int local_err = type_checks(out_tmp, &out[i]); // compute and assign amplitude in full basis
if(local_err){
#pragma omp critical
err = local_err;
}
}
}
}
else{
// zero quantum numbers, bosonic: amplitude reduces to sqrt(norm/per_factor)
#pragma omp parallel for schedule(dynamic,chunk)
for(npy_intp i=0;i<Ns;i++){
if(err == 0){
std::complex<double> phase_factor, out_tmp;
double norm_r = B->check_state(s[i]);
out_tmp = std::sqrt(norm_r/per_factor);
int local_err = type_checks(out_tmp, &out[i]); // compute and assign amplitude in full basis
if(local_err){
#pragma omp critical
err = local_err;
}
}
}
}
return err;
}
}
#endif
|
SybasePROP_fmt_plug.c | /* SybasePROP cracker. Hacked together during November of 2013 by Dhiru Kholia
* <dhiru [at] openwall.com>.
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru [at] openwall.com>,
* Frank Benhamou, Gregory Terrien and Marcel Major and it is hereby released
* to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* All credits for reversing this algorithm go to Marcel Major, Frank Benhamou
* and Gregory Terrien. Dhiru Kholia just glued together the bits (as usual!).
*
* [1] http://www.nes.fr/securitylab/?p=1128 (in French!)
*
* [2] https://hacktivity.com/hu/letoltesek/archivum/57/
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sybaseprop;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sybaseprop);
#else
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "syb-prop_repro.h"
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 2048 // xxx
static int omp_t = 1;
#endif
#include "memdbg.h"
#define BLOCK_SIZE 8
#define FORMAT_LABEL "Sybase-PROP"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "salted FEAL-8 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 64
#define CIPHERTEXT_LENGTH (6 + 56)
#define PREFIX_VALUE "0x"
#define PREFIX_LENGTH 2
#define BINARY_SIZE 56 / 2
#define BINARY_ALIGN 4
#define SALT_SIZE 1 // see the definition of generate_hash, note "unsigned char seed" argument
#define SALT_SIZE_HEX 2
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* self-test vectors: native "0x..." hashes paired with known plaintexts */
static struct fmt_tests SybasePROP_tests[] = {
{"0x2905aeb3d00e3b80fb0695cb34c9fa9080f84ae1824b24cc51a3849dcb06", "test11"},
{"0x3f05fc3d526946d9936c63dd798c5fa1b980747b1d81d0b9b2e8197d2aca", "test12"},
{NULL}
};
static unsigned char saved_salt; /* one-byte salt installed by set_salt() */
static char (*saved_key)[PLAINTEXT_LENGTH + 1]; /* candidate keys, allocated in init() */
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; /* per-key hash output */
static void init(struct fmt_main *self)
{
/* One-time setup: scale the keys-per-crypt range for OpenMP, then allocate
   the key and hash buffers sized to max_keys_per_crypt. */
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc_tiny(sizeof(*saved_key) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
	const char *q;

	/* must start with the "0x" tag */
	if (strncmp(ciphertext, PREFIX_VALUE, PREFIX_LENGTH) != 0)
		return 0;
	/* fixed overall length: tag + salt + hash digits */
	if (strlen(ciphertext) != CIPHERTEXT_LENGTH)
		return 0;
	/* everything after the tag must be hexadecimal */
	for (q = ciphertext + PREFIX_LENGTH; *q; q++)
		if (atoi16[ARCH_INDEX(*q)] == 0x7f)
			return 0;
	return 1;
}
static void *get_binary(char *ciphertext)
{
/* Decode the hex hash payload into a static, word-aligned buffer.
   The returned pointer is only valid until the next call. */
static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy; /* forces word alignment of c */
} buf;
unsigned char *out = buf.c;
char *p;
int i;
p = ciphertext + PREFIX_LENGTH + SALT_SIZE_HEX + 2; // last 2 bytes always seem to be "05"
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static void *get_salt(char *ciphertext)
{
/* The salt is the single byte encoded by the two hex digits right after
   the "0x" prefix.  Returned via a static buffer (SALT_SIZE is 1; the
   caller copies it before the next call per the usual JtR contract). */
char *p = ciphertext + PREFIX_LENGTH;
static unsigned char salt;
salt = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
return (void*)&salt;
}
static void set_salt(void *salt)
{
	/* remember the single salt byte for the next crypt_all() run */
	saved_salt = *(unsigned char *)salt;
}
static void set_key(char *key, int index)
{
	/* store the candidate, truncated to PLAINTEXT_LENGTH bytes, with a
	   terminating NUL */
	size_t n = strlen(key);

	if (n > PLAINTEXT_LENGTH)
		n = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, n);
	saved_key[index][n] = '\0';
}
static char *get_key(int index)
{
/* Return the stored plaintext candidate for this index. */
return saved_key[index];
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
/* Hash every queued candidate with the current salt.  Without OpenMP the
   braced body runs exactly once with index 0, which matches the non-OMP
   MAX_KEYS_PER_CRYPT of 1. */
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
int g_seed = 0x3f; /* per-iteration seed value passed to generate_hash */
struct JtR_FEAL8_CTX ctx;
generate_hash((unsigned char*)saved_key[index], saved_salt,
(unsigned char*)crypt_out[index], &g_seed, &ctx);
}
return count;
}
static int cmp_all(void *binary, int count)
{
	int i;

	/* does any computed hash match the target binary? */
	for (i = 0; i < count; i++)
		if (memcmp(binary, crypt_out[i], BINARY_SIZE) == 0)
			return 1;
	return 0;
}
static int cmp_one(void *binary, int index)
{
	/* exact comparison against one computed hash */
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
static int cmp_exact(char *source, int index)
{
/* cmp_one() already compares all BINARY_SIZE bytes, so nothing more to do. */
return 1;
}
/* partial-hash helpers: return the low 4..27 bits of the first output word */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
/* John the Ripper format descriptor: parameters block followed by the
   method table wiring this format's callbacks into the cracker core. */
struct fmt_main fmt_sybaseprop = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
SybasePROP_tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
profile.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR OOO FFFFF IIIII L EEEEE %
% P P R R O O F I L E %
% PPPP RRRR O O FFF I L EEE %
% P R R O O F I L E %
% P R R OOO F IIIII LLLLL EEEEE %
% %
% %
% MagickCore Image Profile Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/option-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#else
#include <wchar.h>
#include "lcms2.h"
#endif
#endif
#if defined(MAGICKCORE_XML_DELEGATE)
# if defined(MAGICKCORE_WINDOWS_SUPPORT)
# if !defined(__MINGW32__)
# include <win32config.h>
# endif
# endif
# include <libxml/parser.h>
# include <libxml/tree.h>
#endif
/*
Forward declarations
*/
static MagickBooleanType
SetImageProfileInternal(Image *,const char *,const StringInfo *,
const MagickBooleanType,ExceptionInfo *);
static void
WriteTo8BimProfile(Image *,const char*,const StringInfo *);
/*
Typedef declarations
*/
struct _ProfileInfo
{
char
*name; /* profile name used as the lookup key */
size_t
length; /* number of bytes in info */
unsigned char
*info; /* raw profile payload */
size_t
signature; /* structure signature */
};
typedef struct _CMSExceptionInfo
{
Image
*image; /* image being color-managed */
ExceptionInfo
*exception; /* sink for lcms error reports */
} CMSExceptionInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageProfiles() clones one or more image profiles.
%
% The format of the CloneImageProfiles method is:
%
% MagickBooleanType CloneImageProfiles(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
const Image *clone_image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(clone_image != (const Image *) NULL);
assert(clone_image->signature == MagickCoreSignature);
if (clone_image->profiles != (void *) NULL)
{
/* replace any existing profile map with a deep copy of the source's;
   when the source has none, the destination's map is left untouched */
if (image->profiles != (void *) NULL)
DestroyImageProfiles(image);
image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
(void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageProfile() deletes a profile from the image by its name.
%
% The format of the DeleteImageProfile method is:
%
% MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return(MagickFalse);
/* NULL profile payload — presumably removes the matching 8BIM record too */
WriteTo8BimProfile(image,name,(StringInfo *) NULL);
return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageProfiles() releases memory associated with an image profile map.
%
% The format of the DestroyProfiles method is:
%
% void DestroyImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
/* Free the entire profile splay-tree; a no-op when no profiles exist. */
if (image->profiles != (SplayTreeInfo *) NULL)
image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageProfile() gets a profile associated with an image by name.
%
% The format of the GetImageProfile method is:
%
% const StringInfo *GetImageProfile(const Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
const char *name)
{
const StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
/* look the profile up by name; NULL when absent */
profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(profile);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageProfile() gets the next profile name for an image.
%
% The format of the GetNextImageProfile method is:
%
% char *GetNextImageProfile(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((char *) NULL);
/* advance the profile-name iterator; returns NULL at the end */
return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r o f i l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
% profile with / to / from an image. If the profile is NULL, it is removed
% from the image otherwise added or applied. Use a name of '*' and a profile
% of NULL to remove all profiles from the image.
%
% ICC and ICM profiles are handled as follows: If the image does not have
% an associated color profile, the one you provide is associated with the
% image and the image pixels are not transformed. Otherwise, the colorspace
% transform defined by the existing and new profile are applied to the image
% pixels and the new profile is associated with the image.
%
% The format of the ProfileImage method is:
%
% MagickBooleanType ProfileImage(Image *image,const char *name,
% const void *datum,const size_t length,const MagickBooleanType clone)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
% o datum: the profile data.
%
% o length: the length of the profile.
%
% o clone: should be MagickFalse.
%
*/
#if defined(MAGICKCORE_LCMS_DELEGATE)
typedef struct _LCMSInfo
{
ColorspaceType
colorspace;
cmsUInt32Number
type;
size_t
channels;
cmsHPROFILE
profile;
int
intent;
double
scale,
translate;
void
**magick_restrict pixels;
} LCMSInfo;
#if LCMS_VERSION < 2060
/*
  lcms < 2.6 has no per-context user data API; emulate it by letting the
  cmsContext handle itself carry the user-data pointer.
*/
static void* cmsGetContextUserData(cmsContext ContextID)
{
  return(ContextID);
}
/*
  Emulated cmsCreateContext for lcms < 2.6: the plug-in argument is ignored
  and the user-data pointer doubles as the "context" handle.
*/
static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData)
{
  magick_unreferenced(Plugin);
  return((cmsContext) UserData);
}
/*
  Emulated cmsSetLogErrorHandlerTHR for lcms < 2.6: per-context handlers do
  not exist, so install the handler globally.
*/
static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID),
  cmsLogErrorHandlerFunction Fn)
{
  magick_unreferenced(ContextID);
  cmsSetLogErrorHandler(Fn);
}
/*
  Emulated cmsDeleteContext for lcms < 2.6: the "context" is just a borrowed
  user-data pointer, so there is nothing to release.
*/
static void cmsDeleteContext(cmsContext magick_unused(ContextID))
{
  magick_unreferenced(ContextID);
}
#endif
static void **DestroyPixelThreadSet(void **pixels)
{
register ssize_t
i;
if (pixels == (void **) NULL)
return((void **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (void *) NULL)
pixels[i]=RelinquishMagickMemory(pixels[i]);
pixels=(void **) RelinquishMagickMemory(pixels);
return(pixels);
}
/*
  Allocate one scratch row buffer per worker thread, each large enough to
  hold `columns` pixels of `channels` samples.  Samples are doubles when
  highres is set, Quantum otherwise.  Returns NULL on allocation failure
  (any partially-built set is released first).
*/
static void **AcquirePixelThreadSet(const size_t columns,
  const size_t channels,MagickBooleanType highres)
{
  register ssize_t
    n;

  size_t
    bytes_per_sample,
    number_threads;

  void
    **pixels;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(void **) AcquireQuantumMemory(number_threads,sizeof(*pixels));
  if (pixels == (void **) NULL)
    return((void **) NULL);
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  bytes_per_sample=(highres != MagickFalse) ? sizeof(double) : sizeof(Quantum);
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    pixels[n]=AcquireQuantumMemory(columns,channels*bytes_per_sample);
    if (pixels[n] == (void *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}
/*
  Release a per-thread lcms transform set created by
  AcquireTransformThreadSet().  Unused slots are NULL and skipped.
  Always returns NULL for the transform=Destroy...(transform) idiom.

  NOTE(review): assumes the thread resource limit has not changed since the
  set was allocated — confirm; a changed limit would mis-size the sweep.
*/
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
  register ssize_t
    i;

  size_t
    number_threads;

  assert(transform != (cmsHTRANSFORM *) NULL);
  /* Hoist the resource-limit query out of the loop condition. */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < (ssize_t) number_threads; i++)
    if (transform[i] != (cmsHTRANSFORM) NULL)
      cmsDeleteTransform(transform[i]);
  transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
  return(transform);
}
/*
  Create one lcms transform handle per worker thread, all built from the
  same source/target profiles, pixel types, rendering intent, and flags.
  On any failure the partial set is destroyed and NULL is returned.
*/
static cmsHTRANSFORM *AcquireTransformThreadSet(const LCMSInfo *source_info,
  const LCMSInfo *target_info,const cmsUInt32Number flags,
  cmsContext cms_context)
{
  cmsHTRANSFORM
    *transforms;

  register ssize_t
    n;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  transforms=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
    sizeof(*transforms));
  if (transforms == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  (void) memset(transforms,0,number_threads*sizeof(*transforms));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    transforms[n]=cmsCreateTransformTHR(cms_context,source_info->profile,
      source_info->type,target_info->profile,target_info->type,
      target_info->intent,flags);
    if (transforms[n] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transforms));
  }
  return(transforms);
}
/*
  lcms error callback: route lcms diagnostics into the ImageMagick exception
  attached to the cmsContext's user data.  Raises an ImageWarning (never
  fatal) so a failed transform degrades gracefully.
*/
static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  CMSExceptionInfo
    *cms_exception;

  const char
    *text;

  ExceptionInfo
    *exception;

  Image
    *image;

  cms_exception=(CMSExceptionInfo *) cmsGetContextUserData(context);
  if (cms_exception == (CMSExceptionInfo *) NULL)
    return;
  exception=cms_exception->exception;
  if (exception == (ExceptionInfo *) NULL)
    return;
  text=message != (char *) NULL ? message : "no message";
  image=cms_exception->image;
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "UnableToTransformColorspace","`%s'","unknown context");
      return;
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
      severity,text);
  (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
    "UnableToTransformColorspace","`%s', %s (#%u)",image->filename,text,
    severity);
}
/*
  Pack one image row into this thread's double-precision scratch buffer,
  run the lcms transform, and write the transformed samples back into the
  pixel-cache row q.  High-resolution counterpart of
  TransformQuantumPixels(); scale/translate map between the Quantum range
  and the lcms floating-point ranges (e.g. 0..100 for CMYK/Lab).
*/
static void TransformDoublePixels(const int id,const Image* image,
  const LCMSInfo *source_info,const LCMSInfo *target_info,
  const cmsHTRANSFORM *transform,Quantum *q)
{
/* Map a Quantum sample into the lcms input range. */
#define GetLCMSPixel(source_info,pixel) \
  (source_info->scale*QuantumScale*(pixel)+source_info->translate)
/* Map an lcms output sample back into the Quantum range, clamped. */
#define SetLCMSPixel(target_info,pixel) \
  ClampToQuantum(target_info->scale*QuantumRange*(pixel)+target_info->translate)

  register double
    *p;

  register ssize_t
    x;

  /* Interleave the row's samples into the source scratch buffer. */
  p=(double *) source_info->pixels[id];
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    *p++=GetLCMSPixel(source_info,GetPixelRed(image,q));
    if (source_info->channels > 1)
      {
        *p++=GetLCMSPixel(source_info,GetPixelGreen(image,q));
        *p++=GetLCMSPixel(source_info,GetPixelBlue(image,q));
      }
    if (source_info->channels > 3)
      *p++=GetLCMSPixel(source_info,GetPixelBlack(image,q));
    q+=GetPixelChannels(image);
  }
  cmsDoTransform(transform[id],source_info->pixels[id],
    target_info->pixels[id],(unsigned int) image->columns);
  /* Rewind q to the start of the row and unpack the transformed samples. */
  p=(double *) target_info->pixels[id];
  q-=GetPixelChannels(image)*image->columns;
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (target_info->channels == 1)
      SetPixelGray(image,SetLCMSPixel(target_info,*p),q);
    else
      SetPixelRed(image,SetLCMSPixel(target_info,*p),q);
    p++;
    if (target_info->channels > 1)
      {
        SetPixelGreen(image,SetLCMSPixel(target_info,*p),q);
        p++;
        SetPixelBlue(image,SetLCMSPixel(target_info,*p),q);
        p++;
      }
    if (target_info->channels > 3)
      {
        SetPixelBlack(image,SetLCMSPixel(target_info,*p),q);
        p++;
      }
    q+=GetPixelChannels(image);
  }
}
/*
  Pack one image row into this thread's Quantum scratch buffer, run the
  lcms transform, and write the result back into the pixel-cache row q.
  Low-resolution counterpart of TransformDoublePixels(): samples are copied
  as raw Quantum values with no scaling.
*/
static void TransformQuantumPixels(const int id,const Image* image,
  const LCMSInfo *source_info,const LCMSInfo *target_info,
  const cmsHTRANSFORM *transform,Quantum *q)
{
  register Quantum
    *p;

  register ssize_t
    x;

  /* Interleave the row's samples into the source scratch buffer. */
  p=(Quantum *) source_info->pixels[id];
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    *p++=GetPixelRed(image,q);
    if (source_info->channels > 1)
      {
        *p++=GetPixelGreen(image,q);
        *p++=GetPixelBlue(image,q);
      }
    if (source_info->channels > 3)
      *p++=GetPixelBlack(image,q);
    q+=GetPixelChannels(image);
  }
  cmsDoTransform(transform[id],source_info->pixels[id],
    target_info->pixels[id],(unsigned int) image->columns);
  /* Rewind q to the start of the row and unpack the transformed samples. */
  p=(Quantum *) target_info->pixels[id];
  q-=GetPixelChannels(image)*image->columns;
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (target_info->channels == 1)
      SetPixelGray(image,*p++,q);
    else
      SetPixelRed(image,*p++,q);
    if (target_info->channels > 1)
      {
        SetPixelGreen(image,*p++,q);
        SetPixelBlue(image,*p++,q);
      }
    if (target_info->channels > 3)
      SetPixelBlack(image,*p++,q);
    q+=GetPixelChannels(image);
  }
}
#endif
/*
  SetsRGBImageProfile() attaches a built-in sRGB ICC profile under the key
  "icc" when the image does not already carry one.  Returns MagickFalse if
  an "icc" profile is already present (nothing is changed); otherwise the
  status of SetImageProfile().
*/
static MagickBooleanType SetsRGBImageProfile(Image *image,
  ExceptionInfo *exception)
{
  /*
    Embedded sRGB IEC61966-2.1 ICC profile (public domain, created by
    Graeme W. Gill — see the 'cprt' text inside the blob).
  */
  static unsigned char
    sRGBProfile[] =
    {
      0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00,
      0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20,
      0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a,
      0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00,
      0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6,
      0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
      0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99,
      0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67,
      0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70,
      0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88,
      0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c,
      0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67,
      0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24,
      0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14,
      0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24,
      0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14,
      0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14,
      0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14,
      0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14,
      0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14,
      0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
      0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
      0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
      0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
      0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36,
      0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76,
      0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77,
      0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39,
      0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c,
      0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31,
      0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75,
      0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77,
      0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20,
      0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66,
      0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61,
      0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d,
      0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52,
      0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f,
      0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20,
      0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57,
      0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65,
      0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e,
      0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20,
      0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69,
      0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74,
      0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e,
      0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
      0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e,
      0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47,
      0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61,
      0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43,
      0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44,
      0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63,
      0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20,
      0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00,
      0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
      0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
      0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c,
      0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2,
      0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01,
      0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d,
      0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00,
      0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0,
      0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87,
      0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4,
      0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
      0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19,
      0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37,
      0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54,
      0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72,
      0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90,
      0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae,
      0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb,
      0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb,
      0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d,
      0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32,
      0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59,
      0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83,
      0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1,
      0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1,
      0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14,
      0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b,
      0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84,
      0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1,
      0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00,
      0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43,
      0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a,
      0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3,
      0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20,
      0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71,
      0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4,
      0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c,
      0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77,
      0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5,
      0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37,
      0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d,
      0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07,
      0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74,
      0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5,
      0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a,
      0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2,
      0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f,
      0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf,
      0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54,
      0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc,
      0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69,
      0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9,
      0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e,
      0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26,
      0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3,
      0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64,
      0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09,
      0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3,
      0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61,
      0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13,
      0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9,
      0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84,
      0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43,
      0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06,
      0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce,
      0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b,
      0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c,
      0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41,
      0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b,
      0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa,
      0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd,
      0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5,
      0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2,
      0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3,
      0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99,
      0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94,
      0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94,
      0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98,
      0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1,
      0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf,
      0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2,
      0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda,
      0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7,
      0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18,
      0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f,
      0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b,
      0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b,
      0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1,
      0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c,
      0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c,
      0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91,
      0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb,
      0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a,
      0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f,
      0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8,
      0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37,
      0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c,
      0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05,
      0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74,
      0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8,
      0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61,
      0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0,
      0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64,
      0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee,
      0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d,
      0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12,
      0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab,
      0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b,
      0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0,
      0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a,
      0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a,
      0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00,
      0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb,
      0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c,
      0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42,
      0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f,
      0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0,
      0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8,
      0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95,
      0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78,
      0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61,
      0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f,
      0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43,
      0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d,
      0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d,
      0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43,
      0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f,
      0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60,
      0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78,
      0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95,
      0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8,
      0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1,
      0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11,
      0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46,
      0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81,
      0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2,
      0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a,
      0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57,
      0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab,
      0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04,
      0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64,
      0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca,
      0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36,
      0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8,
      0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20,
      0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f,
      0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24,
      0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf,
      0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40,
      0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8,
      0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76,
      0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a,
      0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4,
      0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75,
      0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d,
      0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea,
      0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae,
      0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79,
      0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a,
      0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21,
      0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff,
      0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3,
      0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce,
      0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf,
      0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7,
      0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5,
      0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba,
      0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6,
      0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8,
      0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1,
      0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10,
      0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36,
      0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63,
      0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96,
      0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0,
      0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11,
      0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58,
      0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7,
      0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb,
      0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57,
      0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba,
      0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff
    };

  StringInfo
    *profile;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /* Never overwrite an existing color profile. */
  if (GetImageProfile(image,"icc") != (const StringInfo *) NULL)
    return(MagickFalse);
  /* SetStringInfoDatum() copies the blob, so the local StringInfo can be
     destroyed once the profile is attached. */
  profile=AcquireStringInfo(sizeof(sRGBProfile));
  SetStringInfoDatum(profile,sRGBProfile);
  status=SetImageProfile(image,"icc",profile,exception);
  profile=DestroyStringInfo(profile);
  return(status);
}
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
const void *datum,const size_t length,ExceptionInfo *exception)
{
#define ProfileImageTag "Profile/Image"
#ifndef TYPE_XYZ_8
#define TYPE_XYZ_8 (COLORSPACE_SH(PT_XYZ)|CHANNELS_SH(3)|BYTES_SH(1))
#endif
#define ThrowProfileException(severity,tag,context) \
{ \
if (profile != (StringInfo *) NULL) \
profile=DestroyStringInfo(profile); \
if (cms_context != (cmsContext) NULL) \
cmsDeleteContext(cms_context); \
if (source_info.profile != (cmsHPROFILE) NULL) \
(void) cmsCloseProfile(source_info.profile); \
if (target_info.profile != (cmsHPROFILE) NULL) \
(void) cmsCloseProfile(target_info.profile); \
ThrowBinaryException(severity,tag,context); \
}
MagickBooleanType
status;
StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(name != (const char *) NULL);
if ((datum == (const void *) NULL) || (length == 0))
{
char
*next;
/*
Delete image profile(s).
*/
ResetImageProfileIterator(image);
for (next=GetNextImageProfile(image); next != (const char *) NULL; )
{
if (IsOptionMember(next,name) != MagickFalse)
{
(void) DeleteImageProfile(image,next);
ResetImageProfileIterator(image);
}
next=GetNextImageProfile(image);
}
return(MagickTrue);
}
/*
Add a ICC, IPTC, or generic profile to the image.
*/
status=MagickTrue;
profile=AcquireStringInfo((size_t) length);
SetStringInfoDatum(profile,(unsigned char *) datum);
if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
status=SetImageProfile(image,name,profile,exception);
else
{
const StringInfo
*icc_profile;
icc_profile=GetImageProfile(image,"icc");
if ((icc_profile != (const StringInfo *) NULL) &&
(CompareStringInfo(icc_profile,profile) == 0))
{
const char
*value;
value=GetImageProperty(image,"exif:ColorSpace",exception);
(void) value;
if (LocaleCompare(value,"1") != 0)
(void) SetsRGBImageProfile(image,exception);
value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
if (LocaleCompare(value,"R98.") != 0)
(void) SetsRGBImageProfile(image,exception);
icc_profile=GetImageProfile(image,"icc");
}
if ((icc_profile != (const StringInfo *) NULL) &&
(CompareStringInfo(icc_profile,profile) == 0))
{
profile=DestroyStringInfo(profile);
return(MagickTrue);
}
#if !defined(MAGICKCORE_LCMS_DELEGATE)
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
"'%s' (LCMS)",image->filename);
#else
{
cmsContext
cms_context;
CMSExceptionInfo
cms_exception;
LCMSInfo
source_info,
target_info;
/*
Transform pixel colors as defined by the color profiles.
*/
cms_exception.image=image;
cms_exception.exception=exception;
cms_context=cmsCreateContext(NULL,&cms_exception);
if (cms_context == (cmsContext) NULL)
ThrowBinaryException(ResourceLimitError,
"ColorspaceColorProfileMismatch",name);
cmsSetLogErrorHandlerTHR(cms_context,CMSExceptionHandler);
source_info.profile=cmsOpenProfileFromMemTHR(cms_context,
GetStringInfoDatum(profile),(cmsUInt32Number)
GetStringInfoLength(profile));
if (source_info.profile == (cmsHPROFILE) NULL)
{
cmsDeleteContext(cms_context);
ThrowBinaryException(ResourceLimitError,
"ColorspaceColorProfileMismatch",name);
}
if ((cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass) &&
(icc_profile == (StringInfo *) NULL))
status=SetImageProfile(image,name,profile,exception);
else
{
CacheView
*image_view;
cmsColorSpaceSignature
signature;
cmsHTRANSFORM
*magick_restrict transform;
cmsUInt32Number
flags;
#if !defined(MAGICKCORE_HDRI_SUPPORT)
const char
*artifact;
#endif
MagickBooleanType
highres;
MagickOffsetType
progress;
ssize_t
y;
target_info.profile=(cmsHPROFILE) NULL;
if (icc_profile != (StringInfo *) NULL)
{
target_info.profile=source_info.profile;
source_info.profile=cmsOpenProfileFromMemTHR(cms_context,
GetStringInfoDatum(icc_profile),
(cmsUInt32Number) GetStringInfoLength(icc_profile));
if (source_info.profile == (cmsHPROFILE) NULL)
ThrowProfileException(ResourceLimitError,
"ColorspaceColorProfileMismatch",name);
}
highres=MagickTrue;
#if !defined(MAGICKCORE_HDRI_SUPPORT)
artifact=GetImageArtifact(image,"profile:highres-transform");
if (IsStringFalse(artifact) != MagickFalse)
highres=MagickFalse;
#endif
source_info.scale=1.0;
source_info.translate=0.0;
source_info.colorspace=sRGBColorspace;
source_info.channels=3;
switch (cmsGetColorSpace(source_info.profile))
{
case cmsSigCmykData:
{
source_info.colorspace=CMYKColorspace;
source_info.channels=4;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_CMYK_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_CMYK_16;
else
#endif
{
source_info.type=(cmsUInt32Number) TYPE_CMYK_DBL;
source_info.scale=100.0;
}
break;
}
case cmsSigGrayData:
{
source_info.colorspace=GRAYColorspace;
source_info.channels=1;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_GRAY_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_GRAY_16;
else
#endif
source_info.type=(cmsUInt32Number) TYPE_GRAY_DBL;
break;
}
case cmsSigLabData:
{
source_info.colorspace=LabColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_Lab_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_Lab_16;
else
#endif
{
source_info.type=(cmsUInt32Number) TYPE_Lab_DBL;
source_info.scale=100.0;
source_info.translate=(-0.5);
}
break;
}
case cmsSigRgbData:
{
source_info.colorspace=sRGBColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_RGB_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_RGB_16;
else
#endif
source_info.type=(cmsUInt32Number) TYPE_RGB_DBL;
break;
}
case cmsSigXYZData:
{
source_info.colorspace=XYZColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_XYZ_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_XYZ_16;
else
#endif
source_info.type=(cmsUInt32Number) TYPE_XYZ_DBL;
break;
}
default:
ThrowProfileException(ImageError,
"ColorspaceColorProfileMismatch",name);
}
signature=cmsGetPCS(source_info.profile);
if (target_info.profile != (cmsHPROFILE) NULL)
signature=cmsGetColorSpace(target_info.profile);
target_info.scale=1.0;
target_info.translate=0.0;
target_info.channels=3;
switch (signature)
{
case cmsSigCmykData:
{
target_info.colorspace=CMYKColorspace;
target_info.channels=4;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_CMYK_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_CMYK_16;
else
#endif
{
target_info.type=(cmsUInt32Number) TYPE_CMYK_DBL;
target_info.scale=0.01;
}
break;
}
case cmsSigGrayData:
{
target_info.colorspace=GRAYColorspace;
target_info.channels=1;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_GRAY_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_GRAY_16;
else
#endif
target_info.type=(cmsUInt32Number) TYPE_GRAY_DBL;
break;
}
case cmsSigLabData:
{
target_info.colorspace=LabColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_Lab_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_Lab_16;
else
#endif
{
target_info.type=(cmsUInt32Number) TYPE_Lab_DBL;
target_info.scale=0.01;
target_info.translate=0.5;
}
break;
}
case cmsSigRgbData:
{
target_info.colorspace=sRGBColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_RGB_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_RGB_16;
else
#endif
target_info.type=(cmsUInt32Number) TYPE_RGB_DBL;
break;
}
case cmsSigXYZData:
{
target_info.colorspace=XYZColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (highres == MagickFalse)
target_info.type=(cmsUInt32Number) TYPE_XYZ_8;
else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
if (highres == MagickFalse)
source_info.type=(cmsUInt32Number) TYPE_XYZ_16;
else
#endif
target_info.type=(cmsUInt32Number) TYPE_XYZ_DBL;
break;
}
default:
ThrowProfileException(ImageError,
"ColorspaceColorProfileMismatch",name);
}
switch (image->rendering_intent)
{
case AbsoluteIntent:
{
target_info.intent=INTENT_ABSOLUTE_COLORIMETRIC;
break;
}
case PerceptualIntent:
{
target_info.intent=INTENT_PERCEPTUAL;
break;
}
case RelativeIntent:
{
target_info.intent=INTENT_RELATIVE_COLORIMETRIC;
break;
}
case SaturationIntent:
{
target_info.intent=INTENT_SATURATION;
break;
}
default:
{
target_info.intent=INTENT_PERCEPTUAL;
break;
}
}
flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
if (image->black_point_compensation != MagickFalse)
flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
transform=AcquireTransformThreadSet(&source_info,&target_info,
flags,cms_context);
if (transform == (cmsHTRANSFORM *) NULL)
ThrowProfileException(ImageError,"UnableToCreateColorTransform",
name);
/*
Transform image as dictated by the source & target image profiles.
*/
source_info.pixels=AcquirePixelThreadSet(image->columns,
source_info.channels,highres);
target_info.pixels=AcquirePixelThreadSet(image->columns,
target_info.channels,highres);
if ((source_info.pixels == (void **) NULL) ||
(target_info.pixels == (void **) NULL))
{
target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
transform=DestroyTransformThreadSet(transform);
ThrowProfileException(ResourceLimitError,
"MemoryAllocationFailed",image->filename);
}
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
{
target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
transform=DestroyTransformThreadSet(transform);
if (source_info.profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(source_info.profile);
if (target_info.profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(target_info.profile);
return(MagickFalse);
}
if (target_info.colorspace == CMYKColorspace)
(void) SetImageColorspace(image,target_info.colorspace,exception);
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
if (highres != MagickFalse)
TransformDoublePixels(id,image,&source_info,&target_info,transform,q);
else
TransformQuantumPixels(id,image,&source_info,&target_info,transform,q);
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ProfileImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
(void) SetImageColorspace(image,target_info.colorspace,exception);
switch (signature)
{
case cmsSigRgbData:
{
image->type=image->alpha_trait == UndefinedPixelTrait ?
TrueColorType : TrueColorAlphaType;
break;
}
case cmsSigCmykData:
{
image->type=image->alpha_trait == UndefinedPixelTrait ?
ColorSeparationType : ColorSeparationAlphaType;
break;
}
case cmsSigGrayData:
{
image->type=image->alpha_trait == UndefinedPixelTrait ?
GrayscaleType : GrayscaleAlphaType;
break;
}
default:
break;
}
target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
transform=DestroyTransformThreadSet(transform);
if ((status != MagickFalse) &&
(cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass))
status=SetImageProfile(image,name,profile,exception);
if (target_info.profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(target_info.profile);
}
(void) cmsCloseProfile(source_info.profile);
cmsDeleteContext(cms_context);
}
#endif
}
profile=DestroyStringInfo(profile);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m o v e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemoveImageProfile() removes a named profile from the image and returns its
% value.
%
% The format of the RemoveImageProfile method is:
%
% StringInfo *RemoveImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  /*
    Detach the named profile from the image and hand it back to the caller,
    who assumes ownership.  Returns NULL when the image has no profiles.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /* Strip any embedded copy of this profile from the 8BIM wrapper first. */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return((StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t P r o f i l e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageProfileIterator() resets the image profile iterator. Use it in
% conjunction with GetNextImageProfile() to iterate over all the profiles
% associated with an image.
%
% The format of the ResetImageProfileIterator method is:
%
% void ResetImageProfileIterator(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  /*
    Rewind the profile iterator so GetNextImageProfile() starts over.
    A no-op when the image carries no profiles at all.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles != (SplayTreeInfo *) NULL)
    ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageProfile() adds a named profile to the image. If a profile with the
% same name already exists, it is replaced. This method differs from the
% ProfileImage() method in that it does not apply CMS color profiles.
%
% The format of the SetImageProfile method is:
%
% MagickBooleanType SetImageProfile(Image *image,const char *name,
% const StringInfo *profile)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name, for example icc, exif, and 8bim (8bim is the
% Photoshop wrapper for iptc profiles).
%
% o profile: A StringInfo structure that contains the named profile.
%
*/
static void *DestroyProfile(void *profile)
{
return((void *) DestroyStringInfo((StringInfo *) profile));
}
/*
  Copy one byte from the resource stream into *quantum and return the
  advanced read pointer.
*/
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  *quantum=p[0];
  return(p+1);
}
/*
  Decode a big-endian 32-bit value from the resource stream and return the
  advanced read pointer.
*/
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  unsigned int *quantum)
{
  unsigned int
    value;

  value=((unsigned int) p[0] << 24) | ((unsigned int) p[1] << 16) |
    ((unsigned int) p[2] << 8) | (unsigned int) p[3];
  *quantum=value;
  return(p+4);
}
/*
  Decode a big-endian 16-bit value from the resource stream and return the
  advanced read pointer.
*/
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  *quantum=(unsigned short) (((unsigned int) p[0] << 8) |
    (unsigned int) p[1]);
  return(p+2);
}
/*
  Encode a 32-bit value big-endian into the resource stream at p.
*/
static inline void WriteResourceLong(unsigned char *p,
  const unsigned int quantum)
{
  p[0]=(unsigned char) (quantum >> 24);
  p[1]=(unsigned char) (quantum >> 16);
  p[2]=(unsigned char) (quantum >> 8);
  p[3]=(unsigned char) quantum;
}
/*
  Replace (or remove, when profile is NULL) the embedded copy of the named
  profile inside the image's Photoshop "8bim" resource block, keeping the
  8BIM wrapper in sync with the standalone profile.  Only icc, iptc and xmp
  have 8BIM resource ids; any other name is ignored.
*/
static void WriteTo8BimProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  const unsigned char
    *datum,
    *q;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile_8bim;

  ssize_t
    count;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id,
    profile_id;

  /* Map the profile name to its Photoshop resource id. */
  if (LocaleCompare(name,"icc") == 0)
    profile_id=0x040f;
  else
    if (LocaleCompare(name,"iptc") == 0)
      profile_id=0x0404;
    else
      if (LocaleCompare(name,"xmp") == 0)
        profile_id=0x0424;
      else
        return;
  profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,"8bim");
  if (profile_8bim == (StringInfo *) NULL)
    return;
  datum=GetStringInfoDatum(profile_8bim);
  length=GetStringInfoLength(profile_8bim);
  for (p=datum; p < (datum+length-16); )
  {
    q=p;  /* remember the start of this resource record */
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;  /* skip the Pascal-style resource name */
    if (((length_byte+1) & 0x01) != 0)
      p++;  /* name is padded to an even byte count */
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((count & 0x01) != 0)
      count++;  /* resource payload is padded to an even length */
    if ((count < 0) || (p > (datum+length-count)) || (count > (ssize_t) length))
      break;  /* payload would run past the end of the block */
    if (id != profile_id)
      p+=count;  /* not the record we are after; skip its payload */
    else
      {
        size_t
          extent,
          offset;

        ssize_t
          extract_extent;

        StringInfo
          *extract_profile;

        /* Rebuild the 8bim block: bytes before this record, optionally the
           replacement payload, then the bytes after the record. */
        extract_extent=0;
        extent=(datum+length)-(p+count);
        if (profile == (StringInfo *) NULL)
          {
            /* Removal: drop the whole record, header included. */
            offset=(q-datum);
            extract_profile=AcquireStringInfo(offset+extent);
            (void) memcpy(extract_profile->datum,datum,offset);
          }
        else
          {
            /* Replacement: keep the header, patch its 4-byte length field
               and splice in the new payload (padded to an even length). */
            offset=(p-datum);
            extract_extent=profile->length;
            if ((extract_extent & 0x01) != 0)
              extract_extent++;
            extract_profile=AcquireStringInfo(offset+extract_extent+extent);
            (void) memcpy(extract_profile->datum,datum,offset-4);
            WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
              profile->length);
            (void) memcpy(extract_profile->datum+offset,
              profile->datum,profile->length);
          }
        (void) memcpy(extract_profile->datum+offset+extract_extent,
          p+count,extent);
        (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
          ConstantString("8bim"),CloneStringInfo(extract_profile));
        extract_profile=DestroyStringInfo(extract_profile);
        break;
      }
  }
}
/*
  Walk a Photoshop 8BIM resource block and lift the embedded resources we
  understand -- resolution (0x03ED) plus iptc/icc/exif/xmp profiles -- into
  first-class image profiles.
*/
static void GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block,ExceptionInfo *exception)
{
  const unsigned char
    *datum;

  register const unsigned char
    *p;

  size_t
    length;

  ssize_t
    count;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  for (p=datum; p < (datum+length-16); )
  {
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;  /* skip the Pascal-style resource name */
    if (((length_byte+1) & 0x01) != 0)
      p++;  /* name is padded to an even byte count */
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((p > (datum+length-count)) || (count > (ssize_t) length) || (count < 0))
      break;  /* payload would run past the end of the block */
    switch (id)
    {
      case 0x03ed:
      {
        unsigned int
          resolution;

        unsigned short
          units;

        /*
          Resolution.
        */
        if (count < 10)
          break;
        p=ReadResourceLong(p,&resolution);
        image->resolution.x=((double) resolution)/65536.0;  /* 16.16 fixed */
        p=ReadResourceShort(p,&units)+2;
        p=ReadResourceLong(p,&resolution)+4;
        image->resolution.y=((double) resolution)/65536.0;
        /*
          Values are always stored as pixels per inch.
        */
        if ((ResolutionType) units != PixelsPerCentimeterResolution)
          image->units=PixelsPerInchResolution;
        else
          {
            image->units=PixelsPerCentimeterResolution;
            image->resolution.x/=2.54;
            image->resolution.y/=2.54;
          }
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        /* recursive=MagickTrue prevents writing back into 8bim. */
        (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"icc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"exif",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        /* Unhandled resource: skip its payload. */
        p+=count;
        break;
      }
    }
    if ((count & 0x01) != 0)
      p++;  /* payload is padded to an even length */
  }
}
#if defined(MAGICKCORE_XML_DELEGATE)
/*
  Returns MagickTrue when the XMP profile parses as well-formed XML.
*/
static MagickBooleanType ValidateXMPProfile(const StringInfo *profile)
{
  xmlDocPtr
    document;

  /*
    Parse XML profile.
  */
  document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int)
    GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR |
    XML_PARSE_NOWARNING);
  if (document == (xmlDocPtr) NULL)
    return(MagickFalse);
  xmlFreeDoc(document);
  return(MagickTrue);
}
#else
static MagickBooleanType ValidateXMPProfile(const StringInfo *profile)
{
  /* Without the XML delegate we cannot validate, so every XMP profile is
     reported invalid (the caller then warns and drops it). */
  return(MagickFalse);
}
#endif
/*
  Attach a named profile to the image.  An "8bim" profile is additionally
  unpacked so its embedded resources become first-class profiles; any other
  profile is mirrored into the 8bim wrapper unless recursive is MagickTrue
  (which breaks the mutual recursion with GetProfilesFromResourceBlock).
*/
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
  const StringInfo *profile,const MagickBooleanType recursive,
  ExceptionInfo *exception)
{
  char
    key[MagickPathExtent];

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Reject malformed XMP early: a warning (not an error) is raised, the
     profile is dropped, and MagickTrue is still returned. */
  if ((LocaleCompare(name,"xmp") == 0) &&
      (ValidateXMPProfile(profile) == MagickFalse))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "CorruptImageProfile","`%s'",name);
      return(MagickTrue);
    }
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
      DestroyProfile);
  /* Profiles are keyed by lowercased name; the tree owns key and value. */
  (void) CopyMagickString(key,name,MagickPathExtent);
  LocaleLower(key);
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(key),CloneStringInfo(profile));
  if (status != MagickFalse)
    {
      if (LocaleCompare(name,"8bim") == 0)
        GetProfilesFromResourceBlock(image,profile,exception);
      else
        if (recursive == MagickFalse)
          WriteTo8BimProfile(image,name,profile);
    }
  return(status);
}
/*
  Public entry point: attach a named profile non-recursively (the profile is
  also mirrored into the image's 8bim wrapper when applicable).
*/
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=SetImageProfileInternal(image,name,profile,MagickFalse,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageProfiles() synchronizes image properties with the image profiles.
% Currently we only support updating the EXIF resolution and orientation.
%
% The format of the SyncImageProfiles method is:
%
% MagickBooleanType SyncImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Consume one byte from the buffer, advancing the cursor and shrinking the
  remaining length; returns EOF once the buffer is exhausted.
*/
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  int
    c;

  if (*length == 0)
    return(EOF);
  c=(int) **p;
  (*p)++;
  (*length)--;
  return(c);
}
/*
  Assemble a 16-bit value from buffer honoring the profile's byte order;
  the union reinterprets the bit pattern as signed without undefined
  behavior.
*/
static inline signed short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned short
    value;

  if (endian == LSBEndian)
    value=(unsigned short) (((unsigned int) buffer[1] << 8) |
      (unsigned int) buffer[0]);
  else
    value=(unsigned short) (((unsigned int) buffer[0] << 8) |
      (unsigned int) buffer[1]);
  quantum.unsigned_value=value & 0xffff;
  return(quantum.signed_value);
}
/*
  Assemble a 32-bit value from buffer honoring the profile's byte order;
  the union reinterprets the bit pattern as signed without undefined
  behavior.
*/
static inline signed int ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned int
    value;

  if (endian == LSBEndian)
    value=((unsigned int) buffer[3] << 24) | ((unsigned int) buffer[2] << 16) |
      ((unsigned int) buffer[1] << 8) | (unsigned int) buffer[0];
  else
    value=((unsigned int) buffer[0] << 24) | ((unsigned int) buffer[1] << 16) |
      ((unsigned int) buffer[2] << 8) | (unsigned int) buffer[3];
  quantum.unsigned_value=value & 0xffffffff;
  return(quantum.signed_value);
}
/*
  Read a big-endian 32-bit value, advancing the cursor; returns 0 when
  fewer than 4 bytes remain (cursor untouched).
*/
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
  signed int
    value;

  if (*length < 4)
    return(0);
  value=ReadProfileLong(MSBEndian,*p);
  *p+=4;
  (*length)-=4;
  return(value);
}
/*
  Read a big-endian 16-bit value, advancing the cursor; returns 0 when
  fewer than 2 bytes remain (cursor untouched).
*/
static inline signed short ReadProfileMSBShort(unsigned char **p,
  size_t *length)
{
  signed short
    value;

  if (*length < 2)
    return(0);
  value=ReadProfileShort(MSBEndian,*p);
  *p+=2;
  (*length)-=2;
  return(value);
}
/*
  Serialize the low 32 bits of value at p in the requested byte order.
*/
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  unsigned char
    buffer[4];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      buffer[2]=(unsigned char) (value >> 16);
      buffer[3]=(unsigned char) (value >> 24);
    }
  else
    {
      buffer[0]=(unsigned char) (value >> 24);
      buffer[1]=(unsigned char) (value >> 16);
      buffer[2]=(unsigned char) (value >> 8);
      buffer[3]=(unsigned char) value;
    }
  (void) memcpy(p,buffer,4);
}
/*
  Serialize a 16-bit value at p in the requested byte order.
*/
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
    }
  else
    {
      buffer[0]=(unsigned char) (value >> 8);
      buffer[1]=(unsigned char) value;
    }
  (void) memcpy(p,buffer,2);
}
/*
  Update the resolution record (id 0x03ED) embedded in an 8BIM profile so
  it matches the image's current resolution and units.  Returns MagickFalse
  when the record structure is truncated or inconsistent.
*/
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
  size_t
    length;

  ssize_t
    count;

  unsigned char
    *p;

  unsigned short
    id;

  length=GetStringInfoLength(profile);
  p=GetStringInfoDatum(profile);
  while (length != 0)
  {
    /* Scan byte-wise for the "8BIM" record signature. */
    if (ReadProfileByte(&p,&length) != 0x38)
      continue;
    if (ReadProfileByte(&p,&length) != 0x42)
      continue;
    if (ReadProfileByte(&p,&length) != 0x49)
      continue;
    if (ReadProfileByte(&p,&length) != 0x4D)
      continue;
    if (length < 7)
      return(MagickFalse);
    id=ReadProfileMSBShort(&p,&length);
    count=(ssize_t) ReadProfileByte(&p,&length);  /* resource name length */
    if ((count >= (ssize_t) length) || (count < 0))
      return(MagickFalse);
    p+=count;  /* skip the Pascal-style resource name */
    length-=count;
    if ((*p & 0x01) == 0)
      (void) ReadProfileByte(&p,&length);  /* skip pad byte to even offset */
    count=(ssize_t) ReadProfileMSBLong(&p,&length);  /* payload length */
    if ((count > (ssize_t) length) || (count < 0))
      return(MagickFalse);
    if ((id == 0x3ED) && (count == 16))
      {
        /* Resolution record: two 16.16 fixed-point values (x then y), each
           followed by a 16-bit units field; stored as pixels per inch. */
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*2.54*
            65536.0),p);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*
            65536.0),p);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*2.54*
            65536.0),p+8);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*
            65536.0),p+8);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
      }
    p+=count;
    length-=count;
  }
  return(MagickTrue);
}
/*
  Update the EXIF resolution, resolution-unit and orientation tags in place
  so they reflect the image's current properties.  Walks the TIFF IFD
  structure iteratively with an explicit directory stack and a visited-set
  to defend against pointer loops in hostile profiles.  Returns MagickFalse
  when the profile is too small or structurally invalid to walk.
*/
MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_INTEROP_OFFSET 0xa005

  typedef struct _DirectoryInfo
  {
    unsigned char
      *directory;

    size_t
      entry;
  } DirectoryInfo;

  DirectoryInfo
    directory_stack[MaxDirectoryStack];

  EndianType
    endian;

  size_t
    entry,
    length,
    number_entries;

  SplayTreeInfo
    *exif_resources;

  ssize_t
    id,
    level,
    offset;

  static int
    format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};

  unsigned char
    *directory,
    *exif;

  /*
    Set EXIF resolution tag.
  */
  length=GetStringInfoLength(profile);
  exif=GetStringInfoDatum(profile);
  if (length < 16)
    return(MagickFalse);
  id=(ssize_t) ReadProfileShort(LSBEndian,exif);
  if ((id != 0x4949) && (id != 0x4D4D))
    {
      /* No TIFF byte-order mark up front: scan forward for an "Exif\0\0"
         header (e.g. when the profile still carries its APP1 wrapper). */
      while (length != 0)
      {
        if (ReadProfileByte(&exif,&length) != 0x45)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x78)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x69)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x66)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        break;
      }
      if (length < 16)
        return(MagickFalse);
      id=(ssize_t) ReadProfileShort(LSBEndian,exif);
    }
  endian=LSBEndian;
  if (id == 0x4949)
    endian=LSBEndian;  /* "II": little-endian TIFF */
  else
    if (id == 0x4D4D)
      endian=MSBEndian;  /* "MM": big-endian TIFF */
    else
      return(MagickFalse);
  if (ReadProfileShort(endian,exif+2) != 0x002a)
    return(MagickFalse);  /* missing TIFF magic number 42 */
  /*
    This the offset to the first IFD.
  */
  offset=(ssize_t) ReadProfileLong(endian,exif+4);
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  /* Visited-entry set: breaks cycles in maliciously linked IFDs. */
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    if (level > 0)
      {
        /* Resume a previously pushed directory. */
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    if ((directory < exif) || (directory > (exif+length-2)))
      break;
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      int
        components;

      register unsigned char
        *p,
        *q;

      size_t
        number_bytes;

      ssize_t
        format,
        tag_value;

      q=(unsigned char *) (directory+2+(12*entry));  /* 12-byte IFD entry */
      if (q > (exif+length-12))
        break; /* corrupt EXIF */
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;  /* already visited: pointer cycle */
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
        break;
      components=(int) ReadProfileLong(endian,q+4);
      if (components < 0)
        break; /* corrupt EXIF */
      number_bytes=(size_t) components*format_bytes[format];
      if ((ssize_t) number_bytes < components)
        break; /* prevent overflow */
      if (number_bytes <= 4)
        p=q+8;  /* value fits inline in the entry */
      else
        {
          /*
            The directory entry contains an offset.
          */
          offset=(ssize_t) ReadProfileLong(endian,q+8);
          if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
            continue;
          if (~length < number_bytes)
            continue; /* prevent overflow */
          p=(unsigned char *) (exif+offset);
        }
      switch (tag_value)
      {
        case 0x011a:
        {
          /* XResolution: rational numerator; denominator forced to 1. */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x011b:
        {
          /* YResolution. */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x0112:
        {
          /* Orientation. */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) image->orientation,p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) image->orientation,
            p);
          break;
        }
        case 0x0128:
        {
          /* ResolutionUnit: EXIF values are offset by one from ours. */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) (image->units+1),p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
          break;
        }
        default:
          break;
      }
      if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
        {
          /* Descend into the sub-IFD: push where to resume this directory,
             push the sub-IFD, and also queue the next chained IFD. */
          offset=(ssize_t) ReadProfileLong(endian,p);
          if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
            {
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
                number_entries));
              if ((offset != 0) && ((size_t) offset < length) &&
                  (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}
/*
  Push the image's current resolution/units/orientation into its 8BIM and
  EXIF profiles; returns MagickFalse when either profile failed to sync.
*/
MagickPrivate MagickBooleanType SyncImageProfiles(Image *image)
{
  MagickBooleanType
    status;

  StringInfo
    *profile;

  status=MagickTrue;
  profile=(StringInfo *) GetImageProfile(image,"8BIM");
  if ((profile != (StringInfo *) NULL) &&
      (Sync8BimProfile(image,profile) == MagickFalse))
    status=MagickFalse;
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if ((profile != (StringInfo *) NULL) &&
      (SyncExifProfile(image,profile) == MagickFalse))
    status=MagickFalse;
  return(status);
}
/*
  Rescale a single Photoshop clip-path resource in place: knot coordinates
  are stored as fixed-point fractions of the canvas (denominator 4096*4096),
  so they must be remapped when the image is cropped or resized to
  new_geometry.
*/
static void UpdateClipPath(unsigned char *blob,size_t length,
  const size_t old_columns,const size_t old_rows,
  const RectangleInfo *new_geometry)
{
  register ssize_t
    i;

  ssize_t
    knot_count,
    selector;

  knot_count=0;
  while (length != 0)
  {
    selector=(ssize_t) ReadProfileMSBShort(&blob,&length);
    switch (selector)
    {
      case 0:
      case 3:
      {
        if (knot_count != 0)
          {
            /* Still inside a subpath: length record unexpected, skip it. */
            blob+=24;
            length-=MagickMin(24,(ssize_t) length);
            break;
          }
        /*
          Expected subpath length record.
        */
        knot_count=(ssize_t) ReadProfileMSBShort(&blob,&length);
        blob+=22;
        length-=MagickMin(22,(ssize_t) length);
        break;
      }
      case 1:
      case 2:
      case 4:
      case 5:
      {
        if (knot_count == 0)
          {
            /*
              Unexpected subpath knot.
            */
            blob+=24;
            length-=MagickMin(24,(ssize_t) length);
            break;
          }
        /*
          Add sub-path knot
        */
        for (i=0; i < 3; i++)
        {
          double
            x,
            y;

          signed int
            xx,
            yy;

          /* Each point is stored y then x.  Convert to pixels, shift by the
             new origin, renormalize to the new geometry, and write the
             result back over the 4 bytes just read (hence blob-4). */
          y=(double) ReadProfileMSBLong(&blob,&length);
          y=y*old_rows/4096/4096;
          y-=new_geometry->y;
          yy=(signed int) ((y*4096*4096)/new_geometry->height);
          WriteProfileLong(MSBEndian,(size_t) yy,blob-4);
          x=(double) ReadProfileMSBLong(&blob,&length);
          x=x*old_columns/4096/4096;
          x-=new_geometry->x;
          xx=(signed int) ((x*4096*4096)/new_geometry->width);
          WriteProfileLong(MSBEndian,(size_t) xx,blob-4);
        }
        knot_count--;
        break;
      }
      case 6:
      case 7:
      case 8:
      default:
      {
        /* Records we do not rescale: skip their fixed 24-byte payload. */
        blob+=24;
        length-=MagickMin(24,(ssize_t) length);
        break;
      }
    }
  }
}
/*
  Locate every clip-path resource (Photoshop path ids 2000-2998) in an 8BIM
  profile and rescale its knots to the new image geometry.
*/
MagickPrivate void Update8BIMClipPath(const StringInfo *profile,
  const size_t old_columns,const size_t old_rows,
  const RectangleInfo *new_geometry)
{
  unsigned char
    *info;

  size_t
    length;

  ssize_t
    count,
    id;

  assert(profile != (StringInfo *) NULL);
  assert(new_geometry != (RectangleInfo *) NULL);
  length=GetStringInfoLength(profile);
  info=GetStringInfoDatum(profile);
  while (length > 0)
  {
    /* Scan byte-wise for the "8BIM" record signature. */
    if (ReadProfileByte(&info,&length) != (unsigned char) '8')
      continue;
    if (ReadProfileByte(&info,&length) != (unsigned char) 'B')
      continue;
    if (ReadProfileByte(&info,&length) != (unsigned char) 'I')
      continue;
    if (ReadProfileByte(&info,&length) != (unsigned char) 'M')
      continue;
    id=(ssize_t) ReadProfileMSBShort(&info,&length);
    count=(ssize_t) ReadProfileByte(&info,&length);  /* resource name length */
    if ((count != 0) && ((size_t) count <= length))
      {
        info+=count;  /* skip the Pascal-style resource name */
        length-=count;
      }
    if ((count & 0x01) == 0)
      (void) ReadProfileByte(&info,&length);  /* skip pad byte to even offset */
    count=(ssize_t) ReadProfileMSBLong(&info,&length);  /* payload length */
    if ((count < 0) || ((size_t) count > length))
      {
        /* Corrupt record: abandon the scan. */
        length=0;
        continue;
      }
    if ((id > 1999) && (id < 2999))
      UpdateClipPath(info,(size_t) count,old_columns,old_rows,new_geometry);
    info+=count;
    length-=MagickMin(count,(ssize_t) length);
  }
}
|
GJ.c | #include "GJ.h"
/* --------------------------------------------- IMPLEMENTATIONS -------------------------------------------------- */
/*
 * Given the matrix and the calling process' rank, divide every row owned by
 * that rank by its pivot (the diagonal element), making the rank's slice of
 * the diagonal equal to one.
 */
void pivoting (const int world_rank, const int world_size, Data *data) {
    size_t chunk = 0, limit = 0, i = 0, j = 0;
    float pivot = 0;

    if (NULL != data) {
        chunk = line(data)/world_size;
        /* One past the last row this rank is responsible for pivoting. */
        limit = (world_rank+1)*chunk;
        /* Each rank normalizes only its own chunk of rows.  BUG FIX: 'j'
           and 'pivot' are function-scope variables and therefore SHARED by
           default inside the parallel region; without the private clause
           every thread raced on the same 'pivot' (and 'j'), corrupting the
           normalization.  'i' is the loop variable and is implicitly
           private. */
        #pragma omp parallel for private(j, pivot)
        for (i = world_rank*chunk; i < limit; i++) {
            pivot = matrix(data)[i][i];
            /* A zero pivot is left untouched: the final system may still be
               consistent but indeterminate. */
            if (0 != pivot) {
                /* Elements within a row are independent of one another, so
                   dividing the row by its pivot is itself parallelizable. */
                #pragma omp parallel for
                for (j = 0; j < col(data); j++) {
                    matrix(data)[i][j] /= pivot;
                }
            }
        }
    }
}
/*
 * Flattens the 2-D matrix into the supplied 1-D array, row-major order.
 */
static void matrix_to_vector (Data *data, float *vector) {
    if (NULL != data && NULL != vector) {
        size_t k = 0;
        for (size_t i = 0; i < line(data); i++) {
            for (size_t j = 0; j < col(data); j++) {
                vector[k++] = matrix(data)[i][j];
            }
        }
    }
}
/*
 * Unflattens the supplied 1-D array back into the 2-D matrix, row-major.
 */
static void vector_to_matrix (Data *data, float *vector) {
    if (NULL != data && NULL != vector) {
        size_t k = 0;
        for (size_t i = 0; i < line(data); i++) {
            for (size_t j = 0; j < col(data); j++) {
                matrix(data)[i][j] = vector[k++];
            }
        }
    }
}
/*
 * Copies this rank's freshly pivoted rows into the 1-D buffer that
 * accumulates the pivoting already performed by the preceding ranks.
 */
static void merge_pivoting (const int world_rank, const int world_size, Data *data, float *vector) {
    if (NULL != data && NULL != vector) {
        size_t chunk = line(data)/world_size;
        size_t limit = (world_rank+1)*chunk;
        /* Buffer position of this rank's first row. */
        size_t k = (world_rank*chunk)*col(data);
        for (size_t i = world_rank*chunk; i < limit; i++) {
            for (size_t j = 0; j < col(data); j++) {
                vector[k++] = matrix(data)[i][j];
            }
        }
    }
}
/*
 * Ring pass over all ranks: each rank receives the rows pivoted so far,
 * merges in its own slice and forwards the buffer.  The tail rank finally
 * folds the complete buffer back into the matrix for the elimination step.
 * Each rank pivots a number of rows determined by its rank id.
 */
void merge_matrix (const int world_rank, const int world_size, Data *data) {
    const size_t size = line(data)*col(data);
    float *vector = malloc(sizeof(float) * size);

    if (NULL == vector)
        return;
    if (is_root(world_rank)) {
        /* The root seeds the ring with its (partially pivoted) matrix. */
        matrix_to_vector(data, vector);
        MPI_Send(vector, size, MPI_FLOAT, world_rank+1, 0, MPI_COMM_WORLD);
    } else {
        MPI_Recv(vector, size, MPI_FLOAT, world_rank-1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        /* Merge the rows pivoted by this rank into the accumulated buffer. */
        merge_pivoting(world_rank, world_size, data, vector);
        if (!is_tail(world_rank, world_size)) {
            MPI_Send(vector, size, MPI_FLOAT, world_rank+1, 0, MPI_COMM_WORLD);
        } else {
            /* Tail rank: the buffer now holds every pivoted row. */
            vector_to_matrix(data, vector);
        }
    }
    free(vector);
}
/* Dada uma matriz e o id do processo, essa função irá fazer troca de linha quando um pivô for zero. Devido a depêndencia */
/* com todas as linhas, a troca será feita no processo principal e ao final comunicada aos outros processos */
/*
 * Row-exchange pass: when a diagonal pivot is zero, the root process swaps
 * that row with a later row whose entry in the pivot column is nonzero,
 * records the permutation in `order`, and broadcasts the resulting matrix to
 * every other rank.
 *
 * Fixes over the original:
 *  - `data` was dereferenced (line()/col()) before the NULL check;
 *  - `buffer` and `vector` were leaked;
 *  - the float payload was sent/received as MPI_INT (type mismatch);
 *  - receivers called matrix_to_vector after MPI_Recv, discarding the
 *    received data instead of unpacking it with vector_to_matrix;
 *  - the swap-candidate test `matrix[j][i] > 0` skipped rows with a valid
 *    negative entry; any nonzero entry is a usable pivot;
 *  - size_t loop counters compared against int bounds.
 */
void swapping (const int world_rank, const int world_size, Data *data, int order[]) {
    size_t i, j, k;
    size_t sizeLine, sizeCol, n_elem;
    float *buffer = NULL, *vector = NULL;
    if (NULL == data) {
        return;
    }
    sizeLine = line(data);
    sizeCol = col(data);
    n_elem = sizeLine*sizeCol;
    buffer = malloc(sizeof(float) * sizeCol);
    vector = malloc(sizeof(float) * n_elem);
    if (NULL != buffer && NULL != vector) {
        if (is_root(world_rank)) {
            for (i = 0; i < sizeLine; i++) {
                if (matrix(data)[i][i] == 0) {
                    /* Save the deficient row before swapping. */
                    for (j = 0; j < sizeCol; j++) {
                        buffer[j] = matrix(data)[i][j];
                    }
                    for (j = 0; j < sizeLine; j++) {
                        /* Any nonzero entry works as a pivot (was `> 0`). */
                        if (matrix(data)[j][i] != 0) {
                            order[i] = j;
                            order[j] = i;
                            for (k = 0; k < sizeCol; k++) {
                                matrix(data)[i][k] = matrix(data)[j][k];
                            }
                            for (k = 0; k < sizeCol; k++) {
                                matrix(data)[j][k] = buffer[k];
                            }
                            break;
                        }
                    }
                }
            }
            /* Publish the permuted matrix to every other rank. */
            matrix_to_vector(data, vector);
            for (i = 0; i < (size_t)world_size; i++) {
                if (i != (size_t)world_rank) {
                    MPI_Send(vector, (int)n_elem, MPI_FLOAT, (int)i, 0, MPI_COMM_WORLD);
                }
            }
        } else {
            MPI_Recv(vector, (int)n_elem, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            /* Apply the received matrix locally (was matrix_to_vector,
               which threw the received data away). */
            vector_to_matrix(data, vector);
        }
    }
    free(buffer);
    free(vector);
}
/*
 * Broadcasts the root's (possibly row-swapped) matrix to all ranks; non-root
 * ranks unpack it into their local matrix. `world_size` is unused but kept
 * for signature compatibility with the other communication helpers.
 *
 * Fixes: NULL/allocation checks, MPI_FLOAT instead of MPI_INT for the float
 * payload, and the `vector` buffer is freed (it leaked).
 */
void send_swap (const int world_rank, const int world_size, Data *data){
    size_t n_elem;
    float *vector;
    (void)world_size;
    if (NULL == data) {
        return;
    }
    n_elem = line(data)*col(data);
    vector = malloc(sizeof(float) * n_elem);
    if (NULL == vector) {
        return;
    }
    if (is_root(world_rank)) {
        matrix_to_vector(data, vector);
    }
    MPI_Bcast(vector, (int)n_elem, MPI_FLOAT, 0, MPI_COMM_WORLD);
    if (world_rank > 0) {
        vector_to_matrix(data, vector);
    }
    free(vector);
}
/*
* Dada a matriz já pivotada, se zera as colunas desses pivots.
*/
/*
 * Given the already-pivoted matrix, zeroes the pivot's column in every other
 * row (Gauss-Jordan elimination), then renormalizes each row by its pivot.
 *
 * Fixes: printf used %d for a size_t (undefined behavior, now %zu), and the
 * final normalization divided by the pivot unconditionally, producing
 * NaN/Inf for a zero pivot (singular/indeterminate system) — now guarded.
 */
void clear_columns (Data *data) {
    size_t i, j, k;
    float pivot, factor;
    if (NULL == data) {
        return;
    }
    /* One row at a time, eliminate that row's pivot column from the others. */
    for (i = 0; i < line(data); i++) {
        pivot = matrix(data)[i][i];
        printf("pivo[%zu]: %f\n", i, matrix(data)[i][i]);
        /* The pivot is zero when an earlier pass zeroed this position. */
        if (0 != pivot) {
            for (j = 0; j < line(data); j++) {
                /* Never eliminate against the pivot's own row. */
                if (i != j) {
                    /* Coefficient that cancels column i of row j. */
                    factor = matrix(data)[j][i]/pivot;
                    /* Column updates within a row are independent. */
                    #pragma omp parallel for
                    for (k = 0; k < col(data); k++) {
                        matrix(data)[j][k] -= factor*matrix(data)[i][k];
                    }
                }
            }
        }
    }
    /* Elimination may rescale the pivots, so divide each row (pivot and
       right-hand-side entry) by its pivot — skipping zero pivots. */
    for (i = 0; i < line(data); i++) {
        pivot = matrix(data)[i][i];
        if (0 != pivot) {
            matrix(data)[i][i] /= pivot;
            matrix(data)[i][col(data)-1] /= pivot;
        }
    }
}
/*
 * Root-only: writes the solution column (index line(data) of the augmented
 * matrix) to "resultado.txt", honoring the row permutation recorded in
 * `order` (order[i] > -1 means row i was swapped with row order[i]).
 *
 * Fixes: fopen() result is checked before use, and the file is opened in
 * text mode ("w") since the output is fprintf text, not binary.
 */
void write_result(Data *data, const int world_rank, int *order){
    FILE *answer_file;
    size_t i;
    if (NULL == data) {
        return;
    }
    if (is_root(world_rank)) {
        answer_file = fopen("resultado.txt", "w");
        if (NULL == answer_file) {
            return;
        }
        for (i = 0; i < line(data); i++) {
            if (order[i] > -1) {
                fprintf(answer_file, "%f\n", matrix(data)[order[i]][line(data)]);
            } else {
                fprintf(answer_file, "%f\n", matrix(data)[i][line(data)]);
            }
        }
        fclose(answer_file);
    }
}
|
GB_binop__pair_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_fc64)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_fc64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GxB_CMPLX(1,0)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GxB_CMPLX(1,0) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_FC64 || GxB_NO_PAIR_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense, using the PAIR binary operator
// (cij = GxB_CMPLX(1,0), see GB_BINOP above). Auto-generated file: the
// numeric kernel is pulled in from a shared template.
GrB_Info GB (_Cdense_ewise3_noaccum__pair_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_DISABLE above);
// the caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C with the PAIR
// operator. B is pre-sliced into B_ntasks tasks for B_nthreads threads; the
// loop lives in the included template (auto-generated file).
GrB_Info GB (_Cdense_accumB__pair_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into the
// dense matrix C with the PAIR operator. Auto-generated file.
GrB_Info GB (_Cdense_accumb__pair_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the inner block already returned; harmless
// artifact of the code generator, left as-is since this file is generated.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the PAIR operator, where C may
// be built in any sparsity format (C_sparsity). M is the optional mask
// (structural if Mask_struct, complemented if Mask_comp); the C_to_* arrays
// map C's vectors to those of M, A, and B. Work is pre-split into TaskList.
// Auto-generated file: the numeric loop is in the included template.
GrB_Info GB (_AaddB__pair_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces, allocated by the template as needed and
// released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = GxB_CMPLX(1,0) ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = GxB_CMPLX(1,0) ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = GxB_CMPLX(1,0) ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = GxB_CMPLX(1,0) ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
GB_unop__frexpe_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__frexpe_fp32_fp32
// op(A') function: GB_unop_tran__frexpe_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = GB_frexpef (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_frexpef (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = GB_frexpef (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FREXPE || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_frexpef (Ax [p]) for all p: apply the FREXPE unary operator to
// a dense float array. Auto-generated file; GB_frexpef is defined elsewhere
// (presumably the exponent part of frexpf — confirm in GB.h).
GrB_Info GB_unop_apply__frexpe_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this op/type combination was compiled out; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// entries are independent, so the loop parallelizes trivially
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// cast is a no-op here: both A and C are float
float aij = Ax [p] ;
float z = aij ;
Cx [p] = GB_frexpef (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while applying the FREXPE operator, using
// the pre-computed row counts and slice boundaries. Auto-generated file; the
// transpose loop lives in the shared template.
GrB_Info GB_unop_tran__frexpe_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
// phase 2 of 2: fill in the numerical values during the transpose
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
core_dgeadd.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zgeadd.c, normal z -> d, Fri Sep 28 17:38:18 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_internal.h"
#include "plasma_types.h"
#include "core_lapack.h"
/****************************************************************************//**
*
* @ingroup core_geadd
*
* Performs an addition of two general matrices similarly to the
* pdgeadd() function from the PBLAS library:
*
* \f[ B = \alpha * op( A ) + \beta * B, \f]
*
* where op( X ) is one of:
* \f[ op( X ) = X, \f]
* \f[ op( X ) = X^T, \f]
* \f[ op( X ) = X^T, \f]
*
* alpha and beta are scalars and A, B are matrices with op( A ) an m-by-n or
* n-by-m matrix depending on the value of transa and B an m-by-n matrix.
*
*******************************************************************************
*
* @param[in] transa
* Specifies whether the matrix A is non-transposed, transposed, or
* conjugate transposed
* - PlasmaNoTrans: op( A ) = A
* - PlasmaTrans: op( A ) = A^T
* - PlasmaConjTrans: op( A ) = A^T
*
* @param[in] m
* Number of rows of the matrices op( A ) and B.
* m >= 0.
*
* @param[in] n
* Number of columns of the matrices op( A ) and B.
*
* @param[in] alpha
* Scalar factor of A.
*
* @param[in] A
* Matrix of size lda-by-k, where k is n when transa == PlasmaNoTrans
* and m otherwise.
*
* @param[in] lda
* Leading dimension of the array A. lda >= max(1,l), where l is m
* when transa == PlasmaNoTrans and n otherwise.
*
* @param[in] beta
* Scalar factor of B.
*
* @param[in,out] B
* Matrix of size ldb-by-n.
* On exit, B = alpha * op( A ) + beta * B
*
* @param[in] ldb
* Leading dimension of the array B.
* ldb >= max(1,m)
*
******************************************************************************/
/*
 * B = alpha*op(A) + beta*B, where op(A) is A, A^T, or (for real data,
 * equivalently) conj(A)^T. Returns PlasmaSuccess, or the negated 1-based
 * position of the first invalid argument.
 */
__attribute__((weak))
int plasma_core_dgeadd(plasma_enum_t transa,
                       int m, int n,
                       double alpha, const double *A, int lda,
                       double beta, double *B, int ldb)
{
    // Argument validation: report and bail out on the first bad argument.
    if (transa != PlasmaNoTrans &&
        transa != PlasmaTrans &&
        transa != PlasmaConjTrans) {
        plasma_coreblas_error("illegal value of transa");
        return -1;
    }
    if (m < 0) {
        plasma_coreblas_error("illegal value of m");
        return -2;
    }
    if (n < 0) {
        plasma_coreblas_error("illegal value of n");
        return -3;
    }
    if (A == NULL) {
        plasma_coreblas_error("NULL A");
        return -5;
    }
    // A has la rows as stored: m when not transposed, n otherwise.
    const int la = (transa == PlasmaNoTrans) ? m : n;
    if (lda < imax(1, la) && la > 0) {
        plasma_coreblas_error("illegal value of lda");
        return -6;
    }
    if (B == NULL) {
        plasma_coreblas_error("NULL B");
        return -8;
    }
    if (ldb < imax(1, m) && m > 0) {
        plasma_coreblas_error("illegal value of ldb");
        return -9;
    }

    // Quick return: empty matrices, or an update that leaves B unchanged.
    if (m == 0 || n == 0 || (alpha == 0.0 && beta == 1.0))
        return PlasmaSuccess;

    if (transa == PlasmaNoTrans) {
        for (int j = 0; j < n; j++)
            for (int i = 0; i < m; i++)
                B[ldb*j+i] = beta * B[ldb*j+i] + alpha * A[lda*j+i];
    }
    else {
        // PlasmaTrans and PlasmaConjTrans coincide for real data:
        // read A with swapped indices.
        for (int j = 0; j < n; j++)
            for (int i = 0; i < m; i++)
                B[ldb*j+i] = beta * B[ldb*j+i] + alpha * A[lda*i+j];
    }
    return PlasmaSuccess;
}
/******************************************************************************/
// Asynchronous (OpenMP task) wrapper around plasma_core_dgeadd():
// B = alpha*op(A) + beta*B. Errors are reported through the sequence/request
// pair rather than through a return value.
void plasma_core_omp_dgeadd(
plasma_enum_t transa,
int m, int n,
double alpha, const double *A, int lda,
double beta, double *B, int ldb,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// op(A) is m-by-n, so A as stored spans lda*k elements, with k = n for the
// non-transposed case and k = m otherwise.
int k = (transa == PlasmaNoTrans) ? n : m;
#pragma omp task depend(in:A[0:lda*k]) \
depend(inout:B[0:ldb*n])
{
// Skip the work if an earlier task in this sequence already failed.
if (sequence->status == PlasmaSuccess) {
int retval = plasma_core_dgeadd(transa,
m, n,
alpha, A, lda,
beta, B, ldb);
if (retval != PlasmaSuccess) {
plasma_error("core_dgeadd() failed");
plasma_request_fail(sequence, request, PlasmaErrorInternal);
}
}
}
}
|
2mm.c | /**
* 2mm.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
// define the error threshold for the results "not matching"
#define ERROR_THRESHOLD 0.05
/* Problem size. */
#ifdef RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif
#define NI SIZE
#define NJ SIZE
#define NK SIZE
#define NL SIZE
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/*
 * Fills A (NI x NK), B (NK x NJ), and D (NJ x NL) with deterministic values.
 * C is not touched: main() allocates it with calloc and both mm2_cpu and
 * mm2_OMP overwrite it before reading (the original kept an empty loop whose
 * body was commented out).
 *
 * Fix: use each matrix's own column count as the row stride. The original
 * strided A by NI (instead of NK), B by NK (instead of NJ), and looped D's
 * rows to NI/NL (instead of NJ) — latent bugs masked only because
 * NI == NJ == NK == NL == SIZE.
 */
void init_array(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D) {
  int i, j;
  (void)C;
  for (i = 0; i < NI; i++) {
    for (j = 0; j < NK; j++) {
      A[i * NK + j] = ((DATA_TYPE)i * j) / NI;
    }
  }
  for (i = 0; i < NK; i++) {
    for (j = 0; j < NJ; j++) {
      B[i * NJ + j] = ((DATA_TYPE)i * (j + 1)) / NJ;
    }
  }
  /* D is indexed D[k*NL + j] with k < NJ in the multiplies, so it is NJ x NL. */
  for (i = 0; i < NJ; i++) {
    for (j = 0; j < NL; j++) {
      D[i * NL + j] = ((DATA_TYPE)i * (j + 2)) / NK;
    }
  }
}
/*
 * Counts elements of the CPU and GPU result matrices whose relative
 * difference exceeds ERROR_THRESHOLD percent, prints the count, and returns
 * it.
 *
 * Fix: iterate with E's declared NI x NL shape and stride NL. The original
 * looped i < NL / j < NI with stride NI — consistent only because all
 * dimensions equal SIZE. main() also applies this function to the NI x NJ
 * C matrices; NJ == NL keeps that use valid.
 */
int compareResults(DATA_TYPE *E, DATA_TYPE *E_GPU) {
  int i, j, fail;
  fail = 0;
  for (i = 0; i < NI; i++) {
    for (j = 0; j < NL; j++) {
      if (percentDiff(E[i * NL + j], E_GPU[i * NL + j]) > ERROR_THRESHOLD) {
        fail++;
      }
    }
  }
  // print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         ERROR_THRESHOLD, fail);
  return fail;
}
/*
 * Reference CPU implementation of the double matrix product:
 * C = A.B (NI x NJ), then E = C.D (NI x NL). Accumulation order matches the
 * GPU kernel so the results are bitwise comparable.
 */
void mm2_cpu(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D,
             DATA_TYPE *E) {
  /* First product: C = A.B */
  for (int r = 0; r < NI; r++) {
    for (int c = 0; c < NJ; c++) {
      const int idx = r * NJ + c;
      C[idx] = 0.0;
      for (int t = 0; t < NK; ++t) {
        C[idx] += A[r * NK + t] * B[t * NJ + c];
      }
    }
  }
  /* Second product: E = C.D */
  for (int r = 0; r < NI; r++) {
    for (int c = 0; c < NL; c++) {
      const int idx = r * NL + c;
      E[idx] = 0.0;
      for (int t = 0; t < NJ; ++t) {
        E[idx] += C[r * NJ + t] * D[t * NL + c];
      }
    }
  }
}
// GPU offload version of the double matrix product: inside one target region,
// compute C = A.B and then E = C.D. A, B, D are copied to the device; C and E
// are copied back when the region ends. C is produced by the first loop nest
// and consumed by the second, all on the device.
void mm2_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *E) {
#pragma omp target teams map(from: E[:NI*NL], C[:NI*NJ]) map(to: A[:NI*NK], B[:NK*NJ], D[:NJ*NL]) device(DEVICE_ID)
{
// First product: C = A.B, one (i,j) cell per iteration of the collapsed loop.
#pragma omp distribute parallel for collapse(2)
for (int i = 0; i < NI; i++) {
for (int j = 0; j < NJ; j++) {
C[i * NJ + j] = 0.0;
for (int k = 0; k < NK; ++k) {
C[i * NJ + j] += A[i * NK + k] * B[k * NJ + j];
}
}
}
// Second product: E = C.D. The implicit barrier between the two distribute
// loops in this teams region orders the reads of C after its writes.
#pragma omp distribute parallel for collapse(2)
for (int i = 0; i < NI; i++) {
for (int j = 0; j < NL; j++) {
E[i * NL + j] = 0.0;
for (int k = 0; k < NJ; ++k) {
E[i * NL + j] += C[i * NJ + k] * D[k * NL + j];
}
}
}
}
}
/*
 * Driver: allocate all matrices, run the GPU kernel (and, under RUN_TEST,
 * the CPU reference), compare, free, and return the mismatch count.
 *
 * Fixes: C_GPU was never freed (leak), and allocation failures were not
 * checked before use.
 */
int main(int argc, char **argv) {
  double t_start, t_end, t_start_GPU, t_end_GPU;
  int fail = 0;
  DATA_TYPE *C;
  DATA_TYPE *C_GPU;
  DATA_TYPE *A;
  DATA_TYPE *B;
  DATA_TYPE *D;
  DATA_TYPE *E;
  DATA_TYPE *E_GPU;
  C = (DATA_TYPE *)calloc(NI * NJ, sizeof(DATA_TYPE));
  C_GPU = (DATA_TYPE *)calloc(NI * NJ, sizeof(DATA_TYPE));
  A = (DATA_TYPE *)malloc(NI * NK * sizeof(DATA_TYPE));
  B = (DATA_TYPE *)malloc(NK * NJ * sizeof(DATA_TYPE));
  D = (DATA_TYPE *)malloc(NJ * NL * sizeof(DATA_TYPE));
  E = (DATA_TYPE *)calloc(NI * NL, sizeof(DATA_TYPE));
  E_GPU = (DATA_TYPE *)calloc(NI * NL, sizeof(DATA_TYPE));
  /* Bail out early if any allocation failed. */
  if (!C || !C_GPU || !A || !B || !D || !E || !E_GPU) {
    fprintf(stderr, "allocation failed\n");
    return 1;
  }
  fprintf(stdout,
          "<< Linear Algebra: 2 Matrix Multiplications (D=A.B; E=C.D) >>\n");
  init_array(A, B, C, D);
  t_start_GPU = rtclock();
  mm2_OMP(A, B, C_GPU, D, E_GPU);
  t_end_GPU = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end_GPU - t_start_GPU);
#ifdef RUN_TEST
  t_start = rtclock();
  mm2_cpu(A, B, C, D, E);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
  fail += compareResults(C, C_GPU);
  fail += compareResults(E, E_GPU);
#endif
  free(C);
  free(C_GPU); /* was leaked in the original */
  free(A);
  free(B);
  free(D);
  free(E);
  free(E_GPU);
  return fail;
}
|
residualbased_block_builder_and_solver.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
// Collaborators: Vicente Mataix
//
//
#if !defined(KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER )
#define KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER
/* System includes */
#include <unordered_set>
/* External includes */
#ifdef KRATOS_SMP_OPENMP
#include <omp.h>
#endif
/* Project includes */
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "includes/key_hash.h"
#include "utilities/timer.h"
#include "utilities/variable_utils.h"
#include "includes/kratos_flags.h"
#include "includes/lock_object.h"
#include "utilities/sparse_matrix_multiplication_utility.h"
#include "utilities/builtin_timer.h"
#include "utilities/atomic_utilities.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedEliminationBuilderAndSolver
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual
* @tparam TSparseSpace The sparse system considered
* @tparam TDenseSpace The dense system considered
* @tparam TLinearSolver The linear solver considered
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedBlockBuilderAndSolver
: public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/// Definition of the flags
KRATOS_DEFINE_LOCAL_FLAG( SILENT_WARNINGS );
// Scaling enum
enum class SCALING_DIAGONAL {NO_SCALING = 0, CONSIDER_NORM_DIAGONAL = 1, CONSIDER_MAX_DIAGONAL = 2, CONSIDER_PRESCRIBED_DIAGONAL = 3};
/// Definition of the pointer
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedBlockBuilderAndSolver);
/// Definition of the base class
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
/// The definition of the current class
typedef ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;
// The size_t types
typedef std::size_t SizeType;
typedef std::size_t IndexType;
/// Definition of the classes from the base class
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
/// Additional definitions
typedef PointerVectorSet<Element, IndexedObject> ElementsContainerType;
typedef Element::EquationIdVectorType EquationIdVectorType;
typedef Element::DofsVectorType DofsVectorType;
typedef boost::numeric::ublas::compressed_matrix<double> CompressedMatrixType;
/// DoF types definition
typedef Node<3> NodeType;
typedef typename NodeType::DofType DofType;
typedef typename DofType::Pointer DofPointerType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor
*/
explicit ResidualBasedBlockBuilderAndSolver() : BaseType()
{
}
/**
* @brief Default constructor. (with parameters)
*/
explicit ResidualBasedBlockBuilderAndSolver(
typename TLinearSolver::Pointer pNewLinearSystemSolver,
Parameters ThisParameters
) : BaseType(pNewLinearSystemSolver)
{
// Validate and assign defaults
// Missing entries are filled in from GetDefaultParameters(); the validated
// set is then transferred to the member variables by AssignSettings
// (defined elsewhere — presumably sets mScalingDiagonal and mOptions; verify there)
ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
this->AssignSettings(ThisParameters);
}
/**
* @brief Default constructor.
*/
explicit ResidualBasedBlockBuilderAndSolver(typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BaseType(pNewLinearSystemSolver)
{
// No Parameters object available here, so use the most conservative
// Dirichlet-diagonal treatment by default
mScalingDiagonal = SCALING_DIAGONAL::NO_SCALING;
}
/** Destructor.
*/
// Nothing to release explicitly: all members clean up themselves.
~ResidualBasedBlockBuilderAndSolver() override = default;
/**
* @brief Create method
* @param pNewLinearSystemSolver The linear solver for the system of equations
* @param ThisParameters The configuration parameters
*/
typename BaseType::Pointer Create(
typename TLinearSystemSolver::Pointer pNewLinearSystemSolver,
Parameters ThisParameters
) const override
{
// Factory hook: constructs a new instance of this concrete class through
// the parameters-validating constructor
return Kratos::make_shared<ClassType>(pNewLinearSystemSolver,ThisParameters);
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Function to perform the build of the RHS. The vector could be sized as the total number
* of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param b The RHS vector
*/
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& b) override
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
// Getting the elements from the model
const int nelements = static_cast<int>(rModelPart.Elements().size());
// Getting the array of the conditions
const int nconditions = static_cast<int>(rModelPart.Conditions().size());
const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
//contributions to the system
// Local contributions start at size 0; the scheme resizes them per entity
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
// assemble all elements
const auto timer = BuiltinTimer();
// Each thread gets private copies of the scratch containers (firstprivate);
// Assemble() is responsible for thread-safe insertion into A and b —
// NOTE(review): assumed thread-safe, its definition is not visible here
#pragma omp parallel firstprivate(nelements,nconditions, LHS_Contribution, RHS_Contribution, EquationId )
{
# pragma omp for schedule(guided, 512) nowait
for (int k = 0; k < nelements; k++)
{
ModelPart::ElementsContainerType::iterator it = el_begin + k;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool element_is_active = true;
if ((it)->IsDefined(ACTIVE))
element_is_active = (it)->Is(ACTIVE);
if (element_is_active)
{
//calculate elemental contribution
pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
}
}
// Same pattern for conditions (no "nowait": this loop ends the parallel region)
#pragma omp for schedule(guided, 512)
for (int k = 0; k < nconditions; k++)
{
ModelPart::ConditionsContainerType::iterator it = cond_begin + k;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool condition_is_active = true;
if ((it)->IsDefined(ACTIVE))
condition_is_active = (it)->Is(ACTIVE);
if (condition_is_active)
{
//calculate elemental contribution
pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
}
}
}
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >= 1) << "Build time: " << timer.ElapsedSeconds() << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building of the LHS
* @details Depending on the implementation chosen the size of the matrix could
* be equal to the total number of Dofs or to the number of unrestrained dofs
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA
) override
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
// Getting the elements from the model
const int nelements = static_cast<int>(rModelPart.Elements().size());
// Getting the array of the conditions
const int nconditions = static_cast<int>(rModelPart.Conditions().size());
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
const auto it_elem_begin = rModelPart.ElementsBegin();
const auto it_cond_begin = rModelPart.ConditionsBegin();
// Contributions to the system (local matrix starts empty; the scheme resizes it)
LocalSystemMatrixType lhs_contribution(0, 0);
// Vector containing the localization in the system of the different terms
Element::EquationIdVectorType equation_id;
// Assemble all elements
const auto timer = BuiltinTimer();
// LHS-only variant of Build(): same parallel structure, but only
// CalculateLHSContribution/AssembleLHS are invoked
#pragma omp parallel firstprivate(nelements, nconditions, lhs_contribution, equation_id )
{
# pragma omp for schedule(guided, 512) nowait
for (int k = 0; k < nelements; ++k) {
auto it_elem = it_elem_begin + k;
// Detect if the element is active or not. If the user did not make any choice the element is active by default
bool element_is_active = true;
if (it_elem->IsDefined(ACTIVE))
element_is_active = it_elem->Is(ACTIVE);
if (element_is_active) {
// Calculate elemental contribution
pScheme->CalculateLHSContribution(*it_elem, lhs_contribution, equation_id, r_current_process_info);
// Assemble the elemental contribution
AssembleLHS(rA, lhs_contribution, equation_id);
}
}
#pragma omp for schedule(guided, 512)
for (int k = 0; k < nconditions; ++k) {
auto it_cond = it_cond_begin + k;
// Detect if the element is active or not. If the user did not make any choice the element is active by default
bool condition_is_active = true;
if (it_cond->IsDefined(ACTIVE))
condition_is_active = it_cond->Is(ACTIVE);
if (condition_is_active)
{
// Calculate elemental contribution
pScheme->CalculateLHSContribution(*it_cond, lhs_contribution, equation_id, r_current_process_info);
// Assemble the elemental contribution
AssembleLHS(rA, lhs_contribution, equation_id);
}
}
}
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >= 1) << "Build time LHS: " << timer.ElapsedSeconds() << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 2) << "Finished parallel building LHS" << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Build a rectangular matrix of size n*N where "n" is the number of unrestrained degrees of freedom
* and "N" is the total number of degrees of freedom involved.
* @details This matrix is obtained by building the total matrix without the lines corresponding to the fixed
* degrees of freedom (but keeping the columns!!)
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS_CompleteOnFreeRows(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A) override
{
    KRATOS_TRY

    // In the block arrangement this reduces to a full build: delegate to
    // Build() and discard the RHS contribution into a zero-initialized
    // scratch vector.
    TSystemVectorType scratch_rhs(A.size1(), 0.0);
    this->Build(pScheme, rModelPart, A, scratch_rhs);

    KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void SystemSolve(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
) override
{
KRATOS_TRY
double norm_b;
if (TSparseSpace::Size(b) != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
// Skip the linear solver entirely when the RHS is exactly zero:
// the solution increment is then zero as well
if (norm_b != 0.00)
{
//do solve
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
}
else
TSparseSpace::SetToZero(Dx);
// A non-empty mT means master-slave constraints were applied: the solve was
// performed in the condensed space, so expand back with Dx = mT * Dx_condensed
if(mT.size1() != 0) //if there are master-slave constraints
{
//recover solution of the original problem
TSystemVectorType Dxmodified = Dx;
TSparseSpace::Mult(mT, Dxmodified, Dx);
}
//prints informations about the current time
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
* @param rModelPart The model part of the problem to solve
*/
virtual void SystemSolveWithPhysics(
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb,
ModelPart& rModelPart
)
{
    // Without master-slave constraints the solver may write directly into rDx
    if (rModelPart.MasterSlaveConstraints().size() == 0) {
        InternalSystemSolveWithPhysics(rA, rDx, rb, rModelPart);
        return;
    }

    // With constraints: solve in the condensed space first...
    TSystemVectorType condensed_dx(rb.size());
    InternalSystemSolveWithPhysics(rA, condensed_dx, rb, rModelPart);
    // ...then expand back to the full DoF set: rDx = mT * condensed_dx
    TSparseSpace::Mult(mT, condensed_dx, rDx);
}
/**
*@brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
* @param rModelPart The model part of the problem to solve
*/
void InternalSystemSolveWithPhysics(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b,
ModelPart& rModelPart
)
{
KRATOS_TRY
double norm_b;
if (TSparseSpace::Size(b) != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
// Only invoke the linear solver when the RHS is non-zero; otherwise the
// increment is trivially zero and a warning is issued (unless silenced)
if (norm_b != 0.00) {
//provide physical data as needed
// Some solvers (e.g. AMG-type) need the DoF set / model part to build
// their internal hierarchy
if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded() )
BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);
//do solve
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
} else {
TSparseSpace::SetToZero(Dx);
KRATOS_WARNING_IF("ResidualBasedBlockBuilderAndSolver", mOptions.IsNot(SILENT_WARNINGS)) << "ATTENTION! setting the RHS to zero!" << std::endl;
}
// Prints informations about the current time
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time.
* @details It is ideally the fastest and safer function to use when it is possible to solve
* just after building
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void BuildAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
KRATOS_TRY
// Pipeline: build -> (optionally) apply constraints -> apply Dirichlet BCs -> solve
Timer::Start("Build");
Build(pScheme, rModelPart, A, b);
Timer::Stop("Build");
if(rModelPart.MasterSlaveConstraints().size() != 0) {
Timer::Start("ApplyConstraints");
ApplyConstraints(pScheme, rModelPart, A, b);
Timer::Stop("ApplyConstraints");
}
ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
const auto timer = BuiltinTimer();
Timer::Start("Solve");
SystemSolveWithPhysics(A, Dx, b, rModelPart);
Timer::Stop("Solve");
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >=1) << "System solve time: " << timer.ElapsedSeconds() << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time Linearizing with the database at the old iteration
* @details It is ideally the fastest and safer function to use when it is possible to solve just after building
* @param pScheme The pointer to the integration scheme
* @param rModelPart The model part to compute
* @param rA The LHS matrix of the system of equations
* @param rDx The vector of unknowns
* @param rb The RHS vector of the system of equations
* @param MoveMesh tells if the update of the scheme needs to be performed when calling the Update of the scheme
*/
void BuildAndSolveLinearizedOnPreviousIteration(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb,
const bool MoveMesh
) override
{
KRATOS_INFO_IF("BlockBuilderAndSolver", this->GetEchoLevel() > 0)
<< "Linearizing on Old iteration" << std::endl;
// A buffer of size >= 2 is required so that the previous step's converged
// values (GetSolutionStepValue(1)) are available below
KRATOS_ERROR_IF(rModelPart.GetBufferSize() == 1) << "BlockBuilderAndSolver: \n"
<< "The buffer size needs to be at least 2 in order to use \n"
<< "BuildAndSolveLinearizedOnPreviousIteration \n"
<< "current buffer size for modelpart: " << rModelPart.Name() << std::endl
<< "is :" << rModelPart.GetBufferSize()
<< " Please set IN THE STRATEGY SETTINGS "
<< " UseOldStiffnessInFirstIteration=false " << std::endl;
// Temporarily free the fixed DoFs so the build below treats them as
// unknowns; they are re-fixed before applying BCs at the end
DofsArrayType fixed_dofs;
for(auto& r_dof : BaseType::mDofSet){
if(r_dof.IsFixed()){
fixed_dofs.push_back(&r_dof);
r_dof.FreeDof();
}
}
//TODO: Here we need to take the vector from other ones because
// We cannot create a trilinos vector without a communicator. To be improved!
TSystemVectorType dx_prediction(rDx);
TSystemVectorType rhs_addition(rb); //we know it is zero here, so we do not need to set it
// Here we bring back the database to before the prediction,
// but we store the prediction increment in dx_prediction.
// The goal is that the stiffness is computed with the
// converged configuration at the end of the previous step.
block_for_each(BaseType::mDofSet, [&](Dof<double>& rDof){
// NOTE: this is initialzed to - the value of dx prediction
dx_prediction[rDof.EquationId()] = -(rDof.GetSolutionStepValue() - rDof.GetSolutionStepValue(1));
});
// Use UpdateDatabase to bring back the solution to how it was at the end of the previous step
pScheme->Update(rModelPart, BaseType::mDofSet, rA, dx_prediction, rb);
if (MoveMesh) {
VariableUtils().UpdateCurrentPosition(rModelPart.Nodes(),DISPLACEMENT,0);
}
// Build the LHS/RHS with the previous-step configuration
this->Build(pScheme, rModelPart, rA, rb);
// Put back the prediction into the database
TSparseSpace::InplaceMult(dx_prediction, -1.0); //change sign to dx_prediction
TSparseSpace::UnaliasedAdd(rDx, 1.0, dx_prediction);
// Use UpdateDatabase to bring back the solution
// to where it was taking into account BCs
// it is done here so that constraints are correctly taken into account right after
pScheme->Update(rModelPart, BaseType::mDofSet, rA, dx_prediction, rb);
if (MoveMesh) {
VariableUtils().UpdateCurrentPosition(rModelPart.Nodes(),DISPLACEMENT,0);
}
// Apply rb -= A*dx_prediction
// (linearization correction: account for the already-applied prediction)
TSparseSpace::Mult(rA, dx_prediction, rhs_addition);
TSparseSpace::UnaliasedAdd(rb, -1.0, rhs_addition);
// Restore the fixity freed above
for(auto& dof : fixed_dofs)
dof.FixDof();
if (!rModelPart.MasterSlaveConstraints().empty()) {
this->ApplyConstraints(pScheme, rModelPart, rA, rb);
}
this->ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb);
this->SystemSolveWithPhysics(rA, rDx, rb, rModelPart);
}
/**
* @brief Corresponds to the previous method, but the system's matrix is considered already built and only the RHS is built again
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
void BuildRHSAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY
// Only the RHS is rebuilt here; rA is assumed already assembled by a
// previous Build()/BuildAndSolve() call
BuildRHS(pScheme, rModelPart, rb);
if(rModelPart.MasterSlaveConstraints().size() != 0) {
Timer::Start("ApplyRHSConstraints");
ApplyRHSConstraints(pScheme, rModelPart, rb);
Timer::Stop("ApplyRHSConstraints");
}
ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb);
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
const auto timer = BuiltinTimer();
Timer::Start("Solve");
SystemSolveWithPhysics(rA, rDx, rb, rModelPart);
Timer::Stop("Solve");
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >=1) << "System solve time: " << timer.ElapsedSeconds() << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the build of the RHS.
* @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void BuildRHS(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemVectorType& b) override
{
    KRATOS_TRY

    Timer::Start("BuildRHS");

    // Assemble the residual without any Dirichlet treatment
    BuildRHSNoDirichlet(pScheme, rModelPart, b);

    // Zero the entries belonging to fixed DoFs.
    // NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
    block_for_each(BaseType::mDofSet, [&b](Dof<double>& rDof){
        if (rDof.IsFixed()) {
            b[rDof.EquationId()] = 0.0;
        }
    });

    Timer::Stop("BuildRHS");

    KRATOS_CATCH("")
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
* @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart
) override
{
KRATOS_TRY;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl;
//Gets the array of elements from the modeler
ElementsArrayType& r_elements_array = rModelPart.Elements();
const int number_of_elements = static_cast<int>(r_elements_array.size());
DofsVectorType dof_list, second_dof_list; // NOTE: The second dof list is only used on constraints to include master/slave relations
unsigned int nthreads = ParallelUtilities::GetNumThreads();
typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of threads" << nthreads << "\n" << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing element loop" << std::endl;
/**
* Here we declare three sets.
* - The global set: Contains all the DoF of the system
* - The slave set: The DoF that are not going to be solved, due to MPC formulation
*/
set_type dof_global_set;
// Heuristic pre-allocation: ~20 dofs per element — TODO confirm this sizing
dof_global_set.reserve(number_of_elements*20);
// Each thread collects dofs in a private set; the sets are merged under a
// critical section at the end of the parallel region
#pragma omp parallel firstprivate(dof_list, second_dof_list)
{
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// We create the temporal set and we reserve some space on them
set_type dofs_tmp_set;
dofs_tmp_set.reserve(20000);
// Gets the array of elements from the modeler
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i < number_of_elements; ++i) {
auto it_elem = r_elements_array.begin() + i;
// Gets list of Dof involved on every element
pScheme->GetDofList(*it_elem, dof_list, r_current_process_info);
dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
}
// Gets the array of conditions from the modeler
ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
const int number_of_conditions = static_cast<int>(r_conditions_array.size());
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i < number_of_conditions; ++i) {
auto it_cond = r_conditions_array.begin() + i;
// Gets list of Dof involved on every element
pScheme->GetDofList(*it_cond, dof_list, r_current_process_info);
dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
}
// Gets the array of constraints from the modeler
auto& r_constraints_array = rModelPart.MasterSlaveConstraints();
const int number_of_constraints = static_cast<int>(r_constraints_array.size());
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i < number_of_constraints; ++i) {
auto it_const = r_constraints_array.begin() + i;
// Gets list of Dof involved on every element
// Constraints contribute both slave (dof_list) and master (second_dof_list) dofs
it_const->GetDofList(dof_list, second_dof_list, r_current_process_info);
dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
dofs_tmp_set.insert(second_dof_list.begin(), second_dof_list.end());
}
// We merge all the sets in one thread
#pragma omp critical
{
dof_global_set.insert(dofs_tmp_set.begin(), dofs_tmp_set.end());
}
}
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl;
// Copy the unordered global set into the sorted DoF container used by the rest
// of the builder (equation ids will follow this sorted order, see SetUpSystem)
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
Doftemp.reserve(dof_global_set.size());
for (auto it= dof_global_set.begin(); it!= dof_global_set.end(); it++)
{
Doftemp.push_back( *it );
}
Doftemp.Sort();
BaseType::mDofSet = Doftemp;
//Throws an exception if there are no Degrees Of Freedom involved in the analysis
KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl;
BaseType::mDofSetIsInitialized = true;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl;
#ifdef KRATOS_DEBUG
// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is tobe done only in debug mode
if (BaseType::GetCalculateReactionsFlag()) {
for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) {
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
<< "Node : "<<dof_iterator->Id()<< std::endl
<< "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
}
}
#endif
KRATOS_CATCH("");
}
/**
* @brief Organises the dofset in order to speed up the building phase
* @param rModelPart The model part of the problem to solve
*/
void SetUpSystem(
ModelPart& rModelPart
) override
{
    // Block arrangement: every DoF (free or fixed) gets an equation id equal
    // to its position in the sorted DoF set.
    BaseType::mEquationSystemSize = BaseType::mDofSet.size();
    IndexPartition<std::size_t>(BaseType::mDofSet.size()).for_each([this](std::size_t Index){
        auto it_dof = this->mDofSet.begin() + Index;
        it_dof->SetEquationId(Index);
    });
}
//**************************************************************************
//**************************************************************************
void ResizeAndInitializeVectors(
typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType& pA,
TSystemVectorPointerType& pDx,
TSystemVectorPointerType& pb,
ModelPart& rModelPart
) override
{
    KRATOS_TRY

    // Lazily create the system containers if the pointers were never initialized
    // (nullptr used instead of NULL, consistent with the C++11 style of the file)
    if (pA == nullptr) {
        TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
        pA.swap(pNewA);
    }
    if (pDx == nullptr) {
        TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
        pDx.swap(pNewDx);
    }
    if (pb == nullptr) {
        TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
        pb.swap(pNewb);
    }

    TSystemMatrixType& A = *pA;
    TSystemVectorType& Dx = *pDx;
    TSystemVectorType& b = *pb;

    // Resizing the system vectors and matrix
    if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) {
        // Matrix not initialized (or reshape requested): build the sparsity graph
        A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
        ConstructMatrixStructure(pScheme, A, rModelPart);
    } else if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize) {
        // Fix: KRATOS_ERROR throws, so the resize/ConstructMatrixStructure calls
        // that followed it in the original code were unreachable and have been removed.
        KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
    }

    if (Dx.size() != BaseType::mEquationSystemSize)
        Dx.resize(BaseType::mEquationSystemSize, false);
    TSparseSpace::SetToZero(Dx);
    if (b.size() != BaseType::mEquationSystemSize) {
        b.resize(BaseType::mEquationSystemSize, false);
    }
    TSparseSpace::SetToZero(b);

    // Also (re)build the constraint relation matrix structure (mT, mConstantVector)
    ConstructMasterSlaveConstraintsStructure(rModelPart);

    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void CalculateReactions(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
    // Rebuild the residual from scratch (without Dirichlet treatment) so the
    // reactions reflect the converged solution.
    TSparseSpace::SetToZero(b);
    BuildRHSNoDirichlet(pScheme, rModelPart, b);

    // Reaction = minus the residual entry of each DoF.
    // NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
    block_for_each(BaseType::mDofSet, [&b](Dof<double>& rDof){
        rDof.GetSolutionStepReactionValue() = -b[rDof.EquationId()];
    });
}
/**
* @brief Applies the dirichlet conditions. This operation may be very heavy or completely
* inexpensive depending on the implementation chosen and on how the System Matrix is built.
* @details For explanation of how it works for a particular implementation the user
* should refer to the particular Builder And Solver chosen
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
const std::size_t system_size = rA.size1();
// scaling_factors[i] == 0.0 marks row i as fixed (Dirichlet), 1.0 as free
Vector scaling_factors (system_size);
const auto it_dof_iterator_begin = BaseType::mDofSet.begin();
// NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
IndexPartition<std::size_t>(BaseType::mDofSet.size()).for_each([&](std::size_t Index){
auto it_dof_iterator = it_dof_iterator_begin + Index;
if (it_dof_iterator->IsFixed()) {
scaling_factors[Index] = 0.0;
} else {
scaling_factors[Index] = 1.0;
}
});
// Direct access to the CSR arrays of the uBLAS compressed matrix
double* Avalues = rA.value_data().begin();
std::size_t* Arow_indices = rA.index1_data().begin();
std::size_t* Acol_indices = rA.index2_data().begin();
// The diagonal considered
// (value placed on the diagonal of fixed/empty rows, per the configured scaling)
mScaleFactor = GetScaleNorm(rModelPart, rA);
// Detect if there is a line of all zeros and set the diagonal to a 1 if this happens
// (keeps the matrix non-singular; the corresponding RHS entry is zeroed too)
IndexPartition<std::size_t>(system_size).for_each([&](std::size_t Index){
bool empty = true;
const std::size_t col_begin = Arow_indices[Index];
const std::size_t col_end = Arow_indices[Index + 1];
for (std::size_t j = col_begin; j < col_end; ++j) {
if(Avalues[j] != 0.0) {
empty = false;
break;
}
}
if(empty) {
rA(Index, Index) = mScaleFactor;
rb[Index] = 0.0;
}
});
// Symmetric elimination of fixed DoFs: zero the fixed row (keeping the
// diagonal) and zero the matching column entries in free rows
IndexPartition<std::size_t>(system_size).for_each([&](std::size_t Index){
const std::size_t col_begin = Arow_indices[Index];
const std::size_t col_end = Arow_indices[Index+1];
const double k_factor = scaling_factors[Index];
if (k_factor == 0.0) {
// Zero out the whole row, except the diagonal
for (std::size_t j = col_begin; j < col_end; ++j)
if (Acol_indices[j] != Index )
Avalues[j] = 0.0;
// Zero out the RHS
rb[Index] = 0.0;
} else {
// Zero out the column which is associated with the zero'ed row
for (std::size_t j = col_begin; j < col_end; ++j)
if(scaling_factors[ Acol_indices[j] ] == 0 )
Avalues[j] = 0.0;
}
});
}
/**
* @brief Applies the constraints with master-slave relation matrix (RHS only)
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rb The RHS vector
*/
void ApplyRHSConstraints(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemVectorType& rb
) override
{
KRATOS_TRY
if (rModelPart.MasterSlaveConstraints().size() != 0) {
BuildMasterSlaveConstraints(rModelPart);
// We compute the transposed matrix of the global relation matrix
TSystemMatrixType T_transpose_matrix(mT.size2(), mT.size1());
SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, mT, 1.0);
// Project the residual onto the master space: rb = T^T * rb
TSystemVectorType b_modified(rb.size());
TSparseSpace::Mult(T_transpose_matrix, rb, b_modified);
TSparseSpace::Copy(b_modified, rb);
// Apply diagonal values on slaves
// (active slave rows get a zero RHS; inactive slaves are skipped)
IndexPartition<std::size_t>(mSlaveIds.size()).for_each([&](std::size_t Index){
const IndexType slave_equation_id = mSlaveIds[Index];
if (mInactiveSlaveDofs.find(slave_equation_id) == mInactiveSlaveDofs.end()) {
rb[slave_equation_id] = 0.0;
}
});
}
KRATOS_CATCH("")
}
/**
* @brief Applies the constraints with master-slave relation matrix
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rb The RHS vector
*/
void ApplyConstraints(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rb
) override
{
KRATOS_TRY
if (rModelPart.MasterSlaveConstraints().size() != 0) {
BuildMasterSlaveConstraints(rModelPart);
// We compute the transposed matrix of the global relation matrix
TSystemMatrixType T_transpose_matrix(mT.size2(), mT.size1());
SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, mT, 1.0);
// Condense the RHS: rb = T^T * rb
TSystemVectorType b_modified(rb.size());
TSparseSpace::Mult(T_transpose_matrix, rb, b_modified);
TSparseSpace::Copy(b_modified, rb);
// Condense the LHS in two sparse products: rA = T^T * rA * T
TSystemMatrixType auxiliar_A_matrix(mT.size2(), rA.size2());
SparseMatrixMultiplicationUtility::MatrixMultiplication(T_transpose_matrix, rA, auxiliar_A_matrix); //auxiliar = T_transpose * rA
T_transpose_matrix.resize(0, 0, false); //free memory
SparseMatrixMultiplicationUtility::MatrixMultiplication(auxiliar_A_matrix, mT, rA); //A = auxilar * T NOTE: here we are overwriting the old A matrix!
auxiliar_A_matrix.resize(0, 0, false); //free memory
// Slave rows are singular after condensation: place the max diagonal on
// them to keep the system well conditioned
const double max_diag = GetMaxDiagonal(rA);
// Apply diagonal values on slaves
IndexPartition<std::size_t>(mSlaveIds.size()).for_each([&](std::size_t Index){
const IndexType slave_equation_id = mSlaveIds[Index];
if (mInactiveSlaveDofs.find(slave_equation_id) == mInactiveSlaveDofs.end()) {
rA(slave_equation_id, slave_equation_id) = max_diag;
rb[slave_equation_id] = 0.0;
}
});
}
KRATOS_CATCH("")
}
/**
* @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
*/
void Clear() override
{
    // Release base-class storage first, then the constraint-related members
    // owned by this class (relation matrix, constant vector, id bookkeeping).
    BaseType::Clear();
    mT.resize(0, 0, false);
    mConstantVector.resize(0, false);
    mSlaveIds.clear();
    mMasterIds.clear();
    mInactiveSlaveDofs.clear();
}
/**
 * @brief This function is designed to be called once to perform all the checks needed
 * on the input provided. Checks can be "expensive" as the function is designed
 * to catch user's errors.
 * @note No builder-specific checks are currently implemented; the method
 * always returns 0 (success).
 * @param rModelPart The model part of the problem to solve
 * @return 0 all ok
 */
int Check(ModelPart& rModelPart) override
{
KRATOS_TRY
return 0;
KRATOS_CATCH("");
}
/**
 * @brief This method provides the defaults parameters to avoid conflicts between the different constructors
 * @details The defaults of this class are merged with the (missing) defaults of
 * the base class, so derived settings always win over base settings.
 * @return The default parameters
 */
Parameters GetDefaultParameters() const override
{
Parameters default_parameters = Parameters(R"(
{
"name" : "block_builder_and_solver",
"block_builder" : true,
"diagonal_values_for_dirichlet_dofs" : "use_max_diagonal",
"silent_warnings" : false
})");
// Getting base class default parameters and adding any entry not defined above
const Parameters base_default_parameters = BaseType::GetDefaultParameters();
default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
return default_parameters;
}
/**
 * @brief Returns the name of the class as used in the settings (snake_case format)
 * @note Must match the "name" entry of GetDefaultParameters().
 * @return The name of the class
 */
static std::string Name()
{
return "block_builder_and_solver";
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string (the class name, used for logging).
std::string Info() const override
{
return "ResidualBasedBlockBuilderAndSolver";
}
/// Print information about this object (writes Info() to the stream).
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data. NOTE: currently identical to PrintInfo, no member data is printed.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
TSystemMatrixType mT; /// Matrix containing the global relation for the constraints (rows are indexed by slave equation ids)
TSystemVectorType mConstantVector; /// Vector containing the rigid movement (constant part) of the constraints
std::vector<IndexType> mSlaveIds; /// The equation ids of the slave dofs
std::vector<IndexType> mMasterIds; /// The equation ids of the master dofs
std::unordered_set<IndexType> mInactiveSlaveDofs; /// The set containing the slave dofs of inactive constraints
double mScaleFactor = 1.0; /// The manually set scale factor
SCALING_DIAGONAL mScalingDiagonal; /// The scaling strategy applied to the diagonal for Dirichlet dofs (see AssignSettings)
Flags mOptions; /// Internal flags (e.g. SILENT_WARNINGS)
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
 * @brief Assembles the global RHS vector from all active elements and conditions,
 * without applying Dirichlet boundary conditions.
 * @param pScheme The scheme used to compute the local RHS contributions
 * @param rModelPart The model part of the problem to solve
 * @param b The global RHS vector being accumulated (AssembleRHS adds atomically,
 * so no critical section is needed)
 */
void BuildRHSNoDirichlet(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemVectorType& b)
{
KRATOS_TRY
//Getting the Elements
ElementsArrayType& pElements = rModelPart.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = rModelPart.Conditions();
const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
// assemble all elements
//for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
const int nelements = static_cast<int>(pElements.size());
// Each thread gets its own private copies of the local contribution containers
#pragma omp parallel firstprivate(nelements, RHS_Contribution, EquationId)
{
// nowait: threads may proceed to the conditions loop without a barrier here
#pragma omp for schedule(guided, 512) nowait
for (int i=0; i<nelements; i++) {
typename ElementsArrayType::iterator it = pElements.begin() + i;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool element_is_active = true;
if( (it)->IsDefined(ACTIVE) ) {
element_is_active = (it)->Is(ACTIVE);
}
if(element_is_active) {
//calculate elemental Right Hand Side Contribution
pScheme->CalculateRHSContribution(*it, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
AssembleRHS(b, RHS_Contribution, EquationId);
}
}
LHS_Contribution.resize(0, 0, false);
RHS_Contribution.resize(0, false);
// assemble all conditions
const int nconditions = static_cast<int>(ConditionsArray.size());
#pragma omp for schedule(guided, 512)
for (int i = 0; i<nconditions; i++) {
auto it = ConditionsArray.begin() + i;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool condition_is_active = true;
if( (it)->IsDefined(ACTIVE) ) {
condition_is_active = (it)->Is(ACTIVE);
}
if(condition_is_active) {
//calculate elemental contribution
pScheme->CalculateRHSContribution(*it, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
AssembleRHS(b, RHS_Contribution, EquationId);
}
}
}
KRATOS_CATCH("")
}
/**
 * @brief Builds the sparsity pattern of the constraint relation matrix mT.
 * @details For every active MasterSlaveConstraint the master equation ids are
 * collected per slave row. Rows that stay empty correspond to master dofs; the
 * rest are slave dofs. The CSR structure of mT is then allocated with a zeroed
 * diagonal entry per row (values are filled later in BuildMasterSlaveConstraints).
 * @param rModelPart The model part of the problem to solve
 */
virtual void ConstructMasterSlaveConstraintsStructure(ModelPart& rModelPart)
{
    if (rModelPart.MasterSlaveConstraints().size() > 0) {
        Timer::Start("ConstraintsRelationMatrixStructure");
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // Vector containing the localization in the system of the different terms
        DofsVectorType slave_dof_list, master_dof_list;

        // Constraint initial iterator
        const auto it_const_begin = rModelPart.MasterSlaveConstraints().begin();
        std::vector<std::unordered_set<IndexType>> indices(BaseType::mDofSet.size());

        // One lock per row of the global graph
        std::vector<LockObject> lock_array(indices.size());

        #pragma omp parallel firstprivate(slave_dof_list, master_dof_list)
        {
            Element::EquationIdVectorType slave_ids(3);
            Element::EquationIdVectorType master_ids(3);
            // Thread-local graph: slave equation id -> set of master equation ids
            std::unordered_map<IndexType, std::unordered_set<IndexType>> temp_indices;

            #pragma omp for schedule(guided, 512) nowait
            for (int i_const = 0; i_const < static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i_const) {
                auto it_const = it_const_begin + i_const;

                // Detect if the constraint is active or not. If the user did not make any choice the constraint
                // It is active by default
                bool constraint_is_active = true;
                if( it_const->IsDefined(ACTIVE) ) {
                    constraint_is_active = it_const->Is(ACTIVE);
                }

                if(constraint_is_active) {
                    it_const->EquationIdVector(slave_ids, master_ids, r_current_process_info);

                    // Slave DoFs: each slave row couples to every master of the constraint
                    for (auto &id_i : slave_ids) {
                        temp_indices[id_i].insert(master_ids.begin(), master_ids.end());
                    }
                }
            }

            // Merging all the temporal indices into the global graph.
            // NOTE: iterate the map entries directly. The previous index-based loop
            // (temp_indices[i] for i in [0, temp_indices.size())) was broken:
            // unordered_map::operator[] inserts missing keys, so the map grew while
            // being traversed (the size-based loop never terminated) and positions
            // were merged instead of equation ids.
            for (auto& r_pair : temp_indices) {
                const IndexType row = r_pair.first;
                lock_array[row].lock();
                indices[row].insert(r_pair.second.begin(), r_pair.second.end());
                lock_array[row].unlock();
            }
        }

        // Classify each dof: empty row -> master, otherwise slave
        mSlaveIds.clear();
        mMasterIds.clear();
        for (int i = 0; i < static_cast<int>(indices.size()); ++i) {
            if (indices[i].size() == 0) // Master dof!
                mMasterIds.push_back(i);
            else // Slave dof
                mSlaveIds.push_back(i);
            indices[i].insert(i); // Ensure that the diagonal is there in T
        }

        // Count the row sizes
        std::size_t nnz = 0;
        for (IndexType i = 0; i < indices.size(); ++i)
            nnz += indices[i].size();

        mT = TSystemMatrixType(indices.size(), indices.size(), nnz);
        mConstantVector.resize(indices.size(), false);

        double *Tvalues = mT.value_data().begin();
        IndexType *Trow_indices = mT.index1_data().begin();
        IndexType *Tcol_indices = mT.index2_data().begin();

        // Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
        Trow_indices[0] = 0;
        for (int i = 0; i < static_cast<int>(mT.size1()); i++)
            Trow_indices[i + 1] = Trow_indices[i] + indices[i].size();

        // Fill the column indices (sorted per row, as CSR requires) and zero the values
        IndexPartition<std::size_t>(mT.size1()).for_each([&](std::size_t Index){
            const IndexType row_begin = Trow_indices[Index];
            const IndexType row_end = Trow_indices[Index + 1];
            IndexType k = row_begin;
            for (auto it = indices[Index].begin(); it != indices[Index].end(); ++it) {
                Tcol_indices[k] = *it;
                Tvalues[k] = 0.0;
                k++;
            }

            indices[Index].clear(); //deallocating the memory
            std::sort(&Tcol_indices[row_begin], &Tcol_indices[row_end]);
        });

        mT.set_filled(indices.size() + 1, nnz);

        Timer::Stop("ConstraintsRelationMatrixStructure");
    }
}
/**
 * @brief Computes the values of the constraint relation matrix mT and the
 * constant vector mConstantVector from all MasterSlaveConstraints.
 * @details Active constraints assemble their local transformation matrix into
 * the slave rows of mT (thread safe, via AssembleRowContribution/AtomicAdd).
 * Slave dofs of inactive constraints are collected into mInactiveSlaveDofs.
 * Master dofs and inactive slave dofs receive an identity diagonal in mT and a
 * zero constant. The sparsity of mT must already have been built by
 * ConstructMasterSlaveConstraintsStructure.
 * @param rModelPart The model part of the problem to solve
 */
virtual void BuildMasterSlaveConstraints(ModelPart& rModelPart)
{
KRATOS_TRY
TSparseSpace::SetToZero(mT);
TSparseSpace::SetToZero(mConstantVector);
// The current process info
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// Vector containing the localization in the system of the different terms
DofsVectorType slave_dof_list, master_dof_list;
// Contributions to the system
Matrix transformation_matrix = LocalSystemMatrixType(0, 0);
Vector constant_vector = LocalSystemVectorType(0);
// Vector containing the localization in the system of the different terms
Element::EquationIdVectorType slave_equation_ids, master_equation_ids;
const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
// We clear the set
mInactiveSlaveDofs.clear();
// Each thread works on private copies of the local containers
#pragma omp parallel firstprivate(transformation_matrix, constant_vector, slave_equation_ids, master_equation_ids)
{
std::unordered_set<IndexType> auxiliar_inactive_slave_dofs;
#pragma omp for schedule(guided, 512)
for (int i_const = 0; i_const < number_of_constraints; ++i_const) {
auto it_const = rModelPart.MasterSlaveConstraints().begin() + i_const;
// Detect if the constraint is active or not. If the user did not make any choice the constraint
// It is active by default
bool constraint_is_active = true;
if (it_const->IsDefined(ACTIVE))
constraint_is_active = it_const->Is(ACTIVE);
if (constraint_is_active) {
it_const->CalculateLocalSystem(transformation_matrix, constant_vector, r_current_process_info);
it_const->EquationIdVector(slave_equation_ids, master_equation_ids, r_current_process_info);
for (IndexType i = 0; i < slave_equation_ids.size(); ++i) {
const IndexType i_global = slave_equation_ids[i];
// Assemble matrix row
AssembleRowContribution(mT, transformation_matrix, i_global, i, master_equation_ids);
// Assemble constant vector (atomic: several constraints may share a slave)
const double constant_value = constant_vector[i];
double& r_value = mConstantVector[i_global];
AtomicAdd(r_value, constant_value);
}
} else { // Taking into account inactive constraints
it_const->EquationIdVector(slave_equation_ids, master_equation_ids, r_current_process_info);
auxiliar_inactive_slave_dofs.insert(slave_equation_ids.begin(), slave_equation_ids.end());
}
}
// We merge all the sets in one thread
#pragma omp critical
{
mInactiveSlaveDofs.insert(auxiliar_inactive_slave_dofs.begin(), auxiliar_inactive_slave_dofs.end());
}
}
// Setting the master dofs into the T and C system (identity relation, no offset)
for (auto eq_id : mMasterIds) {
mConstantVector[eq_id] = 0.0;
mT(eq_id, eq_id) = 1.0;
}
// Setting inactive slave dofs in the T and C system (treated as unconstrained)
for (auto eq_id : mInactiveSlaveDofs) {
mConstantVector[eq_id] = 0.0;
mT(eq_id, eq_id) = 1.0;
}
KRATOS_CATCH("")
}
/**
 * @brief Builds the CSR sparsity pattern of the system matrix A and fills its
 * values with zero.
 * @details The row graph is collected from the equation ids of all elements,
 * conditions and (if present) master-slave constraints, protected by one lock
 * per row. Afterwards the compressed storage is allocated and the column
 * indices of each row are sorted (required by AssembleRowContribution's
 * position-based search).
 * @param pScheme The scheme providing the equation ids
 * @param A The system matrix whose structure is created
 * @param rModelPart The model part of the problem to solve
 */
virtual void ConstructMatrixStructure(
typename TSchemeType::Pointer pScheme,
TSystemMatrixType& A,
ModelPart& rModelPart)
{
//filling with zero the matrix (creating the structure)
Timer::Start("MatrixStructure");
const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
const std::size_t equation_size = BaseType::mEquationSystemSize;
std::vector< LockObject > lock_array(equation_size);
std::vector<std::unordered_set<std::size_t> > indices(equation_size);
// Pre-reserve a plausible row size to limit rehashing during insertion
block_for_each(indices, [](std::unordered_set<std::size_t>& rIndices){
rIndices.reserve(40);
});
Element::EquationIdVectorType ids(3, 0);
// Every element couples all of its dofs with each other
block_for_each(rModelPart.Elements(), ids, [&](Element& rElem, Element::EquationIdVectorType& rIdsTLS){
pScheme->EquationId(rElem, rIdsTLS, CurrentProcessInfo);
for (std::size_t i = 0; i < rIdsTLS.size(); i++) {
lock_array[rIdsTLS[i]].lock();
auto& row_indices = indices[rIdsTLS[i]];
row_indices.insert(rIdsTLS.begin(), rIdsTLS.end());
lock_array[rIdsTLS[i]].unlock();
}
});
// Same coupling for conditions
block_for_each(rModelPart.Conditions(), ids, [&](Condition& rCond, Element::EquationIdVectorType& rIdsTLS){
pScheme->EquationId(rCond, rIdsTLS, CurrentProcessInfo);
for (std::size_t i = 0; i < rIdsTLS.size(); i++) {
lock_array[rIdsTLS[i]].lock();
auto& row_indices = indices[rIdsTLS[i]];
row_indices.insert(rIdsTLS.begin(), rIdsTLS.end());
lock_array[rIdsTLS[i]].unlock();
}
});
// Constraints only add the diagonal entry of each slave and master dof here
if (rModelPart.MasterSlaveConstraints().size() != 0) {
struct TLS
{
Element::EquationIdVectorType master_ids = Element::EquationIdVectorType(3,0);
Element::EquationIdVectorType slave_ids = Element::EquationIdVectorType(3,0);
};
TLS tls;
block_for_each(rModelPart.MasterSlaveConstraints(), tls, [&](MasterSlaveConstraint& rConst, TLS& rTls){
rConst.EquationIdVector(rTls.slave_ids, rTls.master_ids, CurrentProcessInfo);
for (std::size_t i = 0; i < rTls.slave_ids.size(); i++) {
lock_array[rTls.slave_ids[i]].lock();
auto& row_indices = indices[rTls.slave_ids[i]];
row_indices.insert(rTls.slave_ids[i]);
lock_array[rTls.slave_ids[i]].unlock();
}
for (std::size_t i = 0; i < rTls.master_ids.size(); i++) {
lock_array[rTls.master_ids[i]].lock();
auto& row_indices = indices[rTls.master_ids[i]];
row_indices.insert(rTls.master_ids[i]);
lock_array[rTls.master_ids[i]].unlock();
}
});
}
//destroy locks
lock_array = std::vector< LockObject >();
//count the row sizes
unsigned int nnz = 0;
for (unsigned int i = 0; i < indices.size(); i++) {
nnz += indices[i].size();
}
A = CompressedMatrixType(indices.size(), indices.size(), nnz);
double* Avalues = A.value_data().begin();
std::size_t* Arow_indices = A.index1_data().begin();
std::size_t* Acol_indices = A.index2_data().begin();
//filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
Arow_indices[0] = 0;
for (int i = 0; i < static_cast<int>(A.size1()); i++) {
Arow_indices[i+1] = Arow_indices[i] + indices[i].size();
}
// Fill column indices (sorted per row) and zero the values, one row per task
IndexPartition<std::size_t>(A.size1()).for_each([&](std::size_t i){
const unsigned int row_begin = Arow_indices[i];
const unsigned int row_end = Arow_indices[i+1];
unsigned int k = row_begin;
for (auto it = indices[i].begin(); it != indices[i].end(); it++) {
Acol_indices[k] = *it;
Avalues[k] = 0.0;
k++;
}
indices[i].clear(); //deallocating the memory
std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
});
A.set_filled(indices.size()+1, nnz);
Timer::Stop("MatrixStructure");
}
/**
 * @brief Assembles a local LHS/RHS pair into the global system (thread safe).
 * @param A Global system matrix (CSR, structure already built)
 * @param b Global RHS vector
 * @param LHS_Contribution Local left-hand-side matrix
 * @param RHS_Contribution Local right-hand-side vector
 * @param EquationId Global equation ids of the local dofs
 */
void Assemble(
    TSystemMatrixType& A,
    TSystemVectorType& b,
    const LocalSystemMatrixType& LHS_Contribution,
    const LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId
    )
{
    const unsigned int n_local_dofs = LHS_Contribution.size1();

    for (unsigned int i = 0; i < n_local_dofs; ++i) {
        const unsigned int global_row = EquationId[i];

        // RHS: accumulate atomically into the global vector
        AtomicAdd(b[global_row], RHS_Contribution(i));

        // LHS: scatter the local row into the global CSR row (atomic per entry)
        AssembleRowContribution(A, LHS_Contribution, global_row, i, EquationId);
    }
}
//**************************************************************************
/**
 * @brief Assembles only a local LHS matrix into the global system matrix.
 * @param rA The global system matrix (CSR, structure already built)
 * @param rLHSContribution The local left-hand-side matrix
 * @param rEquationId Global equation ids of the local dofs
 */
void AssembleLHS(
    TSystemMatrixType& rA,
    const LocalSystemMatrixType& rLHSContribution,
    Element::EquationIdVectorType& rEquationId
    )
{
    // Scatter every local row into its corresponding global row
    const SizeType n_rows = rLHSContribution.size1();
    for (IndexType row = 0; row < n_rows; ++row) {
        AssembleRowContribution(rA, rLHSContribution, rEquationId[row], row, rEquationId);
    }
}
//**************************************************************************
/**
 * @brief Assembles a local RHS vector into the global system vector.
 * @param b The global RHS vector
 * @param RHS_Contribution The local right-hand-side vector
 * @param EquationId Global equation ids of the local dofs
 */
void AssembleRHS(
    TSystemVectorType& b,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId
    )
{
    const unsigned int n_local_dofs = RHS_Contribution.size();

    // ASSEMBLING THE SYSTEM VECTOR: atomic accumulation keeps this thread safe
    for (unsigned int i = 0; i < n_local_dofs; ++i) {
        AtomicAdd(b[EquationId[i]], RHS_Contribution[i]);
    }
}
/**
 * @brief Adds row i_local of the local matrix Alocal into row i of the global
 * CSR matrix A. Thread safe: each entry is accumulated with AtomicAdd.
 * @details Exploits spatial locality in the sorted column index array: the
 * position found for the previous equation id is used as starting point of a
 * linear forward/backward search for the next one.
 * @note All ids in EquationId are assumed to exist in row i of A (the structure
 * must have been built accordingly), otherwise the searches read out of bounds.
 * @param A The global CSR matrix
 * @param Alocal The local matrix being assembled
 * @param i Global row index
 * @param i_local Local row index within Alocal
 * @param EquationId Global column ids of the local columns
 */
inline void AssembleRowContribution(TSystemMatrixType& A, const Matrix& Alocal, const unsigned int i, const unsigned int i_local, Element::EquationIdVectorType& EquationId)
{
double* values_vector = A.value_data().begin();
std::size_t* index1_vector = A.index1_data().begin();
std::size_t* index2_vector = A.index2_data().begin();
size_t left_limit = index1_vector[i];
// size_t right_limit = index1_vector[i+1];
//find the first entry (search forward from the beginning of the row)
size_t last_pos = ForwardFind(EquationId[0],left_limit,index2_vector);
size_t last_found = EquationId[0];
double& r_a = values_vector[last_pos];
const double& v_a = Alocal(i_local,0);
AtomicAdd(r_a, v_a);
//now find all of the other entries, searching from the previous position
size_t pos = 0;
for (unsigned int j=1; j<EquationId.size(); j++) {
unsigned int id_to_find = EquationId[j];
if(id_to_find > last_found) {
pos = ForwardFind(id_to_find,last_pos+1,index2_vector);
} else if(id_to_find < last_found) {
pos = BackwardFind(id_to_find,last_pos-1,index2_vector);
} else {
pos = last_pos;
}
double& r = values_vector[pos];
const double& v = Alocal(i_local,j);
AtomicAdd(r, v);
last_found = id_to_find;
last_pos = pos;
}
}
/**
 * @brief Returns the scale value used for the diagonal entries of Dirichlet dofs.
 * @details The value depends on the configured scaling strategy: 1.0 (no
 * scaling), a value prescribed in the process info (BUILD_SCALE_FACTOR), the
 * mean diagonal norm, or (default) the maximum absolute diagonal value.
 * @param rModelPart The problem model part
 * @param rA The LHS matrix
 * @return The scale norm
 */
double GetScaleNorm(
    ModelPart& rModelPart,
    TSystemMatrixType& rA
    )
{
    if (mScalingDiagonal == SCALING_DIAGONAL::NO_SCALING) {
        return 1.0;
    }

    if (mScalingDiagonal == SCALING_DIAGONAL::CONSIDER_PRESCRIBED_DIAGONAL) {
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
        KRATOS_ERROR_IF_NOT(r_current_process_info.Has(BUILD_SCALE_FACTOR)) << "Scale factor not defined at process info" << std::endl;
        return r_current_process_info.GetValue(BUILD_SCALE_FACTOR);
    }

    if (mScalingDiagonal == SCALING_DIAGONAL::CONSIDER_NORM_DIAGONAL) {
        return GetDiagonalNorm(rA)/static_cast<double>(rA.size1());
    }

    // CONSIDER_MAX_DIAGONAL and any other value fall back to the max diagonal
    // return TSparseSpace::TwoNorm(rA)/static_cast<double>(rA.size1());
    return GetMaxDiagonal(rA);
}
/**
 * @brief This method returns the Euclidean (2-)norm of the diagonal of the matrix
 * @param rA The LHS matrix
 * @return sqrt(sum_i rA(i,i)^2), computed in parallel with a sum reduction
 */
double GetDiagonalNorm(TSystemMatrixType& rA)
{
double diagonal_norm = 0.0;
// Parallel sum of the squared diagonal entries
diagonal_norm = IndexPartition<std::size_t>(TSparseSpace::Size1(rA)).for_each<SumReduction<double>>([&](std::size_t Index){
return std::pow(rA(Index,Index), 2);
});
return std::sqrt(diagonal_norm);
}
/**
 * @brief This method returns the average between the maximum and the minimum
 * absolute diagonal values (NOT the max value, despite the method name history)
 * @param rA The LHS matrix
 * @return 0.5 * (max + min) of the absolute diagonal values
 */
double GetAveragevalueDiagonal(TSystemMatrixType& rA)
{
return 0.5 * (GetMaxDiagonal(rA) + GetMinDiagonal(rA));
}
/**
 * @brief This method returns the maximum absolute value of the diagonal
 * @param rA The LHS matrix
 * @return The diagonal max (absolute) value
 */
double GetMaxDiagonal(TSystemMatrixType& rA)
{
// // NOTE: Reduction failing in MSVC
// double max_diag = 0.0;
// #pragma omp parallel for reduction(max:max_diag)
// for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
// max_diag = std::max(max_diag, std::abs(rA(i,i)));
// }
// return max_diag;
// Creating a buffer for parallel vector fill: each thread tracks its own
// maximum in a private slot, avoiding races without an OpenMP max reduction
// (which fails on MSVC, see the note above)
const int num_threads = ParallelUtilities::GetNumThreads();
Vector max_vector(num_threads, 0.0);
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
const int id = OpenMPUtils::ThisThread();
const double abs_value_ii = std::abs(rA(i,i));
if (abs_value_ii > max_vector[id])
max_vector[id] = abs_value_ii;
}
// Serial reduction over the per-thread maxima
double max_diag = 0.0;
for(int i = 0; i < num_threads; ++i) {
max_diag = std::max(max_diag, max_vector[i]);
}
return max_diag;
}
/**
 * @brief This method returns the minimum absolute value of the diagonal
 * @param rA The LHS matrix
 * @return The diagonal min (absolute) value
 */
double GetMinDiagonal(TSystemMatrixType& rA)
{
// // NOTE: Reduction failing in MSVC
// double min_diag = std::numeric_limits<double>::max();
// #pragma omp parallel for reduction(min:min_diag)
// for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
// min_diag = std::min(min_diag, std::abs(rA(i,i)));
// }
// return min_diag;
// Creating a buffer for parallel vector fill: each thread tracks its own
// minimum in a private slot, avoiding races without an OpenMP min reduction
// (which fails on MSVC, see the note above)
const int num_threads = ParallelUtilities::GetNumThreads();
Vector min_vector(num_threads, std::numeric_limits<double>::max());
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
const int id = OpenMPUtils::ThisThread();
const double abs_value_ii = std::abs(rA(i,i));
if (abs_value_ii < min_vector[id])
min_vector[id] = abs_value_ii;
}
// Serial reduction over the per-thread minima
double min_diag = std::numeric_limits<double>::max();
for(int i = 0; i < num_threads; ++i) {
min_diag = std::min(min_diag, min_vector[i]);
}
return min_diag;
}
/**
 * @brief This method assigns settings to member variables
 * @param ThisParameters Parameters that are assigned to the member variables
 */
void AssignSettings(const Parameters ThisParameters) override
{
BaseType::AssignSettings(ThisParameters);
// Validate and set the diagonal scaling strategy for Dirichlet dofs
const std::string& r_diagonal_values_for_dirichlet_dofs = ThisParameters["diagonal_values_for_dirichlet_dofs"].GetString();
std::set<std::string> available_options_for_diagonal = {"no_scaling","use_max_diagonal","use_diagonal_norm","defined_in_process_info"};
if (available_options_for_diagonal.find(r_diagonal_values_for_dirichlet_dofs) == available_options_for_diagonal.end()) {
std::stringstream msg;
msg << "Currently prescribed diagonal values for dirichlet dofs : " << r_diagonal_values_for_dirichlet_dofs << "\n";
msg << "Admissible values for the diagonal scaling are : no_scaling, use_max_diagonal, use_diagonal_norm, or defined_in_process_info" << "\n";
KRATOS_ERROR << msg.str() << std::endl;
}
// The first option will not consider any scaling (the diagonal values will be replaced with 1)
if (r_diagonal_values_for_dirichlet_dofs == "no_scaling") {
mScalingDiagonal = SCALING_DIAGONAL::NO_SCALING;
} else if (r_diagonal_values_for_dirichlet_dofs == "use_max_diagonal") {
mScalingDiagonal = SCALING_DIAGONAL::CONSIDER_MAX_DIAGONAL;
} else if (r_diagonal_values_for_dirichlet_dofs == "use_diagonal_norm") { // On this case the norm of the diagonal will be considered
mScalingDiagonal = SCALING_DIAGONAL::CONSIDER_NORM_DIAGONAL;
} else { // Otherwise we will assume we impose a numerical value
mScalingDiagonal = SCALING_DIAGONAL::CONSIDER_PRESCRIBED_DIAGONAL;
}
mOptions.Set(SILENT_WARNINGS, ThisParameters["silent_warnings"].GetBool());
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
 * @brief Appends candidate to v only if it is not already contained (linear search).
 * @param v The vector to append to
 * @param candidate The value to insert if absent
 */
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
    for (std::vector<std::size_t>::const_iterator it = v.begin(); it != v.end(); ++it) {
        if (*it == candidate) {
            return; // already present, nothing to do
        }
    }
    v.push_back(candidate);
}
//******************************************************************************************
//******************************************************************************************
/**
 * @brief Splits the range [0, number_of_rows) into number_of_threads contiguous chunks.
 * @details partitions[i] holds the first row of chunk i; the remainder rows end
 * up in the last chunk, since partitions.back() is pinned to number_of_rows.
 * @param number_of_threads Number of chunks to create
 * @param number_of_rows Total number of rows to distribute
 * @param partitions Output vector of chunk boundaries (size number_of_threads + 1)
 */
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, DenseVector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads + 1);
    const int chunk_size = number_of_rows / number_of_threads;

    partitions[0] = 0;
    for (unsigned int i = 1; i < number_of_threads; ++i) {
        partitions[i] = partitions[i - 1] + chunk_size;
    }
    partitions[number_of_threads] = number_of_rows;
}
/**
 * @brief Linear forward scan of index_vector, starting at position start,
 * for the first position holding id_to_find.
 * @note The id must be present at or after start, otherwise the scan reads
 * past the end of the array.
 */
inline unsigned int ForwardFind(const unsigned int id_to_find,
                                const unsigned int start,
                                const size_t* index_vector)
{
    unsigned int pos = start;
    for (; index_vector[pos] != id_to_find; ++pos) {
        // keep scanning forward until the id is found
    }
    return pos;
}
/**
 * @brief Linear backward scan of index_vector, starting at position start,
 * for the first position holding id_to_find.
 * @note The id must be present at or before start, otherwise the scan runs
 * below index 0.
 */
inline unsigned int BackwardFind(const unsigned int id_to_find,
                                 const unsigned int start,
                                 const size_t* index_vector)
{
    unsigned int pos = start;
    for (; index_vector[pos] != id_to_find; --pos) {
        // keep scanning backward until the id is found
    }
    return pos;
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedBlockBuilderAndSolver */
///@}
///@name Type Definitions
///@{
// Here one should use the KRATOS_CREATE_LOCAL_FLAG macro, but it does not play nice with template
// parameters, so the local flag is defined manually at flag position 0.
template<class TSparseSpace, class TDenseSpace, class TLinearSolver>
const Kratos::Flags ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::SILENT_WARNINGS(Kratos::Flags::Create(0));
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
|
GB_sort.c | //------------------------------------------------------------------------------
// GB_sort: sort all vectors in a matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB_sort.h"
#include "GB_werk.h"
#include "GB_transpose.h"
#include "GB_ek_slice.h"
// macros:
// GB_SORT (func) defined as GB_sort_func_TYPE_ascend or _descend,
// GB_msort_ISO_ascend or _descend,
// or GB_msort_func_UDT
// GB_TYPE bool, int8_, ... or GB_void for UDT
// GB_ADDR(A,p) A+p for builtin, A + p * GB_SIZE otherwise
// GB_SIZE size of each entry: sizeof (GB_TYPE) for built-in
// GB_GET(x,X,i) x = (op->xtype) X [i]
// GB_COPY(A,i,C,k) A [i] = C [k]
// GB_SWAP(A,i,k) swap A [i] and A [k]
// GB_LT compare two entries, x < y
//------------------------------------------------------------------------------
// macros for all built-in types
//------------------------------------------------------------------------------
#define GB_SORT_UDT 0
#define GB_ADDR(A,i) ((A) + (i))
#define GB_GET(x,A,i) GB_TYPE x = A [i]
#define GB_COPY(A,i,B,j) A [i] = B [j]
#define GB_SIZE sizeof (GB_TYPE)
#define GB_SWAP(A,i,j) { GB_TYPE t = A [i] ; A [i] = A [j] ; A [j] = t ; }
//------------------------------------------------------------------------------
// ascending sort for built-in types
//------------------------------------------------------------------------------
#define GB_LT(less,a,i,b,j) \
less = (((a) < (b)) ? true : (((a) == (b)) ? ((i) < (j)) : false))
#define GB_TYPE bool
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_BOOL)
#include "GB_sort_template.c"
#define GB_TYPE int8_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_INT8)
#include "GB_sort_template.c"
#define GB_TYPE int16_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_INT16)
#include "GB_sort_template.c"
#define GB_TYPE int32_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_INT32)
#include "GB_sort_template.c"
#define GB_TYPE int64_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_INT64)
#include "GB_sort_template.c"
#define GB_TYPE uint8_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_UINT8)
#include "GB_sort_template.c"
#define GB_TYPE uint16_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_UINT16)
#include "GB_sort_template.c"
#define GB_TYPE uint32_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_UINT32)
#include "GB_sort_template.c"
#define GB_TYPE uint64_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_UINT64)
#include "GB_sort_template.c"
#define GB_TYPE float
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_FP32)
#include "GB_sort_template.c"
#define GB_TYPE double
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_FP64)
#include "GB_sort_template.c"
//------------------------------------------------------------------------------
// descending sort for built-in types
//------------------------------------------------------------------------------
#undef GB_LT
#define GB_LT(less,a,i,b,j) \
less = (((a) > (b)) ? true : (((a) == (b)) ? ((i) < (j)) : false))
#define GB_TYPE bool
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_BOOL)
#include "GB_sort_template.c"
#define GB_TYPE int8_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_INT8)
#include "GB_sort_template.c"
#define GB_TYPE int16_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_INT16)
#include "GB_sort_template.c"
#define GB_TYPE int32_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_INT32)
#include "GB_sort_template.c"
#define GB_TYPE int64_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_INT64)
#include "GB_sort_template.c"
#define GB_TYPE uint8_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_UINT8)
#include "GB_sort_template.c"
#define GB_TYPE uint16_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_UINT16)
#include "GB_sort_template.c"
#define GB_TYPE uint32_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_UINT32)
#include "GB_sort_template.c"
#define GB_TYPE uint64_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_UINT64)
#include "GB_sort_template.c"
#define GB_TYPE float
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_FP32)
#include "GB_sort_template.c"
#define GB_TYPE double
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_FP64)
#include "GB_sort_template.c"
//------------------------------------------------------------------------------
// macros for user-defined types and when typecasting is performed
//------------------------------------------------------------------------------
#undef GB_ADDR
#undef GB_GET
#undef GB_COPY
#undef GB_SIZE
#undef GB_SWAP
#undef GB_LT
#define GB_ADDR(A,i) ((A) + (i) * csize)
#define GB_GET(x,A,i) GB_void x [GB_VLA(xsize)] ; \
fcast (x, GB_ADDR (A, i), csize)
#define GB_COPY(A,i,B,j) memcpy (GB_ADDR (A, i), GB_ADDR (B, j), csize)
#define GB_SIZE csize
#define GB_TYPE GB_void
#define GB_SWAP(A,i,j) \
{ \
GB_void t [GB_VLA(csize)] ; /* declare the scalar t */ \
memcpy (t, GB_ADDR (A, i), csize) ; /* t = A [i] */ \
GB_COPY (A, i, A, j) ; /* A [i] = A [j] */ \
memcpy (GB_ADDR (A, j), t, csize) ; /* A [j] = t */ \
}
#define GB_LT(less,a,i,b,j) \
{ \
flt (&less, a, b) ; /* less = (a < b) */ \
if (!less) \
{ \
/* check for equality and tie-break on index */ \
bool more ; \
flt (&more, b, a) ; /* more = (b < a) */ \
less = (more) ? false : ((i) < (j)) ; \
} \
}
#undef GB_SORT_UDT
#define GB_SORT_UDT 1
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _UDT)
#include "GB_sort_template.c"
//------------------------------------------------------------------------------
// GB_sort
//------------------------------------------------------------------------------
// GB_FREE_WORKSPACE: free the workspace of GB_sort (the ek_slice werk space
// and the temporary matrix T used when the caller passes C as NULL).
#undef GB_FREE_WORKSPACE
#define GB_FREE_WORKSPACE \
{ \
GB_WERK_POP (C_ek_slicing, int64_t) ; \
GB_Matrix_free (&T) ; \
}
// GB_FREE_ALL: free the workspace and the output matrices on error. The
// caller-provided C is freed only when it was actually provided (C_is_NULL is
// false); the temporary T is freed by GB_FREE_WORKSPACE above.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_FREE_WORKSPACE ; \
if (!C_is_NULL) GB_phbix_free (C) ; \
GB_phbix_free (P) ; \
}
// redefine to use the revised GB_FREE_ALL above:
#include "GB_static_header.h"
//------------------------------------------------------------------------------
// GB_sort: sort the entries within each row or column of a matrix
//------------------------------------------------------------------------------

// Sorts the entries within each row of A (A_transpose false) or each column
// (A_transpose true), using the boolean comparator op.  On output, C (if
// non-NULL) holds the sorted values and P (if non-NULL) holds, as its values,
// the original indices of the sorted entries.  At least one of C and P must
// be present.  C == A (sort in place) is permitted.

GrB_Info GB_sort
(
    // output:
    GrB_Matrix C,               // matrix with sorted vectors on output
    GrB_Matrix P,               // matrix with permutations on output
    // input:
    GrB_BinaryOp op,            // comparator for the sort
    GrB_Matrix A,               // matrix to sort
    const bool A_transpose,     // false: sort each row, true: sort each column
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT_MATRIX_OK (A, "A for GB_sort", GB0) ;
    ASSERT_BINARYOP_OK (op, "op for GB_sort", GB0) ;

    // T is a temporary matrix with a statically allocated header; it stands in
    // for C when the caller does not request the sorted values (C == NULL).
    GrB_Matrix T = NULL ;
    struct GB_Matrix_opaque T_header ;
    GB_WERK_DECLARE (C_ek_slicing, int64_t) ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    bool C_is_NULL = (C == NULL) ;
    if (C_is_NULL && P == NULL)
    {
        // either C, or P, or both must be present
        return (GrB_NULL_POINTER) ;
    }

    GrB_Type atype = A->type ;
    GrB_Type ctype = (C_is_NULL) ? atype : C->type ;
    GrB_Type ptype = (P == NULL) ? GrB_INT64 : P->type ;
    if (op->ztype != GrB_BOOL || op->xtype != op->ytype || atype != ctype
        || !(ptype == GrB_INT64 || ptype == GrB_UINT64)
        || !GB_Type_compatible (atype, op->xtype))
    {
        // op must return bool, and its inputs x and y must have the same type;
        // the types of A and C must match exactly; P must be INT64 or UINT64;
        // A and C must be typecasted to the input type of the op.
        return (GrB_DOMAIN_MISMATCH) ;
    }

    int64_t anrows = GB_NROWS (A) ;
    int64_t ancols = GB_NCOLS (A) ;
    if ((C != NULL && (GB_NROWS (C) != anrows || GB_NCOLS (C) != ancols)) ||
        (P != NULL && (GB_NROWS (P) != anrows || GB_NCOLS (P) != ancols)))
    {
        // C and P must have the same dimensions as A
        return (GrB_DIMENSION_MISMATCH) ;
    }

    bool A_iso = A->iso ;
    bool sort_in_place = (A == C) ;

    // free any prior content of C and P (when in place, A's content IS C's)
    GB_phbix_free (P) ;
    if (!sort_in_place)
    {
        GB_phbix_free (C) ;
    }

    //--------------------------------------------------------------------------
    // make a copy of A, unless it is aliased with C
    //--------------------------------------------------------------------------

    if (C_is_NULL)
    {
        // C is a temporary matrix, which is freed when done
        GB_CLEAR_STATIC_HEADER (T, &T_header) ;
        C = T ;
    }

    if (A_transpose)
    {
        // sort each column: ensure C is in sparse or hypersparse CSC format
        if (A->is_csc)
        {
            // A is already CSC
            if (!sort_in_place)
            {
                // C = A (deep copy)
                GB_OK (GB_dup_worker (&C, A_iso, A, true, atype, Context)) ;
            }
        }
        else
        {
            // A is CSR but C must be CSC
            if (sort_in_place)
            {
                // A = A'
                GB_OK (GB_transpose_in_place (A, true, Context)) ;
            }
            else
            {
                // C = A'
                GB_OK (GB_transpose_cast (C, atype, true, A, false, Context)) ;
            }
        }
    }
    else
    {
        // sort each row: ensure C is in sparse or hypersparse CSR format
        if (!A->is_csc)
        {
            // A is already CSR
            if (!sort_in_place)
            {
                // C = A (deep copy)
                GB_OK (GB_dup_worker (&C, A_iso, A, true, atype, Context)) ;
            }
        }
        else
        {
            // A is CSC but C must be CSR
            if (sort_in_place)
            {
                // A = A'
                GB_OK (GB_transpose_in_place (A, false, Context)) ;
            }
            else
            {
                // C = A'
                GB_OK (GB_transpose_cast (C, atype, false, A, false, Context)) ;
            }
        }
    }

    // ensure C is sparse or hypersparse (the sort kernels below require it)
    if (GB_IS_BITMAP (C) || GB_IS_FULL (C))
    {
        GB_OK (GB_convert_any_to_sparse (C, Context)) ;
    }

    //--------------------------------------------------------------------------
    // sort C in place
    //--------------------------------------------------------------------------

    // Fast path: built-in < or > comparators on built-in types, no typecast.
    // Everything else goes through the generic UDT kernel below.
    GB_Opcode opcode = op->opcode ;
    GB_Type_code acode = atype->code ;
    if ((op->xtype == atype) && (op->ytype == atype) &&
        (opcode == GB_LT_binop_code || opcode == GB_GT_binop_code) &&
        (acode < GB_UDT_code))
    {

        //----------------------------------------------------------------------
        // no typecasting, using built-in < or > operators, builtin types
        //----------------------------------------------------------------------

        if (opcode == GB_LT_binop_code)
        {
            // ascending sort
            switch (acode)
            {
                case GB_BOOL_code :
                    GB_OK (GB(sort_matrix_ascend_BOOL  )(C, Context)) ; break ;
                case GB_INT8_code :
                    GB_OK (GB(sort_matrix_ascend_INT8  )(C, Context)) ; break ;
                case GB_INT16_code :
                    GB_OK (GB(sort_matrix_ascend_INT16 )(C, Context)) ; break ;
                case GB_INT32_code :
                    GB_OK (GB(sort_matrix_ascend_INT32 )(C, Context)) ; break ;
                case GB_INT64_code :
                    GB_OK (GB(sort_matrix_ascend_INT64 )(C, Context)) ; break ;
                case GB_UINT8_code :
                    GB_OK (GB(sort_matrix_ascend_UINT8 )(C, Context)) ; break ;
                case GB_UINT16_code :
                    GB_OK (GB(sort_matrix_ascend_UINT16)(C, Context)) ; break ;
                case GB_UINT32_code :
                    GB_OK (GB(sort_matrix_ascend_UINT32)(C, Context)) ; break ;
                case GB_UINT64_code :
                    GB_OK (GB(sort_matrix_ascend_UINT64)(C, Context)) ; break ;
                case GB_FP32_code :
                    GB_OK (GB(sort_matrix_ascend_FP32  )(C, Context)) ; break ;
                case GB_FP64_code :
                    GB_OK (GB(sort_matrix_ascend_FP64  )(C, Context)) ; break ;
                default:;
            }
        }
        else // opcode == GB_GT_binop_code
        {
            // descending sort
            switch (acode)
            {
                case GB_BOOL_code :
                    GB_OK (GB(sort_matrix_descend_BOOL  )(C, Context)) ; break ;
                case GB_INT8_code :
                    GB_OK (GB(sort_matrix_descend_INT8  )(C, Context)) ; break ;
                case GB_INT16_code :
                    GB_OK (GB(sort_matrix_descend_INT16 )(C, Context)) ; break ;
                case GB_INT32_code :
                    GB_OK (GB(sort_matrix_descend_INT32 )(C, Context)) ; break ;
                case GB_INT64_code :
                    GB_OK (GB(sort_matrix_descend_INT64 )(C, Context)) ; break ;
                case GB_UINT8_code :
                    GB_OK (GB(sort_matrix_descend_UINT8 )(C, Context)) ; break ;
                case GB_UINT16_code :
                    GB_OK (GB(sort_matrix_descend_UINT16)(C, Context)) ; break ;
                case GB_UINT32_code :
                    GB_OK (GB(sort_matrix_descend_UINT32)(C, Context)) ; break ;
                case GB_UINT64_code :
                    GB_OK (GB(sort_matrix_descend_UINT64)(C, Context)) ; break ;
                case GB_FP32_code :
                    GB_OK (GB(sort_matrix_descend_FP32  )(C, Context)) ; break ;
                case GB_FP64_code :
                    GB_OK (GB(sort_matrix_descend_FP64  )(C, Context)) ; break ;
                default:;
            }
        }
    }
    else
    {

        //----------------------------------------------------------------------
        // typecasting, user-defined types, or unconventional operators
        //----------------------------------------------------------------------

        GB_OK (GB (sort_matrix_UDT) (C, op, Context)) ;
    }

    //--------------------------------------------------------------------------
    // construct the final indices
    //--------------------------------------------------------------------------

    // After the sort, C->i holds the original indices (the permutation).
    // Build the new pattern: each entry's index becomes its 0-based position
    // within its vector.  The result is written to Ti, which is either C->i
    // itself (P not requested) or a freshly allocated P->i.

    int64_t cnz = GB_nnz (C) ;
    int64_t cnvec = C->nvec ;
    int64_t *restrict Ti = NULL ;
    if (P == NULL)
    {
        // P is not constructed; use C->i to construct the new indices
        Ti = C->i ;
    }
    else
    {
        // allocate P->i and use it to construct the new indices
        P->i = GB_MALLOC (cnz, int64_t, &(P->i_size)) ;
        if (P->i == NULL)
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }
        Ti = P->i ;
    }

    // slice C for parallel iteration; GB_SLICE_MATRIX defines C_ntasks,
    // C_nthreads, and the kfirst_Cslice/klast_Cslice/pstart_Cslice arrays
    int C_nthreads, C_ntasks ;
    GB_SLICE_MATRIX (C, 1, chunk) ;
    int64_t *restrict Cp = C->p ;
    const int64_t cvlen = C->vlen ;
    int tid ;
    #pragma omp parallel for num_threads(C_nthreads) schedule(static,1)
    for (tid = 0 ; tid < C_ntasks ; tid++)
    {
        int64_t kfirst = kfirst_Cslice [tid] ;
        int64_t klast = klast_Cslice [tid] ;
        for (int64_t k = kfirst ; k <= klast ; k++)
        {
            // pC0 is the start of vector k; entries within it get indices
            // 0, 1, 2, ... in order
            const int64_t pC0 = Cp [k] ;
            int64_t pC_start, pC_end ;
            GB_get_pA (&pC_start, &pC_end, tid, k,
                kfirst, klast, pstart_Cslice, Cp, cvlen) ;
            for (int64_t pC = pC_start ; pC < pC_end ; pC++)
            {
                Ti [pC] = pC - pC0 ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // construct P
    //--------------------------------------------------------------------------

    bool C_is_hyper = GB_IS_HYPERSPARSE (C) ;
    if (P != NULL)
    {
        P->is_csc = C->is_csc ;
        P->nvec = C->nvec ;
        P->nvec_nonempty = C->nvec_nonempty ;
        P->iso = false ;
        P->vlen = C->vlen ;
        P->vdim = C->vdim ;
        if (C_is_NULL)
        {
            // the values of C are not needed. The indices of C become the
            // values of P, Cp becomes Pp, and Ch (if present) becomes Ph.
            // Ownership is transferred; the C (T) components are set to NULL
            // so they are not freed twice.
            P->x = C->i ; C->i = NULL ; P->x_size = C->i_size ;
            P->p = C->p ; C->p = NULL ; P->p_size = C->p_size ;
            P->h = C->h ; C->h = NULL ; P->h_size = C->h_size ;
            P->plen = C->plen ;
        }
        else
        {
            // C is required on output. The indices of C are copied and
            // become the values of P. Cp is copied to Pp, and Ch (if present)
            // is copied to Ph.
            P->plen = cnvec ;
            P->x = GB_MALLOC (cnz, int64_t, &(P->x_size)) ; // x:OK
            P->p = GB_MALLOC (cnvec+1, int64_t, &(P->p_size)) ;
            P->h = NULL ;
            if (C_is_hyper)
            {
                P->h = GB_MALLOC (cnvec, int64_t, &(P->h_size)) ;
            }
            if (P->x == NULL || P->p == NULL || (C_is_hyper && P->h == NULL))
            {
                // out of memory
                GB_FREE_ALL ;
                return (GrB_OUT_OF_MEMORY) ;
            }
            // copy from C to P
            GB_memcpy (P->x, C->i, cnz * sizeof (int64_t), nthreads_max) ;
            GB_memcpy (P->p, C->p, (cnvec+1) * sizeof (int64_t), nthreads_max) ;
            if (C_is_hyper)
            {
                GB_memcpy (P->h, C->h, cnvec * sizeof (int64_t), nthreads_max) ;
            }
        }
        P->magic = GB_MAGIC ;
    }

    //--------------------------------------------------------------------------
    // finalize the pattern of C
    //--------------------------------------------------------------------------

    if (!C_is_NULL && P != NULL)
    {
        // copy P->i (the positional indices built above) into C->i
        GB_memcpy (C->i, P->i, cnz * sizeof (int64_t), nthreads_max) ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    if (!C_is_NULL) { ASSERT_MATRIX_OK (C, "C output of GB_sort", GB0) ; }
    if (P != NULL) { ASSERT_MATRIX_OK (P, "P output of GB_sort", GB0) ; }
    return (GrB_SUCCESS) ;
}
|
a2a_impl.h | //Implementations of methods in a2a.h
//Return the memory footprint in MB of an A2A vector of type VWtype with the given
//arguments and field setup: (number of modes) * (per-field size).
template<typename VWtype>
inline double VW_Mbyte_size(const A2AArg &_args, const typename VWtype::FieldInputParamType &field_setup_params){
  typedef typename VWtype::DilutionType DilutionType;
  typedef typename VWtype::FermionFieldType FermionFieldType;
  DilutionType dilution(_args);
  const double field_Mbytes = double(FermionFieldType::byte_size(field_setup_params))/(1024.*1024.);
  return double(dilution.getNmodes()) * field_Mbytes;
}
//Memory footprint in MB of an A2AvectorV; delegates to the generic V/W calculation
template< typename mf_Policies>
double A2AvectorV<mf_Policies>::Mbyte_size(const A2AArg &_args, const FieldInputParamType &field_setup_params){
  typedef A2AvectorV<mf_Policies> VectorType;
  return VW_Mbyte_size<VectorType>(_args, field_setup_params);
}
//Memory footprint in MB of an A2AvectorVfftw; delegates to the generic V/W calculation
template< typename mf_Policies>
double A2AvectorVfftw<mf_Policies>::Mbyte_size(const A2AArg &_args, const FieldInputParamType &field_setup_params){
  typedef A2AvectorVfftw<mf_Policies> VectorType;
  return VW_Mbyte_size<VectorType>(_args, field_setup_params);
}
//Memory footprint in MB of an A2AvectorW. W stores nl fermion fields (low modes) plus
//nhits compact complex fields (high-mode random sources), so it cannot use the generic
//VW_Mbyte_size which assumes a single field type for all modes.
template< typename mf_Policies>
double A2AvectorW<mf_Policies>::Mbyte_size(const A2AArg &_args, const FieldInputParamType &field_setup_params){
  FullyPackedIndexDilution dil(_args);
  const double MB = 1024.*1024.;
  const double ffield_Mbytes = double(FermionFieldType::byte_size(field_setup_params))/MB;
  const double cfield_Mbytes = double(ComplexFieldType::byte_size(field_setup_params))/MB;
  return dil.getNl() * ffield_Mbytes + dil.getNhits() * cfield_Mbytes;
}
//Memory footprint in MB of an A2AvectorWfftw (fully diluted, uniform field type);
//delegates to the generic V/W calculation
template< typename mf_Policies>
double A2AvectorWfftw<mf_Policies>::Mbyte_size(const A2AArg &_args, const FieldInputParamType &field_setup_params){
  typedef A2AvectorWfftw<mf_Policies> VectorType;
  return VW_Mbyte_size<VectorType>(_args, field_setup_params);
}
//Policy controlling per-mode memory management during V FFTs.
//"Basic": all modes are assumed pre-allocated and are kept; both hooks are no-ops.
struct VFFTfieldPolicyBasic{
  //Called just before a mode of the output is written
  template<typename T>
  static inline void actionOutputMode(T &v, const int i){}
  //Called just after a mode of the input has been consumed
  template<typename T>
  static inline void actionInputMode(T &v, const int i){}
};
//Policy controlling per-mode memory management during V FFTs.
//"AllocFree" (destructive): each output mode is allocated on demand and each
//input mode is freed once consumed, keeping peak memory use low.
struct VFFTfieldPolicyAllocFree{
  //Allocate output mode i just before it is written
  template<typename T>
  static inline void actionOutputMode(T &v, const int i){
    v.allocMode(i);
  }
  //Free input mode i once it has been consumed
  template<typename T>
  static inline void actionInputMode(T &v, const int i){
    v.freeMode(i);
  }
};
//Implementation of the forward V FFT: to = FFT(from), with an optional per-mode
//pre-operation. The FFTfieldPolicy hooks allocate each output mode and free each
//input mode (no-ops for the Basic policy), enabling the destructive variant.
template<typename OutputType, typename InputType, typename FFTfieldPolicy>
struct _V_fft_impl{
  typedef typename InputType::FermionFieldType FermionFieldType;

  //to: output Vfftw-like vector; from: input V-like vector
  //mode_preop: optional operation applied to each mode prior to the FFT (may be NULL)
  static inline void fft(OutputType &to, InputType &from, fieldOperation<FermionFieldType>* mode_preop){
    if(!UniqueID()){ printf("Doing V FFT\n"); fflush(stdout); }
    typedef typename FermionFieldType::InputParamType FieldParamType;
    FieldParamType field_setup = from.getMode(0).getDimPolParams();
    FermionFieldType tmp(field_setup); //scratch field holding the preop output
    Float preop_time = 0;
    Float fft_time = 0;
    const bool fft_dirs[4] = {true,true,true,false}; //transform the three spatial directions only, not time
    for(int mode=0;mode<from.getNmodes();mode++){
      FermionFieldType const* init_gather_from = &from.getMode(mode);
      if(mode_preop != NULL){
        Float dtime = dclock();
        (*mode_preop)(from.getMode(mode),tmp);
        init_gather_from = &tmp; //FFT the transformed field instead of the original
        preop_time += dclock()-dtime;
      }
      Float dtime = dclock();
      FFTfieldPolicy::actionOutputMode(to, mode); //alloc
#ifndef MEMTEST_MODE
      cps::fft_opt(to.getMode(mode), *init_gather_from, fft_dirs);
#endif
      fft_time += dclock() - dtime;
      FFTfieldPolicy::actionInputMode(from, mode); //free
    }
    if(!UniqueID()){ printf("Finishing V FFT\n"); fflush(stdout); }
    print_time("A2AvectorVfftw::fft","Preop",preop_time);
    print_time("A2AvectorVfftw::fft","FFT",fft_time);
  }
};
//Set this object to be the fast Fourier transform of the input V field.
//mode_preop, if non-NULL, is applied to each mode prior to the FFT.
template< typename mf_Policies>
void A2AvectorVfftw<mf_Policies>::fft(const A2AvectorV<mf_Policies> &from, fieldOperation<FermionFieldType>* mode_preop){
  typedef _V_fft_impl<A2AvectorVfftw<mf_Policies>, const A2AvectorV<mf_Policies>, VFFTfieldPolicyBasic> Impl;
  Impl::fft(*this, from, mode_preop);
}
//As fft(), but destroys the input: each mode of 'from' is freed once it has been
//transformed and each output mode is allocated on demand. Manual-allocation policies only.
template< typename mf_Policies>
template<typename P>
void A2AvectorVfftw<mf_Policies>::destructivefft(A2AvectorV<P> &from, fieldOperation<typename P::FermionFieldType>* mode_preop, VFFTW_ENABLE_IF_MANUAL_ALLOC(P) ){
  typedef _V_fft_impl<A2AvectorVfftw<P>, A2AvectorV<P>, VFFTfieldPolicyAllocFree> Impl;
  Impl::fft(*this, from, mode_preop);
}
//Implementation of the inverse V FFT: to = IFFT(from), with an optional per-mode
//post-operation. The FFTfieldPolicy hooks allocate each output mode and free each
//input mode (no-ops for the Basic policy), enabling the destructive variant.
template<typename OutputType, typename InputType, typename FFTfieldPolicy>
struct _V_invfft_impl{
  typedef typename InputType::FermionFieldType FermionFieldType;

  //to: output V-like vector; from: input FFTed V-like vector
  //mode_postop: optional operation applied to each mode after the inverse FFT (may be NULL)
  static inline void inversefft(OutputType &to, InputType &from, fieldOperation<FermionFieldType>* mode_postop){
    if(!UniqueID()){ printf("Doing V inverse FFT\n"); fflush(stdout); }
    typedef typename FermionFieldType::InputParamType FieldParamType;
    FieldParamType field_setup = from.getMode(0).getDimPolParams();
    FermionFieldType tmp(field_setup); //scratch: IFFT output when a postop is to be applied
    Float postop_time = 0;
    Float fft_time = 0;
    const bool fft_dirs[4] = {true,true,true,false}; //transform the three spatial directions only, not time
    for(int mode=0;mode<from.getNmodes();mode++){
      FFTfieldPolicy::actionOutputMode(to, mode); //alloc
      //With a postop the inverse FFT goes into tmp first; the postop then writes the final mode
      FermionFieldType* out = mode_postop == NULL ? &to.getMode(mode) : &tmp;
      Float dtime = dclock();
#ifndef MEMTEST_MODE
      cps::fft_opt(*out, from.getMode(mode), fft_dirs, true);
#endif
      //Fix: accumulate the FFT time immediately after the transform. Previously this was
      //done at the end of the loop body, so the input-free and postop time was wrongly
      //counted as FFT time (inconsistent with _W_invfft_impl and the postop_time counter).
      fft_time += dclock() - dtime;
      FFTfieldPolicy::actionInputMode(from, mode); //free (comment previously mislabeled this "alloc")
      if(mode_postop != NULL){
        Float dtime = dclock();
        (*mode_postop)(tmp,to.getMode(mode));
        postop_time += dclock()-dtime;
      }
    }
    if(!UniqueID()){ printf("Finishing V invert FFT\n"); fflush(stdout); }
    print_time("A2AvectorVfftw::inversefft","FFT",fft_time);
    print_time("A2AvectorVfftw::inversefft","Postop",postop_time);
  }
};
//Invert the FFT: to = IFFT(*this). mode_postop, if non-NULL, is applied to each
//mode after the inverse transform.
template< typename mf_Policies>
void A2AvectorVfftw<mf_Policies>::inversefft(A2AvectorV<Policies> &to, fieldOperation<FermionFieldType>* mode_postop) const{
  typedef _V_invfft_impl<A2AvectorV<Policies>, const A2AvectorVfftw<mf_Policies>, VFFTfieldPolicyBasic> Impl;
  Impl::inversefft(to, *this, mode_postop);
}
//As inversefft(), but destroys the input: each mode of *this is freed once consumed
//and each output mode of 'to' is allocated on demand. Manual-allocation policies only.
template< typename mf_Policies>
template<typename P>
void A2AvectorVfftw<mf_Policies>::destructiveInversefft(A2AvectorV<P> &to, fieldOperation<typename P::FermionFieldType>* mode_postop, VFFTW_ENABLE_IF_MANUAL_ALLOC(P) ){
  //Fix: the output impl parameter must be A2AvectorV<P> to match the type of 'to'
  //(mirroring destructivefft). The previous A2AvectorV<Policies> only compiled when
  //P happened to equal Policies.
  _V_invfft_impl<A2AvectorV<P>, A2AvectorVfftw<mf_Policies>, VFFTfieldPolicyAllocFree>::inversefft(to,*this,mode_postop);
}
//Policy controlling per-mode memory management during W FFTs. W distinguishes
//low (exact) modes from high (stochastic) modes, hence four hooks.
//"Basic": all modes are assumed pre-allocated and are kept; all hooks are no-ops.
struct WFFTfieldPolicyBasic{
  template<typename T>
  static inline void actionOutputLowMode(T &v, const int i){}
  template<typename T>
  static inline void actionOutputHighMode(T &v, const int i){}
  template<typename T>
  static inline void actionInputLowMode(T &v, const int i){}
  template<typename T>
  static inline void actionInputHighMode(T &v, const int i){}
};
//Policy controlling per-mode memory management during W FFTs.
//"AllocFree" (destructive): output modes are allocated on demand and input modes
//are freed once consumed, keeping peak memory use low.
struct WFFTfieldPolicyAllocFree{
  //Allocate low-mode i of the output just before it is written
  template<typename T>
  static inline void actionOutputLowMode(T &v, const int i){
    v.allocLowMode(i);
  }
  //Allocate high-mode i of the output just before it is written
  template<typename T>
  static inline void actionOutputHighMode(T &v, const int i){
    v.allocHighMode(i);
  }
  //Free low-mode i of the input once it has been consumed
  template<typename T>
  static inline void actionInputLowMode(T &v, const int i){
    v.freeLowMode(i);
  }
  //Free high-mode i of the input once it has been consumed
  template<typename T>
  static inline void actionInputHighMode(T &v, const int i){
    v.freeHighMode(i);
  }
};
//Implementation of the forward W FFT: to = FFT(from), with an optional per-mode
//pre-operation. The FFTfieldPolicy hooks allocate output modes and free input
//modes (no-ops for the Basic policy), enabling the destructive variant.
template<typename OutputType, typename InputType, typename FFTfieldPolicy>
struct _W_fft_impl{
  typedef typename InputType::FermionFieldType FermionFieldType;

  //to: output Wfftw-like vector; from: input W-like vector
  //mode_preop: optional operation applied to each mode prior to the FFT (may be NULL)
  inline static void fft(OutputType &to, InputType &from, fieldOperation<FermionFieldType>* mode_preop){
    if(!UniqueID()){ printf("Doing W FFT\n"); fflush(stdout); }
    typedef typename FermionFieldType::InputParamType FieldParamType;
    FieldParamType field_setup = from.getWh(0).getDimPolParams();
    FermionFieldType tmp(field_setup), tmp2(field_setup); //tmp: preop scratch; tmp2: spin-color-diluted source
    Float preop_time = 0;
    Float fft_time = 0;
    const bool fft_dirs[4] = {true,true,true,false}; //transform the three spatial directions only, not time
    //Do wl (low/exact modes)
    for(int mode=0;mode<from.getNl();mode++){
      FermionFieldType const* init_gather_from = &from.getWl(mode);
      if(mode_preop != NULL){
        Float dtime = dclock();
        (*mode_preop)(from.getWl(mode),tmp);
        init_gather_from = &tmp; //FFT the transformed field instead of the original
        preop_time += dclock()-dtime;
      }
      FFTfieldPolicy::actionOutputLowMode(to, mode); //alloc
      Float dtime = dclock();
#ifndef MEMTEST_MODE
      cps::fft_opt(to.getWl(mode), *init_gather_from, fft_dirs);
#endif
      fft_time += dclock() - dtime;
      FFTfieldPolicy::actionInputLowMode(from, mode); //free
    }
    //Do wh. First we need to uncompact the spin/color index as this is acted upon by the operator
    for(int hit=0;hit<from.getNhits();hit++){
      for(int sc=0;sc<12;sc++){ //spin/color dilution index
        from.getSpinColorDilutedSource(tmp2,hit,sc);
        FermionFieldType* init_gather_from = &tmp2;
        if(mode_preop != NULL){
          Float dtime = dclock();
          (*mode_preop)(tmp2,tmp);
          init_gather_from = &tmp;
          preop_time += dclock()-dtime;
        }
        Float dtime = dclock();
        FFTfieldPolicy::actionOutputHighMode(to, sc+12*hit); //alloc: output high modes are fully sc-diluted, 12 per hit
#ifndef MEMTEST_MODE
        cps::fft_opt(to.getWh(hit,sc), *init_gather_from, fft_dirs);
#endif
        fft_time += dclock()-dtime;
      }
      FFTfieldPolicy::actionInputHighMode(from, hit); //free: the compact input source is indexed per-hit only
    }
    if(!UniqueID()){ printf("Finishing W FFT\n"); fflush(stdout); }
    print_time("A2AvectorWfftw::fft","Preop",preop_time);
    print_time("A2AvectorWfftw::fft","FFT",fft_time);
  }
};
//Set this object to be the fast Fourier transform of the input W field.
//mode_preop, if non-NULL, is applied to each mode prior to the FFT.
template< typename mf_Policies>
void A2AvectorWfftw<mf_Policies>::fft(const A2AvectorW<mf_Policies> &from, fieldOperation<FermionFieldType>* mode_preop){
  typedef _W_fft_impl<A2AvectorWfftw<mf_Policies>, const A2AvectorW<mf_Policies>, WFFTfieldPolicyBasic> Impl;
  Impl::fft(*this, from, mode_preop);
}
//As fft(), but destroys the input: each mode of 'from' is freed once transformed and
//each output mode is allocated on demand. Manual-allocation policies only (P is used
//solely by the enable_if guard here).
template< typename mf_Policies>
template<typename P>
void A2AvectorWfftw<mf_Policies>::destructivefft(A2AvectorW<mf_Policies> &from, fieldOperation<FermionFieldType>* mode_preop, WFFTW_ENABLE_IF_MANUAL_ALLOC(P) ){
  typedef _W_fft_impl<A2AvectorWfftw<mf_Policies>, A2AvectorW<mf_Policies>, WFFTfieldPolicyAllocFree> Impl;
  Impl::fft(*this, from, mode_preop);
}
//Implementation of the inverse W FFT: to = IFFT(from), with an optional per-mode
//post-operation. High modes are re-compacted: the 12 spin-color-diluted FFT fields
//per hit collapse back into one compact complex field per hit.
template<typename OutputType, typename InputType, typename FFTfieldPolicy>
struct _W_invfft_impl{
  typedef typename InputType::FermionFieldType FermionFieldType;

  //to: output W-like vector; from: input FFTed W-like vector
  //mode_postop: optional operation applied to each mode after the inverse FFT (may be NULL)
  static inline void inversefft(OutputType &to, InputType &from, fieldOperation<FermionFieldType>* mode_postop){
    if(!UniqueID()){ printf("Doing W inverse FFT\n"); fflush(stdout); }
    typedef typename FermionFieldType::InputParamType FieldParamType;
    FieldParamType field_setup = from.getWh(0,0).getDimPolParams();
    FermionFieldType tmp(field_setup), tmp2(field_setup); //tmp: postop scratch; tmp2: raw IFFT output
    Float postop_time = 0;
    Float fft_time = 0;
    const bool fft_dirs[4] = {true,true,true,false}; //transform the three spatial directions only, not time
    //Do wl (low/exact modes)
    for(int mode=0;mode<from.getNl();mode++){
      FFTfieldPolicy::actionOutputLowMode(to, mode); //alloc
      //With a postop the inverse FFT goes into tmp first; the postop then writes the final mode
      FermionFieldType * unfft_to = mode_postop == NULL ? &to.getWl(mode) : &tmp;
      Float dtime = dclock();
#ifndef MEMTEST_MODE
      cps::fft_opt(*unfft_to, from.getWl(mode), fft_dirs, true);
#endif
      fft_time += dclock() - dtime;
      if(mode_postop != NULL){
        Float dtime = dclock();
        (*mode_postop)(tmp,to.getWl(mode));
        postop_time += dclock()-dtime;
      }
      FFTfieldPolicy::actionInputLowMode(from, mode); //free
    }
    //Do wh. First we need to uncompact the spin/color index as this is acted upon by the operator
    for(int hit=0;hit<from.getNhits();hit++){
      FFTfieldPolicy::actionOutputHighMode(to, hit); //alloc: output is the compact per-hit complex field
      //NOTE(review): ComplexFieldType is taken from InputType but to_hit comes from 'to' (OutputType);
      //the two policies presumably coincide -- confirm
      typename InputType::ComplexFieldType & to_hit = to.getWh(hit);
      //Only one spin-color component need be inverse-transformed: the diluted source was the same
      //random number on every sc index, so sc=0 suffices
      const int sc = 0;
      FermionFieldType * compress = mode_postop == NULL ? &tmp2 : &tmp;
      Float dtime = dclock();
#ifndef MEMTEST_MODE
      cps::fft_opt(tmp2, from.getWh(hit,sc), fft_dirs, true);
#endif
      fft_time += dclock()-dtime;
      if(mode_postop != NULL){
        Float dtime = dclock();
        (*mode_postop)(tmp2,tmp);
        postop_time += dclock()-dtime;
      }
      //Should give a multiple of the 12-component unit vector with 1 on index sc;
      //compact by keeping only component sc at each site
      #pragma omp parallel for
      for(int i=0;i<to_hit.nfsites();i++)
        *(to_hit.fsite_ptr(i)) = *(compress->fsite_ptr(i) + sc);
      for(int ssc=0;ssc<12;ssc++) FFTfieldPolicy::actionInputHighMode(from, ssc + 12*hit); //free for all sc
    }
    if(!UniqueID()){ printf("Finishing W inverse FFT\n"); fflush(stdout); }
    print_time("A2AvectorWfftw::fftinverse","FFT",fft_time);
    print_time("A2AvectorWfftw::fftinverse","Postop",postop_time);
  }
};
//Invert the FFT: to = IFFT(*this). mode_postop, if non-NULL, is applied to each
//mode after the inverse transform.
template< typename mf_Policies>
void A2AvectorWfftw<mf_Policies>::inversefft(A2AvectorW<mf_Policies> &to, fieldOperation<FermionFieldType>* mode_postop) const{
  typedef _W_invfft_impl<A2AvectorW<mf_Policies>, const A2AvectorWfftw<mf_Policies>, WFFTfieldPolicyBasic> Impl;
  Impl::inversefft(to, *this, mode_postop);
}
//As inversefft(), but destroys the input: modes of *this are freed once consumed and
//output modes of 'to' are allocated on demand. Manual-allocation policies only (P is
//used solely by the enable_if guard here).
template< typename mf_Policies>
template<typename P>
void A2AvectorWfftw<mf_Policies>::destructiveInversefft(A2AvectorW<mf_Policies> &to, fieldOperation<FermionFieldType>* mode_postop, WFFTW_ENABLE_IF_MANUAL_ALLOC(P)){
  typedef _W_invfft_impl<A2AvectorW<mf_Policies>, A2AvectorWfftw<mf_Policies>, WFFTfieldPolicyAllocFree> Impl;
  Impl::inversefft(to, *this, mode_postop);
}
//Generate the wh field. We store in a compact notation that knows nothing about any dilution we apply when generating V from this
//For reproducibility we want to generate the wh field in the same order that Daiqian did originally. Here nhit random numbers are generated for each site/flavor
template<typename ComplexFieldType, typename complex_class>
struct _set_wh_random_impl{};

//Specialization for scalar (non-SIMD) complex fields
template<typename ComplexFieldType>
struct _set_wh_random_impl<ComplexFieldType, complex_double_or_float_mark>{
  //wh: one compact random field per hit; type: random number distribution; nhits: number of stochastic hits
  static void doit(std::vector<PtrWrapper<ComplexFieldType> > &wh, const RandomType &type, const int nhits){
    typedef typename ComplexFieldType::FieldSiteType FieldSiteType;
    LRG.SetInterval(1, 0);
    int sites = wh[0]->nsites(), flavors = wh[0]->nflavors();
    //Loop over site/flavor, re-seating the RNG on that site's generator, then draw all
    //hits in sequence. NOTE(review): this site-then-hit draw order fixes reproducibility
    //with the original implementation -- do not reorder.
    for(int i = 0; i < sites*flavors; ++i) {
      int flav = i / sites;
      int st = i % sites;
      LRG.AssignGenerator(st,flav);
      for(int j = 0; j < nhits; ++j) {
        FieldSiteType* p = wh[j]->site_ptr(st,flav);
        RandomComplex<FieldSiteType>::rand(p,type,FOUR_D);
      }
    }
  }
};
//Fill the compact high-mode random source fields wh (one per hit) with random
//numbers drawn from the given distribution type
template< typename mf_Policies>
void A2AvectorW<mf_Policies>::setWhRandom(const RandomType &type){
  typedef typename mf_Policies::ComplexFieldType CField;
  typedef typename ComplexClassify<typename CField::FieldSiteType>::type CClass;
  _set_wh_random_impl<CField, CClass>::doit(wh,type,nhits);
}
//Get the diluted source with index dil_id, written into the (zeroed) field 'into'.
//We use the same set of random numbers for each spin and dilution as we do not need to rely on stochastic cancellation to separate them
//For legacy reasons we use different random numbers for the two G-parity flavors, although this is not strictly necessary
template< typename mf_Policies>
template<typename TargetFermionFieldType>
void A2AvectorW<mf_Policies>::getDilutedSource(TargetFermionFieldType &into, const int dil_id) const{
  typedef FieldSiteType mf_Complex;
  typedef typename TargetFermionFieldType::FieldSiteType TargetComplex;
  const char* fname = "getDilutedSource(...)";
  //Unpack the fully-diluted index into (hit, timeslice-block, spin-color, flavor)
  int hit, tblock, spin_color, flavor;
  StandardIndexDilution stdidx(getArgs());
  stdidx.indexUnmap(dil_id,hit,tblock,spin_color,flavor);
  VRB.Result("A2AvectorW", fname, "Generating random wall source %d = (%d, %d, %d, %d).\n ", dil_id, hit, tblock, flavor, spin_color);
  int tblock_origt = tblock * args.src_width; //global time coordinate at which this source block begins
  into.zero();
  //Only the node containing the source timeslices has work to do.
  //NOTE(review): this test assumes the whole src_width block lies on a single node in time -- confirm
  if(tblock_origt / GJP.TnodeSites() != GJP.TnodeCoor()){
    VRB.Result("A2AvectorW", fname, "Not on node\n ");
    return;
  }
  int tblock_origt_lcl = tblock_origt % GJP.TnodeSites(); //local time coordinate of the block start
  int src_size = GJP.VolNodeSites()/GJP.TnodeSites() * args.src_width; //size of source in units of complex numbers
#pragma omp parallel for
  for(int i=0;i<src_size;i++){
    //Decompose the linear index into the local 4D coordinate: x fastest, then y, z; the
    //remainder indexes the timeslice within the source block
    int x[4];
    int rem = i;
    x[0] = rem % GJP.XnodeSites(); rem /= GJP.XnodeSites();
    x[1] = rem % GJP.YnodeSites(); rem /= GJP.YnodeSites();
    x[2] = rem % GJP.ZnodeSites(); rem /= GJP.ZnodeSites();
    x[3] = tblock_origt_lcl + rem;
    TargetComplex *into_site = (TargetComplex*)(into.site_ptr(x,flavor) + spin_color);
    mf_Complex const* from_site = (mf_Complex*)wh[hit]->site_ptr(x,flavor); //note same random numbers for each spin/color!
    *into_site = *from_site;
  }
}
//Uncompact the stochastic source for the given hit into a full fermion field: the stored
//complex number at each site is placed on spin-color index sc_id, all other components zero.
//Needed because gauge fixing acts on the (spin-)color indices of the field.
template< typename mf_Policies>
void A2AvectorW<mf_Policies>::getSpinColorDilutedSource(FermionFieldType &into, const int hit, const int sc_id) const{
  const char* fname = "getSpinColorDilutedSource(...)";
  into.zero();
  const int nf = wh[hit]->nfsites(); //wh and into share the site mapping; only the site size differs
#pragma omp parallel for
  for(int s=0;s<nf;s++){
    *(into.fsite_ptr(s) + sc_id) = *(wh[hit]->fsite_ptr(s));
  }
}
//Test utility: fill V and W with uniform random field data (scalar complex-field overload).
//NOTE(review): fields are randomized in the fixed order wl, vl, wh, vh; the Grid/SIMD
//overload below draws its randoms in the same order so the two versions agree.
template<typename mf_Policies, typename my_enable_if<_equal<typename ComplexClassify<typename mf_Policies::ComplexType>::type, complex_double_or_float_mark>::value, int>::type = 0>
void randomizeVW(A2AvectorV<mf_Policies> &V, A2AvectorW<mf_Policies> &W){
  typedef typename mf_Policies::FermionFieldType FermionFieldType;
  typedef typename mf_Policies::ComplexFieldType ComplexFieldType;
  int nl = V.getNl();
  int nh = V.getNh(); //number of fully diluted high-mode indices
  int nhit = V.getNhits();
  //V and W must be dimensioned identically
  assert(nl == W.getNl());
  assert(nh == W.getNh());
  assert(nhit == W.getNhits());
  //Generate all random fields first (draw order matters for reproducibility), then import
  std::vector<FermionFieldType> wl(nl);
  for(int i=0;i<nl;i++) wl[i].setUniformRandom();
  std::vector<FermionFieldType> vl(nl);
  for(int i=0;i<nl;i++) vl[i].setUniformRandom();
  std::vector<ComplexFieldType> wh(nhit);
  for(int i=0;i<nhit;i++) wh[i].setUniformRandom();
  std::vector<FermionFieldType> vh(nh);
  for(int i=0;i<nh;i++) vh[i].setUniformRandom();
  for(int i=0;i<nl;i++){
    V.importVl(vl[i],i);
    W.importWl(wl[i],i);
  }
  for(int i=0;i<nh;i++)
    V.importVh(vh[i],i);
  for(int i=0;i<nhit;i++)
    W.importWh(wh[i],i);
}
//Test utility: fill V and W with uniform random field data (Grid/SIMD complex-field overload).
//Ensure this generates randoms in the same order as the scalar version above (wl, vl, wh, vh)
template<typename mf_Policies, typename my_enable_if<_equal<typename ComplexClassify<typename mf_Policies::ComplexType>::type, grid_vector_complex_mark>::value, int>::type = 0>
void randomizeVW(A2AvectorV<mf_Policies> &V, A2AvectorW<mf_Policies> &W){
  //Draw randoms into scalar-layout fields, then import (importField handles the SIMD layout conversion)
  typedef typename mf_Policies::FermionFieldType::FieldDimensionPolicy::EquivalentScalarPolicy ScalarDimensionPolicy;
  typedef CPSfermion4D<typename mf_Policies::ScalarComplexType, ScalarDimensionPolicy, DynamicFlavorPolicy, StandardAllocPolicy> ScalarFermionFieldType;
  typedef CPScomplex4D<typename mf_Policies::ScalarComplexType, ScalarDimensionPolicy, DynamicFlavorPolicy, StandardAllocPolicy> ScalarComplexFieldType;
  int nl = V.getNl();
  int nh = V.getNh(); //number of fully diluted high-mode indices
  int nhit = V.getNhits();
  //V and W must be dimensioned identically
  assert(nl == W.getNl());
  assert(nh == W.getNh());
  assert(nhit == W.getNhits());
  ScalarFermionFieldType tmp;
  ScalarComplexFieldType tmp_cmplx;
  for(int i=0;i<nl;i++){
    tmp.setUniformRandom();
    W.getWl(i).importField(tmp);
  }
  for(int i=0;i<nl;i++){
    tmp.setUniformRandom();
    V.getVl(i).importField(tmp);
  }
  for(int i=0;i<nhit;i++){
    tmp_cmplx.setUniformRandom();
    W.getWh(i).importField(tmp_cmplx);
  }
  for(int i=0;i<nh;i++){
    tmp.setUniformRandom();
    V.getVh(i).importField(tmp);
  }
}
//Determine which base FFT field realizes momentum p and the lattice shift needed to obtain it.
//On output shift[d] holds the (backwards) field shift in direction d; the return value is base_p or base_m.
template< typename FieldType>
FieldType const * getBaseAndShift(int shift[3], const int p[3], FieldType const *base_p, FieldType const *base_m){
  //With G-parity base_p has momentum +1 in each G-parity direction, base_m has momentum -1 in each G-parity direction.
  //Non-Gparity directions are assumed to have momentum 0
  //Units of momentum are 2pi/L for periodic BCs, pi/L for antiperiodic and pi/2L for Gparity
  //Without G-parity only base_p is ever used; with G-parity the base is chosen by the momentum components below
  FieldType const * out = GJP.Gparity() ? NULL : base_p;
  for(int d=0;d<3;d++){
    if(GJP.Bc(d) == BND_CND_GPARITY){
      //Type 1 : f_{p=4b+1}(n) = f_+1(n+b) // p \in {.. -7 , -3, 1, 5, 9 ..}
      //Type 2 : f_{p=4b-1}(n) = f_-1(n+b) // p \in {.. -5, -1, 3, 7 , 11 ..}
      //(The % tests below are exact-divisibility checks, so C++'s truncated division is safe for negative p)
      if( (p[d]-1) % 4 == 0 ){
        //Type 1
        int b = (p[d]-1)/4;
        shift[d] = -b; //shift f_+1 backwards by b
        if(out == NULL) out = base_p;
        //All G-parity directions must select the same base, else the momentum is inconsistent
        else if(out != base_p) ERR.General("","getBaseAndShift","Momentum (%d,%d,%d) appears to be invalid because momenta in different G-parity directions do not reside in the same set\n",p[0],p[1],p[2]);
      }else if( (p[d]+1) % 4 == 0 ){
        //Type 2
        int b = (p[d]+1)/4;
        shift[d] = -b; //shift f_-1 backwards by b
        if(out == NULL) out = base_m;
        else if(out != base_m) ERR.General("","getBaseAndShift","Momentum (%d,%d,%d) appears to be invalid because momenta in different G-parity directions do not reside in the same set\n",p[0],p[1],p[2]);
      }else ERR.General("","getBaseAndShift","Momentum (%d,%d,%d) appears to be invalid because one or more components in G-parity directions are not allowed\n",p[0],p[1],p[2]);
    }else{
      //f_b(n) = f_0(n+b)
      //Let the other directions decide on which base to use if some of them are G-parity dirs ; otherwise the pointer defaults to base_p above
      shift[d] = -p[d];
    }
  }
  if(!UniqueID()) printf("getBaseAndShift for p=(%d,%d,%d) determined shift=(%d,%d,%d) from ptr %c\n",p[0],p[1],p[2],shift[0],shift[1],shift[2],out == base_p ? 'p' : 'm');
  assert(out != NULL);
  return out;
}
//Use the relations between FFTs to obtain the FFT for a chosen quark momentum
//With G-parity BCs there are 2 disjoint sets of momenta hence there are 2 base FFTs
template< typename mf_Policies>
void A2AvectorWfftw<mf_Policies>::getTwistedFFT(const int p[3], A2AvectorWfftw<Policies> const *base_p, A2AvectorWfftw<Policies> const *base_m){
  Float time = -dclock();
  std::vector<int> shift(3);
  A2AvectorWfftw<mf_Policies> const* base = getBaseAndShift(&shift[0], p, base_p, base_m);
  if(base == NULL) ERR.General("A2AvectorWfftw","getTwistedFFT","Base pointer for twist momentum (%d,%d,%d) is NULL\n",p[0],p[1],p[2]);
  //Copy the base's mode fields, then apply the residual lattice shift if non-trivial.
  //NOTE(review): wl/wh assignments copy the stored field containers -- confirm deep/shallow semantics of PtrWrapper
  wl = base->wl;
  wh = base->wh;
  int nshift = 0;
  for(int i=0;i<3;i++) if(shift[i]) nshift++;
  if(nshift > 0){
    for(int i=0;i<this->getNmodes();i++)
      shiftPeriodicField( this->getMode(i), base->getMode(i), shift);
  }
  time += dclock();
  print_time("A2AvectorWfftw::getTwistedFFT","Twist",time);
}
//Apply a periodic lattice shift to every mode of this Wfftw, in place.
//A no-op (beyond timing) when the shift is zero in all three directions.
template< typename mf_Policies>
void A2AvectorWfftw<mf_Policies>::shiftFieldsInPlace(const std::vector<int> &shift){
  Float time = -dclock();
  bool nontrivial = false;
  for(int d=0;d<3;d++) if(shift[d]) nontrivial = true;
  if(nontrivial){
    const int nmodes = this->getNmodes();
    for(int m=0;m<nmodes;m++)
      shiftPeriodicField( this->getMode(m), this->getMode(m), shift);
  }
  print_time("A2AvectorWfftw::shiftFieldsInPlace","Total",time + dclock());
}
//A version of getTwistedFFT that directly shifts the base Wfftw rather than outputting into a separate storage
//Returns the pointer to the Wfftw acted upon and the *shift required to restore the Wfftw to it's original form*
template< typename mf_Policies>
std::pair< A2AvectorWfftw<mf_Policies>*, std::vector<int> > A2AvectorWfftw<mf_Policies>::inPlaceTwistedFFT(const int p[3], A2AvectorWfftw<mf_Policies> *base_p, A2AvectorWfftw<mf_Policies> *base_m){
  Float time = -dclock();
  std::vector<int> shift(3);
  A2AvectorWfftw<mf_Policies>* base = getBaseAndShift(&shift[0], p, base_p, base_m);
  //Fix: the error message previously reported the wrong function name ("getTwistedFFT")
  if(base == NULL) ERR.General("A2AvectorWfftw","inPlaceTwistedFFT","Base pointer for twist momentum (%d,%d,%d) is NULL\n",p[0],p[1],p[2]);
  base->shiftFieldsInPlace(shift);
  //Negate the applied shift so the caller can later undo it
  for(int i=0;i<3;i++) shift[i] = -shift[i];
  time += dclock();
  print_time("A2AvectorWfftw::inPlaceTwistedFFT","Twist",time);
  return std::pair< A2AvectorWfftw<mf_Policies>*, std::vector<int> >(base,shift);
}
//Use the relations between FFTs to obtain the V FFT for a chosen quark momentum.
//With G-parity BCs there are 2 disjoint sets of momenta hence there are 2 base FFTs.
template< typename mf_Policies>
void A2AvectorVfftw<mf_Policies>::getTwistedFFT(const int p[3], A2AvectorVfftw<Policies> const *base_p, A2AvectorVfftw<Policies> const *base_m){
  Float time = -dclock();
  std::vector<int> shift(3);
  A2AvectorVfftw<mf_Policies> const* base = getBaseAndShift(&shift[0], p, base_p, base_m);
  if(base == NULL) ERR.General("A2AvectorVfftw","getTwistedFFT","Base pointer for twist momentum (%d,%d,%d) is NULL\n",p[0],p[1],p[2]);
  //Copy the base's mode fields, then apply the residual lattice shift if non-trivial
  v = base->v;
  int nshift = 0;
  for(int i=0;i<3;i++) if(shift[i]) nshift++;
  if(nshift > 0){
    for(int i=0;i<this->getNmodes();i++)
      shiftPeriodicField( this->getMode(i), base->getMode(i), shift);
  }
  time += dclock();
  print_time("A2AvectorVfftw::getTwistedFFT","Twist",time);
}
//Apply a periodic lattice shift to every mode of this Vfftw, in place.
//A no-op (beyond timing) when the shift is zero in all three directions.
template< typename mf_Policies>
void A2AvectorVfftw<mf_Policies>::shiftFieldsInPlace(const std::vector<int> &shift){
  Float time = -dclock();
  bool nontrivial = false;
  for(int d=0;d<3;d++) if(shift[d]) nontrivial = true;
  if(nontrivial){
    const int nmodes = this->getNmodes();
    for(int m=0;m<nmodes;m++)
      shiftPeriodicField( this->getMode(m), this->getMode(m), shift);
  }
  print_time("A2AvectorVfftw::shiftFieldsInPlace","Total",time + dclock());
}
//A version of getTwistedFFT that directly shifts the base Vfftw rather than outputting into a separate storage
//Returns the pointer to the Vfftw acted upon and the *shift required to restore the Vfftw to it's original form*
template< typename mf_Policies>
std::pair< A2AvectorVfftw<mf_Policies>*, std::vector<int> > A2AvectorVfftw<mf_Policies>::inPlaceTwistedFFT(const int p[3], A2AvectorVfftw<mf_Policies> *base_p, A2AvectorVfftw<mf_Policies> *base_m){
  Float time = -dclock();
  std::vector<int> shift(3);
  A2AvectorVfftw<mf_Policies>* base = getBaseAndShift(&shift[0], p, base_p, base_m);
  //Fix: the error message previously reported the wrong class and function
  //("A2AvectorWfftw","getTwistedFFT") -- copy/paste from the W version
  if(base == NULL) ERR.General("A2AvectorVfftw","inPlaceTwistedFFT","Base pointer for twist momentum (%d,%d,%d) is NULL\n",p[0],p[1],p[2]);
  base->shiftFieldsInPlace(shift);
  //Negate the applied shift so the caller can later undo it
  for(int i=0;i<3;i++) shift[i] = -shift[i];
  time += dclock();
  print_time("A2AvectorVfftw::inPlaceTwistedFFT","Twist",time);
  return std::pair< A2AvectorVfftw<mf_Policies>*, std::vector<int> >(base,shift);
}
|
problem.p6.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
void evaluateBeta(double x, double y, double z, double *B, double *Bx, double *By, double *Bz){
double Bmin = 1.0;
double Bmax = 10.0;
double c2 = (Bmax-Bmin)/2; // coefficients to affect this transition
double c1 = (Bmax+Bmin)/2;
double c3 = 10.0; // how sharply (B)eta transitions
double xcenter = 0.50;
double ycenter = 0.50;
double zcenter = 0.50;
// calculate distance from center of the domain (0.5,0.5,0.5)
double r2 = pow((x-xcenter),2) + pow((y-ycenter),2) + pow((z-zcenter),2);
double r2x = 2.0*(x-xcenter);
double r2y = 2.0*(y-ycenter);
double r2z = 2.0*(z-zcenter);
//double r2xx = 2.0;
//double r2yy = 2.0;
//double r2zz = 2.0;
double r = pow(r2,0.5);
double rx = 0.5*r2x*pow(r2,-0.5);
double ry = 0.5*r2y*pow(r2,-0.5);
double rz = 0.5*r2z*pow(r2,-0.5);
//double rxx = 0.5*r2xx*pow(r2,-0.5) - 0.25*r2x*r2x*pow(r2,-1.5);
//double ryy = 0.5*r2yy*pow(r2,-0.5) - 0.25*r2y*r2y*pow(r2,-1.5);
//double rzz = 0.5*r2zz*pow(r2,-0.5) - 0.25*r2z*r2z*pow(r2,-1.5);
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*B = c1+c2*tanh( c3*(r-0.25) );
*Bx = c2*c3*rx*(1-pow(tanh( c3*(r-0.25) ),2));
*By = c2*c3*ry*(1-pow(tanh( c3*(r-0.25) ),2));
*Bz = c2*c3*rz*(1-pow(tanh( c3*(r-0.25) ),2));
}
//------------------------------------------------------------------------------------------------------------------------------
void evaluateU(double x, double y, double z, double *U, double *Ux, double *Uy, double *Uz, double *Uxx, double *Uyy, double *Uzz, int isPeriodic){
// should be continuous in u, u', u'', u''', and u'''' to guarantee high order and periodic boundaries
// v(w) = ???
// u(x,y,z) = v(x)v(y)v(z)
// If Periodic, then the integral of the RHS should sum to zero.
// Setting shift=1.0 should ensure that the integrals of X, Y, or Z should sum to zero...
// That should(?) make the integrals of u,ux,uy,uz,uxx,uyy,uzz sum to zero and thus make the integral of f sum to zero
// If dirichlet, then w(0)=w(1) = 0.0
// Setting shift to 0 should ensure that U(x,y,z) = 0 on boundary
// u = ax^6 + bx^5 + cx^4 + dx^3 + ex^2 + fx + g
// ux = 6ax^5 + 5bx^4 + 4cx^3 + 3dx^2 + 2ex + f
// uxx = 30ax^4 + 20bx^3 + 12cx^2 + 6dx + 2e
// a = 42.0
// b = -126.0
// c = 105.0
// d = 0.0
// e = -21.0
// f = 0.0
// g = 1.0
double shift = 0.0;if(isPeriodic)shift= 1.0/21.0;
double X = 2.0*pow(x,6) - 6.0*pow(x,5) + 5.0*pow(x,4) - 1.0*pow(x,2) + shift;
double Y = 2.0*pow(y,6) - 6.0*pow(y,5) + 5.0*pow(y,4) - 1.0*pow(y,2) + shift;
double Z = 2.0*pow(z,6) - 6.0*pow(z,5) + 5.0*pow(z,4) - 1.0*pow(z,2) + shift;
double Xx = 12.0*pow(x,5) - 30.0*pow(x,4) + 20.0*pow(x,3) - 2.0*x;
double Yy = 12.0*pow(y,5) - 30.0*pow(y,4) + 20.0*pow(y,3) - 2.0*y;
double Zz = 12.0*pow(z,5) - 30.0*pow(z,4) + 20.0*pow(z,3) - 2.0*z;
double Xxx = 60.0*pow(x,4) - 120.0*pow(x,3) + 60.0*pow(x,2) - 2.0;
double Yyy = 60.0*pow(y,4) - 120.0*pow(y,3) + 60.0*pow(y,2) - 2.0;
double Zzz = 60.0*pow(z,4) - 120.0*pow(z,3) + 60.0*pow(z,2) - 2.0;
double u = X *Y *Z ;
double ux = Xx *Y *Z ;
double uy = X *Yy *Z ;
double uz = X *Y *Zz ;
double uxx = Xxx*Y *Z ;
double uyy = X *Yyy*Z ;
double uzz = X *Y *Zzz;
*U = X*Y*Z;
*Ux = Xx*Y*Z;
*Uy = X*Yy*Z;
*Uz = X*Y*Zz;
*Uxx = Xxx*Y*Z;
*Uyy = X*Yyy*Z;
*Uzz = X*Y*Zzz;
}
//------------------------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------------------------
// Initialize the coefficient arrays (alpha, beta_i/j/k), the exact solution
// (utrue) and the right-hand side (f = a*alpha*u - b*div(beta*grad(u))) for
// every cell of every box owned by this rank, using hclib forasync3D for the
// per-cell parallel loop (replaces the commented-out OpenMP collapse(3) loop).
void initialize_problem(level_type * level, double hLevel, double a, double b){
  level->h = hLevel;
  int box;
  for(box=0;box<level->num_my_boxes;box++){
    box_type *lbox = (box_type *)&level->my_boxes[box];
    // zero all vectors (including ghost zones) before filling the interior
    memset((double *)lbox->vectors[VECTOR_ALPHA ].get(),0,lbox->volume*sizeof(double));
    memset((double *)lbox->vectors[VECTOR_BETA_I].get(),0,lbox->volume*sizeof(double));
    memset((double *)lbox->vectors[VECTOR_BETA_J].get(),0,lbox->volume*sizeof(double));
    memset((double *)lbox->vectors[VECTOR_BETA_K].get(),0,lbox->volume*sizeof(double));
    memset((double *)lbox->vectors[VECTOR_UTRUE ].get(),0,lbox->volume*sizeof(double));
    memset((double *)lbox->vectors[VECTOR_F     ].get(),0,lbox->volume*sizeof(double));
    int i,j,k; // NOTE(review): unused since the OpenMP loop became a lambda; the lambda parameters shadow them
    const int jStride = lbox->jStride;
    const int kStride = lbox->kStride;
    const int  ghosts = lbox->ghosts;
    const int   dim_i = lbox->dim;
    const int   dim_j = lbox->dim;
    const int   dim_k = lbox->dim;
    // #pragma omp parallel for private(k,j,i) collapse(3)
    hclib::finish([&a, &dim_k, &dim_j, &dim_i, &hLevel, &lbox, &b, &level, &kStride, &ghosts, &jStride] {
      hclib::loop_domain_3d loop(dim_k, dim_j, dim_i);
      hclib::forasync3D_nb(&loop, [&a, &hLevel, &lbox, &b, &level, &kStride, &ghosts, &jStride] (int k, int j, int i) {
        //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        // FIX... move to quadrature version to initialize the problem.
        // i.e. the value of an array element is the average value of the function over the cell (finite volume)
        //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        int ijk = (i+ghosts) + (j+ghosts)*jStride + (k+ghosts)*kStride;
        double x = hLevel*( (double)(i+lbox->low.i) + 0.5 ); // +0.5 to get to the center of cell
        double y = hLevel*( (double)(j+lbox->low.j) + 0.5 );
        double z = hLevel*( (double)(k+lbox->low.k) + 0.5 );
        double A,B,Bx,By,Bz,Bi,Bj,Bk;
        double U,Ux,Uy,Uz,Uxx,Uyy,Uzz;
        //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        // constant-coefficient defaults; overwritten below when variable coefficients are enabled
        A  = 1.0;
        B  = 1.0;
        Bx = 0.0;
        By = 0.0;
        Bz = 0.0;
        Bi = 1.0;
        Bj = 1.0;
        Bk = 1.0;
        #ifdef STENCIL_VARIABLE_COEFFICIENT // variable coefficient problem...
        evaluateBeta(x-hLevel*0.5,y          ,z          ,&Bi,&Bx,&By,&Bz); // face-centered value of Beta for beta_i
        evaluateBeta(x          ,y-hLevel*0.5,z          ,&Bj,&Bx,&By,&Bz); // face-centered value of Beta for beta_j
        evaluateBeta(x          ,y          ,z-hLevel*0.5,&Bk,&Bx,&By,&Bz); // face-centered value of Beta for beta_k
        evaluateBeta(x          ,y          ,z          ,&B ,&Bx,&By,&Bz); // cell-centered value of Beta
        #endif
        //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        evaluateU(x,y,z,&U,&Ux,&Uy,&Uz,&Uxx,&Uyy,&Uzz, (level->boundary_condition.type == BC_PERIODIC) );
        // right-hand side of the Helmholtz/Poisson operator applied to the exact solution
        double F = a*A*U - b*( (Bx*Ux + By*Uy + Bz*Uz) + B*(Uxx + Uyy + Uzz) );
        //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        lbox->vectors[VECTOR_BETA_I][ijk] = Bi;
        lbox->vectors[VECTOR_BETA_J][ijk] = Bj;
        lbox->vectors[VECTOR_BETA_K][ijk] = Bk;
        lbox->vectors[VECTOR_ALPHA ][ijk] = A;
        lbox->vectors[VECTOR_UTRUE ][ijk] = U;
        lbox->vectors[VECTOR_F     ][ijk] = F;
        //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      });
    });
  }
  // quick test for Poisson...
  if(level->alpha_is_zero==-1)level->alpha_is_zero = (dot(level,VECTOR_ALPHA,VECTOR_ALPHA) == 0.0);
}
//------------------------------------------------------------------------------------------------------------------------------
|
driver.c | #include "driver.h"
// Program entry point: initialize LIKWID markers and threaded MPI, parse the
// experiment parameters, build the MPI topology, then either verify the
// time-stepper kernel or run the performance tests.
int main(int argc, char** argv)
{
    LIKWID_MARKER_INIT;
    int provided, claimed;
    MPI_Init_thread( &argc, &argv, MPI_THREAD_MULTIPLE, &provided );
//    MPI_Is_thread_main( &flag );
//    if (!flag)
//      printf( "This thread called init_thread but Is_thread_main gave false\n" );fflush(stdout);
    MPI_Query_thread( &claimed );
    if (claimed != provided) {
        // bug fix: the body was unbraced, so fflush ran unconditionally and the
        // indentation was misleading; warn only when the granted level differs
        printf( "Query thread gave thread level %d but Init_thread gave %d\n", claimed, provided );
        fflush(stdout);
    }
//    MPI_Init(&argc,&argv);
    int mpi_rank, mpi_size;
    MPI_Comm_rank (MPI_COMM_WORLD, &(mpi_rank));
    MPI_Comm_size (MPI_COMM_WORLD, &(mpi_size));
    // configure the experiment's parameters
    Parameters p;
    p.mpi_rank = mpi_rank;
    p.mpi_size = mpi_size;
    param_default(&p);
    parse_args(argc, argv, &p);
    #pragma omp parallel num_threads(p.num_threads)
    {
        LIKWID_MARKER_THREADINIT;
    }
    // Simple error checking: the requested process grid must match the launched size
    if (p.t.shape[0]*p.t.shape[1]*p.t.shape[2] != p.mpi_size) {
        if(p.mpi_rank==0) fprintf(stderr,"ERROR: requested MPI topology shape does not match the available processes count: \n\tRequested:%03d \n\tAvailable:%03d\n",
            p.t.shape[0]*p.t.shape[1]*p.t.shape[2], p.mpi_size);
        MPI_Barrier(MPI_COMM_WORLD);
        MPI_Finalize();
        LIKWID_MARKER_CLOSE; // bug fix: the early-exit path previously skipped closing the LIKWID markers
        return 1;
    }
    // Create the MPI topology
    mpi_topology_init(&p);
//    MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    // initialize time-stepper specific requirements
    init(&p);
    // Verify the time stepper kernel if required
    if (p.verify !=0) {
        verify(&p);
    } else { // do performance tests
        performance_test(&p);
    }
    MPI_Finalize();
    LIKWID_MARKER_CLOSE;
    return 0;
}
|
nr_ao2mo.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <math.h>
#include <assert.h>
//#define NDEBUG
//#include <omp.h>
#include "config.h"
#include "cint.h"
#include "np_helper/np_helper.h"
#include "vhf/cvhf.h"
#include "vhf/fblas.h"
#include "vhf/nr_direct.h"
#include "nr_ao2mo.h"
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
// 9f or 7g or 5h functions should be enough
#define NCTRMAX 64
#define OUTPUTIJ 1
#define INPUT_IJ 2
/*
* Denoting 2e integrals (ij|kl),
* AO2MOnr_e1_drv transforms ij for ksh_start <= k shell < ksh_end.
* The transformation C_pi C_qj (pq|k*) coefficients are stored in
* mo_coeff, C_pi and C_qj are offset by i_start and i_count, j_start and j_count.
* The output eri is an 2D array, ordered as (kl-AO-pair,ij-MO-pair) in
* C-order. Transposing is needed before calling AO2MOnr_e2_drv.
*
* AO2MOnr_e2_drv transforms kl for nijcount of ij pairs.
* vin is assumed to be an C-array of (ij-MO-pair, kl-AO-pair)
* vout is an C-array of (ij-MO-pair, kl-MO-pair)
*
* ftranse1 and ftranse2
* ---------------------
* AO2MOtranse1_nr_s4, AO2MOtranse1_nr_s2ij, AO2MOtranse1_nr_s2kl AO2MOtranse1_nr_s1
* AO2MOtranse2_nr_s4, AO2MOtranse2_nr_s2ij, AO2MOtranse2_nr_s2kl AO2MOtranse2_nr_s1
* Labels s4, s2, s1 are used to label the AO integral symmetry. The
 * symmetry of transformed integrals is controlled by the function fmmm
*
* fmmm
* ----
* fmmm dim requirements:
* | vout | eri
* ---------------------+-------------------------------+-------------------
* AO2MOmmm_nr_s2_s2 | [:,bra_count*(bra_count+1)/2] | [:,nao*(nao+1)/2]
* | and bra_count==ket_count |
* AO2MOmmm_nr_s2_iltj | [:,bra_count*ket_count] | [:,nao*nao]
* AO2MOmmm_nr_s2_igtj | [:,bra_count*ket_count] | [:,nao*nao]
* AO2MOmmm_nr_s1_iltj | [:,bra_count*ket_count] | [:,nao*nao]
* AO2MOmmm_nr_s1_igtj | [:,bra_count*ket_count] | [:,nao*nao]
*
* AO2MOmmm_nr_s1_iltj, AO2MOmmm_nr_s1_igtj, AO2MOmmm_nr_s2_s2,
* AO2MOmmm_nr_s2_iltj, AO2MOmmm_nr_s2_igtj
* Pick a proper function from the 5 kinds of AO2MO transformation.
* 1. AO integral I_ij != I_ji, use
* AO2MOmmm_nr_s1_iltj or AO2MOmmm_nr_s1_igtj
* 2. AO integral I_ij == I_ji, but the MO coefficients for bra and ket
* are different, use
* AO2MOmmm_nr_s2_iltj or AO2MOmmm_nr_s2_igtj
* 3. AO integral I_ij == I_ji, and the MO coefficients are the same for
* bra and ket, use
* AO2MOmmm_nr_s2_s2
*
* ftrans | allowed fmmm
* ----------------------+---------------------
* AO2MOtranse1_nr_s4 | AO2MOmmm_nr_s2_s2
* AO2MOtranse1_nr_s2ij | AO2MOmmm_nr_s2_iltj
* AO2MOtranse2_nr_s4 | AO2MOmmm_nr_s2_igtj
* AO2MOtranse2_nr_s2kl |
* ----------------------+---------------------
* AO2MOtranse1_nr_s2kl | AO2MOmmm_nr_s2_s2
* AO2MOtranse2_nr_s2ij | AO2MOmmm_nr_s2_igtj
* | AO2MOmmm_nr_s2_iltj
* ----------------------+---------------------
* AO2MOtranse1_nr_s1 | AO2MOmmm_nr_s1_iltj
* AO2MOtranse2_nr_s1 | AO2MOmmm_nr_s1_igtj
*
*/
/* for m > n
* calculate the upper triangle part (of Fortran order matrix)
* _ |------- n -------| _
* diag_off [ . . . . . . . . ] |
* _ [ . . . . . . . . ] m
* [ . . . . . . . ] |
* [ . . . . . . ] _
*/
/*
 * Blocked triangular matrix-multiply c = a^T * b (Fortran column-major),
 * computing only the upper-trapezoidal part indicated in the diagram above.
 * Intended for m > n.  The first dgemm handles the leading full rectangle;
 * the loop then advances in BLK-row / BLK-column steps along the diagonal
 * (offset by diag_off), shrinking the column range at each step.
 * a is k x m, b is k x n, c is m x n (leading dimension m).
 */
void AO2MOdtriumm_o1(int m, int n, int k, int diag_off,
                     double *a, double *b, double *c)
{
        const double D0 = 0;
        const double D1 = 1;
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const int BLK = 48;
        /* mstart: rows not covered by whole BLK-sized diagonal blocks */
        int mstart = m - MAX(0, (m-diag_off)/BLK)*BLK;
        int nstart = mstart - diag_off;
        int nleft;
        /* leading full rectangle: all n columns for the first mstart rows */
        dgemm_(&TRANS_T, &TRANS_N, &mstart, &n, &k,
               &D1, a, &k, b, &k, &D0, c, &m);
        /* remaining BLK-row bands: only columns from nstart onward are needed */
        for (; mstart < m; mstart+=BLK, nstart+=BLK) {
                nleft = n - nstart;
                dgemm_(&TRANS_T, &TRANS_N, &BLK, &nleft, &k,
                       &D1, a+mstart*k, &k, b+nstart*k, &k,
                       &D0, c+nstart*m+mstart, &m);
        }
}
/* for m < n
* calculate the upper triangle part (of Fortran order matrix)
* _ |------- n -------| _
* diag_off [ . . . . . . . . ] |
* _ [ . . . . . . . . ] m
* [ . . . . . . . ] |
* [ . . . . . . ] _
*/
/*
 * Blocked triangular matrix-multiply c = a^T * b (Fortran column-major),
 * computing only the upper-trapezoidal part; companion of AO2MOdtriumm_o1,
 * intended for m < n.  Columns are processed in BLK-wide panels; the row
 * range (mend) grows by BLK per panel starting from diag_off, and the final
 * dgemm cleans up the remaining n-nstart columns with all m rows.
 * a is k x m, b is k x n, c is m x n (leading dimension m).
 */
void AO2MOdtriumm_o2(int m, int n, int k, int diag_off,
                     double *a, double *b, double *c)
{
        const double D0 = 0;
        const double D1 = 1;
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const int BLK = 48;
        int nstart, nleft;
        int mend = diag_off;
        /* NOTE(review): the panel loop bound uses m-diag_off-BLK (not n);
         * presumably correct for the trapezoid shape with m < n -- verify */
        for (nstart = 0; nstart < m-diag_off-BLK; nstart+=BLK) {
                mend += BLK;
                dgemm_(&TRANS_T, &TRANS_N, &mend, &BLK, &k,
                       &D1, a, &k, b+nstart*k, &k,
                       &D0, c+nstart*m, &m);
        }
        /* trailing panel: all m rows for the remaining columns */
        nleft = n - nstart;
        dgemm_(&TRANS_T, &TRANS_N, &m, &nleft, &k,
               &D1, a, &k, b+nstart*k, &k,
               &D0, c+nstart*m, &m);
}
/*
* s1-AO integrals to s1-MO integrals, efficient for i_count < j_count
* shape requirements:
* vout[:,bra_count*ket_count], eri[:,nao*nao]
* s1, s2 here to label the AO symmetry
*/
/*
 * Half-transform one square AO buffer eri[nao,nao] into MO pairs:
 * vout = C_i^T * eri * C_j, using buf[nao*i_count] as scratch.
 * Ordered so the smaller i_count contraction happens first (efficient
 * for i_count < j_count).  When called with seekdim=OUTPUTIJ/INPUT_IJ it
 * only reports the output/input row sizes and does no work.
 */
int AO2MOmmm_nr_s1_iltj(double *vout, double *eri, double *buf,
                        struct _AO2MOEnvs *envs, int seekdim)
{
        switch (seekdim) {
                case OUTPUTIJ: return envs->bra_count * envs->ket_count;
                case INPUT_IJ: return envs->nao * envs->nao;
        }
        const double D0 = 0;
        const double D1 = 1;
        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        int nao = envs->nao;
        int i_start = envs->bra_start;
        int i_count = envs->bra_count;
        int j_start = envs->ket_start;
        int j_count = envs->ket_count;
        double *mo_coeff = envs->mo_coeff;
        // C_pi (pq| = (iq|, where (pq| is in C-order
        dgemm_(&TRANS_N, &TRANS_N, &nao, &i_count, &nao,
               &D1, eri, &nao, mo_coeff+i_start*nao, &nao,
               &D0, buf, &nao);
        // C_qj (iq| = (ij|
        dgemm_(&TRANS_T, &TRANS_N, &j_count, &i_count, &nao,
               &D1, mo_coeff+j_start*nao, &nao, buf, &nao,
               &D0, vout, &j_count);
        return 0;
}
/*
* s1-AO integrals to s1-MO integrals, efficient for i_count > j_count
* shape requirements:
* vout[:,bra_count*ket_count], eri[:,nao*nao]
*/
/*
 * Same transformation as AO2MOmmm_nr_s1_iltj (vout = C_i^T * eri * C_j for a
 * square s1 AO buffer) but contracting the ket index first, which is the
 * efficient order for i_count > j_count.  buf holds the j-half-transformed
 * intermediate of size j_count*nao.
 */
int AO2MOmmm_nr_s1_igtj(double *vout, double *eri, double *buf,
                        struct _AO2MOEnvs *envs, int seekdim)
{
        switch (seekdim) {
                case OUTPUTIJ: return envs->bra_count * envs->ket_count;
                case INPUT_IJ: return envs->nao * envs->nao;
        }
        const double D0 = 0;
        const double D1 = 1;
        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        int nao = envs->nao;
        int i_start = envs->bra_start;
        int i_count = envs->bra_count;
        int j_start = envs->ket_start;
        int j_count = envs->ket_count;
        double *mo_coeff = envs->mo_coeff;
        // C_qj (pq| = (pj|, where (pq| is in C-order
        dgemm_(&TRANS_T, &TRANS_N, &j_count, &nao, &nao,
               &D1, mo_coeff+j_start*nao, &nao, eri, &nao,
               &D0, buf, &j_count);
        // C_pi (pj| = (ij|
        dgemm_(&TRANS_N, &TRANS_N, &j_count, &i_count, &nao,
               &D1, buf, &j_count, mo_coeff+i_start*nao, &nao,
               &D0, vout, &j_count);
        return 0;
}
/*
* s2-AO integrals to s2-MO integrals
* shape requirements:
* vout[:,bra_count*(bra_count+1)/2] and bra_count==ket_count,
* eri[:,nao*(nao+1)/2]
* first s2 is the AO symmetry, second s2 is the MO symmetry
*/
/*
 * Transform a symmetric (s2) AO buffer into symmetric (s2) MO pairs:
 * vout = tril(C^T * eri * C), stored as a packed lower triangle of length
 * i_count*(i_count+1)/2.  Requires bra_count == ket_count.  dsymm exploits
 * the AO symmetry for the first half-transform; AO2MOdtriumm_o1 computes
 * only the needed triangle of the second; the final loop packs it.
 * buf must hold nao*i_count + j_count*i_count doubles.
 */
int AO2MOmmm_nr_s2_s2(double *vout, double *eri, double *buf,
                      struct _AO2MOEnvs *envs, int seekdim)
{
        switch (seekdim) {
                case OUTPUTIJ: assert(envs->bra_count == envs->ket_count);
                               return envs->bra_count * (envs->bra_count+1) / 2;
                case INPUT_IJ: return envs->nao * (envs->nao+1) / 2;
        }
        const double D0 = 0;
        const double D1 = 1;
        const char SIDE_L = 'L';
        const char UPLO_U = 'U';
        int nao = envs->nao;
        int i_start = envs->bra_start;
        int i_count = envs->bra_count;
        int j_start = envs->ket_start;
        int j_count = envs->ket_count;
        double *mo_coeff = envs->mo_coeff;
        double *buf1 = buf + nao*i_count;
        int i, j, ij;
        // C_pi (pq| = (iq|, where (pq| is in C-order
        dsymm_(&SIDE_L, &UPLO_U, &nao, &i_count,
               &D1, eri, &nao, mo_coeff+i_start*nao, &nao,
               &D0, buf, &nao);
        // triangular second half-transform: only j <= i elements are needed
        AO2MOdtriumm_o1(j_count, i_count, nao, 0,
                        mo_coeff+j_start*nao, buf, buf1);
        // pack the j_count x i_count result into the lower-triangular vout
        for (i = 0, ij = 0; i < i_count; i++) {
                for (j = 0; j <= i; j++, ij++) {
                        vout[ij] = buf1[j];
                }
                buf1 += j_count;
        }
        return 0;
}
/*
* s2-AO integrals to s1-MO integrals, efficient for i_count < j_count
* shape requirements:
* vout[:,bra_count*ket_count], eri[:,nao*(nao+1)/2]
*/
/*
 * Transform a symmetric (s2) AO buffer into rectangular (s1) MO pairs:
 * vout = C_i^T * eri * C_j, contracting the bra first (efficient for
 * i_count < j_count).  dsymm exploits the AO symmetry; buf holds the
 * nao*i_count intermediate.
 */
int AO2MOmmm_nr_s2_iltj(double *vout, double *eri, double *buf,
                        struct _AO2MOEnvs *envs, int seekdim)
{
        switch (seekdim) {
                case OUTPUTIJ: return envs->bra_count * envs->ket_count;
                case INPUT_IJ: return envs->nao * (envs->nao+1) / 2;
        }
        const double D0 = 0;
        const double D1 = 1;
        const char SIDE_L = 'L';
        const char UPLO_U = 'U';
        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        int nao = envs->nao;
        int i_start = envs->bra_start;
        int i_count = envs->bra_count;
        int j_start = envs->ket_start;
        int j_count = envs->ket_count;
        double *mo_coeff = envs->mo_coeff;
        // C_pi (pq| = (iq|, where (pq| is in C-order
        dsymm_(&SIDE_L, &UPLO_U, &nao, &i_count,
               &D1, eri, &nao, mo_coeff+i_start*nao, &nao,
               &D0, buf, &nao);
        // C_qj (iq| = (ij|
        dgemm_(&TRANS_T, &TRANS_N, &j_count, &i_count, &nao,
               &D1, mo_coeff+j_start*nao, &nao, buf, &nao,
               &D0, vout, &j_count);
        return 0;
}
/*
* s2-AO integrals to s1-MO integrals, efficient for i_count > j_count
* shape requirements:
* vout[:,bra_count*ket_count], eri[:,nao*(nao+1)/2]
*/
/*
 * Transform a symmetric (s2) AO buffer into rectangular (s1) MO pairs:
 * vout = C_i^T * eri * C_j, contracting the ket first (efficient for
 * i_count > j_count).  dsymm exploits the AO symmetry; buf holds the
 * nao*j_count intermediate.
 */
int AO2MOmmm_nr_s2_igtj(double *vout, double *eri, double *buf,
                        struct _AO2MOEnvs *envs, int seekdim)
{
        switch (seekdim) {
                case OUTPUTIJ: return envs->bra_count * envs->ket_count;
                case INPUT_IJ: return envs->nao * (envs->nao+1) / 2;
        }
        const double D0 = 0;
        const double D1 = 1;
        const char SIDE_L = 'L';
        const char UPLO_U = 'U';
        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        int nao = envs->nao;
        int i_start = envs->bra_start;
        int i_count = envs->bra_count;
        int j_start = envs->ket_start;
        int j_count = envs->ket_count;
        double *mo_coeff = envs->mo_coeff;
        // C_qj (pq| = (pj|, where (pq| is in C-order
        dsymm_(&SIDE_L, &UPLO_U, &nao, &j_count,
               &D1, eri, &nao, mo_coeff+j_start*nao, &nao,
               &D0, buf, &nao);
        // C_pi (pj| = (ij|
        dgemm_(&TRANS_T, &TRANS_N, &j_count, &i_count, &nao,
               &D1, buf, &nao, mo_coeff+i_start*nao, &nao,
               &D0, vout, &j_count);
        return 0;
}
/*
* transform bra, s1 to label AO symmetry
*/
int AO2MOmmm_bra_nr_s1(double *vout, double *vin, double *buf,
struct _AO2MOEnvs *envs, int seekdim)
{
switch (seekdim) {
case 1: return envs->bra_count * envs->nao;
case 2: return envs->nao * envs->nao;
}
const double D0 = 0;
const double D1 = 1;
const char TRANS_N = 'N';
int nao = envs->nao;
int i_start = envs->bra_start;
int i_count = envs->bra_count;
double *mo_coeff = envs->mo_coeff;
dgemm_(&TRANS_N, &TRANS_N, &nao, &i_count, &nao,
&D1, vin, &nao, mo_coeff+i_start*nao, &nao,
&D0, vout, &nao);
return 0;
}
/*
* transform ket, s1 to label AO symmetry
*/
/*
 * Half-transform only the ket index of a square (s1) AO buffer:
 * vout = C_j^T * vin (j_count x nao result, leading dimension j_count).
 * buf is unused.  seekdim queries report the output/input sizes.
 */
int AO2MOmmm_ket_nr_s1(double *vout, double *vin, double *buf,
                       struct _AO2MOEnvs *envs, int seekdim)
{
        switch (seekdim) {
                case OUTPUTIJ: return envs->nao * envs->ket_count;
                case INPUT_IJ: return envs->nao * envs->nao;
        }
        const double D0 = 0;
        const double D1 = 1;
        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        int nao = envs->nao;
        int j_start = envs->ket_start;
        int j_count = envs->ket_count;
        double *mo_coeff = envs->mo_coeff;
        dgemm_(&TRANS_T, &TRANS_N, &j_count, &nao, &nao,
               &D1, mo_coeff+j_start*nao, &nao, vin, &nao,
               &D0, vout, &j_count);
        return 0;
}
/*
* transform bra, s2 to label AO symmetry
*/
/*
 * Half-transform only the bra index of a symmetric (s2) AO buffer:
 * vout = vin * C_i via dsymm (nao x i_count result).  buf is unused.
 * seekdim queries report the output/input sizes.
 */
int AO2MOmmm_bra_nr_s2(double *vout, double *vin, double *buf,
                       struct _AO2MOEnvs *envs, int seekdim)
{
        switch (seekdim) {
                case OUTPUTIJ: return envs->bra_count * envs->nao;
                case INPUT_IJ: return envs->nao * (envs->nao+1) / 2;
        }
        const double D0 = 0;
        const double D1 = 1;
        const char SIDE_L = 'L';
        const char UPLO_U = 'U';
        int nao = envs->nao;
        int i_start = envs->bra_start;
        int i_count = envs->bra_count;
        double *mo_coeff = envs->mo_coeff;
        dsymm_(&SIDE_L, &UPLO_U, &nao, &i_count,
               &D1, vin, &nao, mo_coeff+i_start*nao, &nao,
               &D0, vout, &nao);
        return 0;
}
}
/*
* transform ket, s2 to label AO symmetry
*/
/*
 * Half-transform only the ket index of a symmetric (s2) AO buffer.
 * dsymm produces C-ordered (q,j) in buf; the explicit loop transposes it so
 * vout is j_count-fast per AO index q (nao x j_count, like the s1 variant).
 * buf must hold nao*j_count doubles.
 */
int AO2MOmmm_ket_nr_s2(double *vout, double *vin, double *buf,
                       struct _AO2MOEnvs *envs, int seekdim)
{
        switch (seekdim) {
                case OUTPUTIJ: return envs->nao * envs->ket_count;
                case INPUT_IJ: return envs->nao * (envs->nao+1) / 2;
        }
        const double D0 = 0;
        const double D1 = 1;
        const char SIDE_L = 'L';
        const char UPLO_U = 'U';
        int nao = envs->nao;
        int j_start = envs->ket_start;
        int j_count = envs->ket_count;
        double *mo_coeff = envs->mo_coeff;
        int i, j;
        dsymm_(&SIDE_L, &UPLO_U, &nao, &j_count,
               &D1, vin, &nao, mo_coeff+j_start*nao, &nao,
               &D0, buf, &nao);
        /* transpose buf (nao-fast) into vout (j_count-fast) */
        for (j = 0; j < nao; j++) {
                for (i = 0; i < j_count; i++) {
                        vout[i] = buf[i*nao+j];
                }
                vout += j_count;
        }
        return 0;
}
/*
* s1, s2ij, s2kl, s4 here to label the AO symmetry
* eris[ncomp,nkl,nao_pair_ij]
*/
/*
 * Scatter one (ij|kl) shell-quartet of integrals into s4-packed eri rows.
 * Input: ints holds dk*dl sub-blocks addressed by di*dj*(l*dk+k), each a
 * dj x di block with j the fast index.  Output: successive i-rows of the
 * packed lower triangle, whose length grows by one per row (peri advances
 * by istride+i).  The di==1/2/3 cases are hand-unrolled over i.
 */
static void s4_copy(double *eri, double *ints, int di, int dj, int dk, int dl,
                    int istride, size_t nao2)
{
        int i, j, k, l;
        double *pints, *peri, *peri1;
        switch (di) {
        case 1:
                for (k = 0; k < dk; k++) {
                for (l = 0; l < dl; l++) {
                        pints = ints + di * dj * (l*dk+k);
                        for (j = 0; j < dj; j++) {
                                eri[j] = pints[j];
                        }
                        eri += nao2;
                } }
                break;
        case 2:
                for (k = 0; k < dk; k++) {
                for (l = 0; l < dl; l++) {
                        pints = ints + di * dj * (l*dk+k);
                        peri = eri + istride;       /* second i-row */
                        for (j = 0; j < dj; j++) {
                                eri [j] = pints[j*2+0];
                                peri[j] = pints[j*2+1];
                        }
                        eri += nao2;
                } }
                break;
        case 3:
                for (k = 0; k < dk; k++) {
                for (l = 0; l < dl; l++) {
                        pints = ints + di * dj * (l*dk+k);
                        peri = eri + istride;       /* second i-row */
                        peri1 = peri + istride + 1; /* third i-row (one longer) */
                        for (j = 0; j < dj; j++) {
                                eri  [j] = pints[j*3+0];
                                peri [j] = pints[j*3+1];
                                peri1[j] = pints[j*3+2];
                        }
                        eri += nao2;
                } }
                break;
        default:
                for (k = 0; k < dk; k++) {
                for (l = 0; l < dl; l++) {
                        pints = ints + di * dj * (l*dk+k);
                        peri = eri;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        //TODO: call nontemporal write to avoid write-allocate
                                        peri[j] = pints[j*di+i];
                                }
                                peri += istride + i;
                        }
                        eri += nao2;
                } }
        }
}
/*
 * Zero-fill counterpart of s4_copy: clears the same s4-packed destination
 * elements (used when prescreening rejects the quartet or the integrals
 * vanish).  nop is unused; it keeps the signature interchangeable with
 * s4_copy inside the DISTR_INTS_BY macro.
 */
static void s4_set0(double *eri, double *nop,
                    int di, int dj, int dk, int dl,
                    int istride, size_t nao2)
{
        int i, j, k, l;
        double *peri, *peri1;
        switch (di) {
        case 1:
                for (k = 0; k < dk; k++) {
                for (l = 0; l < dl; l++) {
                        for (j = 0; j < dj; j++) {
                                eri[j] = 0;
                        }
                        eri += nao2;
                } }
                break;
        case 2:
                for (k = 0; k < dk; k++) {
                for (l = 0; l < dl; l++) {
                        peri = eri + istride;       /* second i-row */
                        for (j = 0; j < dj; j++) {
                                eri [j] = 0;
                                peri[j] = 0;
                        }
                        eri += nao2;
                } }
                break;
        case 3:
                for (k = 0; k < dk; k++) {
                for (l = 0; l < dl; l++) {
                        peri = eri + istride;       /* second i-row */
                        peri1 = peri + istride + 1; /* third i-row (one longer) */
                        for (j = 0; j < dj; j++) {
                                eri  [j] = 0;
                                peri [j] = 0;
                                peri1[j] = 0;
                        }
                        eri += nao2;
                } }
                break;
        default:
                for (k = 0; k < dk; k++) {
                for (l = 0; l < dl; l++) {
                        peri = eri;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        //TODO: call nontemporal write to avoid write-allocate
                                        peri[j] = 0;
                                }
                                peri += istride + i;
                        }
                        eri += nao2;
                } }
        }
}
/*
 * Copy a shell quartet into s4-packed eri for a diagonal (ksh==lsh) shell
 * pair: only the l <= k blocks are stored.  Source blocks live at
 * ints[di*dj*(l*dk+k)] with j the fast index; destination i-rows grow by
 * one element each (packed triangle), hence the istride+i advance.
 */
static void s4_copy_keql(double *eri, double *ints,
                         int di, int dj, int dk, int dl,
                         int istride, size_t nao2)
{
        int i, j, k, l;
        for (k = 0; k < dk; k++) {
        for (l = 0; l <= k; l++) {
                double *src = ints + di * dj * (l*dk+k);
                size_t row = 0;
                for (i = 0; i < di; i++) {
                        for (j = 0; j < dj; j++) {
                                eri[row+j] = src[j*di+i];
                        }
                        row += istride + i;
                }
                eri += nao2;
        } }
}
/*
 * Zero-fill counterpart of s4_copy_keql: clears the l <= k destination
 * blocks of a diagonal shell pair.  nop is unused; it keeps the signature
 * interchangeable with s4_copy_keql inside DISTR_INTS_BY.
 */
static void s4_set0_keql(double *eri, double *nop,
                         int di, int dj, int dk, int dl,
                         int istride, size_t nao2)
{
        int i, j, k, l;
        for (k = 0; k < dk; k++) {
        for (l = 0; l <= k; l++) {
                size_t row = 0;
                for (i = 0; i < di; i++) {
                        for (j = 0; j < dj; j++) {
                                eri[row+j] = 0;
                        }
                        row += istride + i;
                }
                eri += nao2;
        } }
}
/*
 * Copy a shell quartet into s4-packed eri for a diagonal (ish==jsh) shell
 * pair: within each i-row only the j <= i elements exist in the packed
 * triangle.  All dk*dl kl-blocks are stored.
 */
static void s4_copy_ieqj(double *eri, double *ints,
                         int di, int dj, int dk, int dl,
                         int istride, size_t nao2)
{
        int i, j, k, l;
        for (k = 0; k < dk; k++) {
        for (l = 0; l < dl; l++) {
                double *src = ints + di * dj * (l*dk+k);
                size_t row = 0;
                for (i = 0; i < di; i++) {
                        for (j = 0; j <= i; j++) {
                                eri[row+j] = src[j*di+i];
                        }
                        row += istride + i;
                }
                eri += nao2;
        } }
}
/*
 * Zero-fill counterpart of s4_copy_ieqj: clears the j <= i triangle of each
 * i-row for all kl-blocks.  nop is unused (signature compatibility with
 * s4_copy_ieqj inside DISTR_INTS_BY).
 */
static void s4_set0_ieqj(double *eri, double *nop,
                         int di, int dj, int dk, int dl,
                         int istride, size_t nao2)
{
        int i, j, k, l;
        for (k = 0; k < dk; k++) {
        for (l = 0; l < dl; l++) {
                size_t row = 0;
                for (i = 0; i < di; i++) {
                        for (j = 0; j <= i; j++) {
                                eri[row+j] = 0;
                        }
                        row += istride + i;
                }
                eri += nao2;
        } }
}
/*
 * Copy a shell quartet into s4-packed eri when BOTH pairs are diagonal
 * (ksh==lsh and ish==jsh): only l <= k blocks and, within each block,
 * only the j <= i triangle of each i-row are stored.
 */
static void s4_copy_keql_ieqj(double *eri, double *ints,
                              int di, int dj, int dk, int dl,
                              int istride, size_t nao2)
{
        int i, j, k, l;
        for (k = 0; k < dk; k++) {
        for (l = 0; l <= k; l++) {
                double *src = ints + di * dj * (l*dk+k);
                size_t row = 0;
                for (i = 0; i < di; i++) {
                        for (j = 0; j <= i; j++) {
                                eri[row+j] = src[j*di+i];
                        }
                        row += istride + i;
                }
                eri += nao2;
        } }
}
/*
 * Zero-fill counterpart of s4_copy_keql_ieqj: clears the doubly-triangular
 * (l <= k, j <= i) destination elements.  nop is unused (signature
 * compatibility inside DISTR_INTS_BY).
 */
static void s4_set0_keql_ieqj(double *eri, double *nop,
                              int di, int dj, int dk, int dl,
                              int istride, size_t nao2)
{
        int i, j, k, l;
        for (k = 0; k < dk; k++) {
        for (l = 0; l <= k; l++) {
                size_t row = 0;
                for (i = 0; i < di; i++) {
                        for (j = 0; j <= i; j++) {
                                eri[row+j] = 0;
                        }
                        row += istride + i;
                }
                eri += nao2;
        } }
}
/*
 * Copy a shell quartet into s2kl-packed eri (ij kept rectangular, kl packed):
 * only l <= k blocks are stored; each block is a full di x dj rectangle with
 * constant row stride istride.
 */
static void s2kl_copy_keql(double *eri, double *ints,
                           int di, int dj, int dk, int dl,
                           int istride, size_t nao2)
{
        int i, j, k, l;
        for (k = 0; k < dk; k++) {
        for (l = 0; l <= k; l++) {
                double *src = ints + di * dj * (l*dk+k);
                double *row = eri;
                for (i = 0; i < di; i++) {
                        for (j = 0; j < dj; j++) {
                                row[j] = src[j*di+i];
                        }
                        row += istride;
                }
                eri += nao2;
        } }
}
/*
 * Zero-fill counterpart of s2kl_copy_keql: clears the full di x dj rectangle
 * of every l <= k block.  nop is unused (signature compatibility inside
 * DISTR_INTS_BY).
 */
static void s2kl_set0_keql(double *eri, double *nop,
                           int di, int dj, int dk, int dl,
                           int istride, size_t nao2)
{
        int i, j, k, l;
        for (k = 0; k < dk; k++) {
        for (l = 0; l <= k; l++) {
                double *row = eri;
                for (i = 0; i < di; i++) {
                        for (j = 0; j < dj; j++) {
                                row[j] = 0;
                        }
                        row += istride;
                }
                eri += nao2;
        } }
}
/*
 * Copy a shell quartet into s1 (fully rectangular) eri: all dk*dl blocks,
 * each a full di x dj rectangle with constant row stride istride.  Source
 * blocks are at ints[di*dj*(l*dk+k)] with j the fast index.
 */
static void s1_copy(double *eri, double *ints,
                    int di, int dj, int dk, int dl,
                    int istride, size_t nao2)
{
        int i, j, k, l;
        for (k = 0; k < dk; k++) {
        for (l = 0; l < dl; l++) {
                double *src = ints + di * dj * (l*dk+k);
                double *row = eri;
                for (i = 0; i < di; i++) {
                        for (j = 0; j < dj; j++) {
                                row[j] = src[j*di+i];
                        }
                        row += istride;
                }
                eri += nao2;
        } }
}
/*
 * Zero-fill counterpart of s1_copy: clears the full di x dj rectangle of
 * every kl-block.  nop is unused (signature compatibility inside
 * DISTR_INTS_BY).
 */
static void s1_set0(double *eri, double *nop,
                    int di, int dj, int dk, int dl,
                    int istride, size_t nao2)
{
        int i, j, k, l;
        for (k = 0; k < dk; k++) {
        for (l = 0; l < dl; l++) {
                double *row = eri;
                for (i = 0; i < di; i++) {
                        for (j = 0; j < dj; j++) {
                                row[j] = 0;
                        }
                        row += istride;
                }
                eri += nao2;
        } }
}
/*
 * Shared driver step for the AO2MOfill_* functions below.
 * For the current shell quartet (shls): if prescreening passes and the
 * integral engine reports non-zero integrals, scatter them into eri with
 * fcopy (one pass per tensor component); otherwise zero the same
 * destination region with fset0.
 * NOTE(review): on the fset0 path pbuf may be uninitialized in
 * AO2MOfill_nr_s1, but every fset0 variant ignores its second argument.
 */
#define DISTR_INTS_BY(fcopy, fset0, istride) \
        if ((*fprescreen)(shls, envs->vhfopt, envs->atm, envs->bas, envs->env) && \
            (*intor)(buf, NULL, shls, envs->atm, envs->natm, \
                     envs->bas, envs->nbas, envs->env, envs->cintopt, NULL)) { \
                pbuf = buf; \
                for (icomp = 0; icomp < envs->ncomp; icomp++) { \
                        peri = eri + nao2 * nkl * icomp + ioff + ao_loc[jsh]; \
                        fcopy(peri, pbuf, di, dj, dk, dl, istride, nao2); \
                        pbuf += di * dj * dk * dl; \
                } \
        } else { \
                for (icomp = 0; icomp < envs->ncomp; icomp++) { \
                        peri = eri + nao2 * nkl * icomp + ioff + ao_loc[jsh]; \
                        fset0(peri, pbuf, di, dj, dk, dl, istride, nao2); \
                } \
        }
/*
 * Fill eri with AO integrals for bra shell ish over the [klsh_start,
 * klsh_end) kl-shell-pair window, with no symmetry (s1): the kl index runs
 * over the full nbas x nbas square and ij blocks are full rectangles.
 */
void AO2MOfill_nr_s1(int (*intor)(), int (*fprescreen)(),
                     double *eri, double *buf,
                     int nkl, int ish, struct _AO2MOEnvs *envs)
{
        const int nao = envs->nao;
        const size_t nao2 = nao * nao;
        const int *ao_loc = envs->ao_loc;
        const int klsh_start = envs->klsh_start;
        const int klsh_end = klsh_start + envs->klsh_count;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int ioff = ao_loc[ish] * nao;
        int kl, jsh, ksh, lsh, dj, dk, dl;
        int icomp;
        int shls[4];
        double *pbuf, *peri;
        shls[0] = ish;
        for (kl = klsh_start; kl < klsh_end; kl++) {
                // s1 decoding: kl = ksh * nbas + lsh
                // (the triangular formula kl = k*(k+1)/2 + l applies only to the s2kl/s4 variants)
                ksh = kl / envs->nbas;
                lsh = kl - ksh * envs->nbas;
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dl = ao_loc[lsh+1] - ao_loc[lsh];
                shls[2] = ksh;
                shls[3] = lsh;
                for (jsh = 0; jsh < envs->nbas; jsh++) {
                        dj = ao_loc[jsh+1] - ao_loc[jsh];
                        shls[1] = jsh;
                        DISTR_INTS_BY(s1_copy, s1_set0, nao);
                }
                eri += nao2 * dk * dl;
        }
}
/*
 * Fill eri for bra shell ish with ij-symmetry (s2ij): ij pairs are packed
 * as a lower triangle (only jsh <= ish contributes; the jsh==ish block is
 * itself triangular), while kl runs over the full nbas x nbas square.
 */
void AO2MOfill_nr_s2ij(int (*intor)(), int (*fprescreen)(),
                       double *eri, double *buf,
                       int nkl, int ish, struct _AO2MOEnvs *envs)
{
        const int nao = envs->nao;
        const size_t nao2 = nao * (nao+1) / 2;
        const int *ao_loc = envs->ao_loc;
        const int klsh_start = envs->klsh_start;
        const int klsh_end = klsh_start + envs->klsh_count;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        /* offset of row ao_loc[ish] in the packed ij triangle */
        const int ioff = ao_loc[ish] * (ao_loc[ish]+1) / 2;
        int kl, jsh, ksh, lsh, dj, dk, dl;
        int icomp;
        int shls[4];
        double *pbuf = buf;
        double *peri;
        shls[0] = ish;
        for (kl = klsh_start; kl < klsh_end; kl++) {
                // s2ij decoding: kl = ksh * nbas + lsh (kl itself is not symmetrized here)
                ksh = kl / envs->nbas;
                lsh = kl - ksh * envs->nbas;
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dl = ao_loc[lsh+1] - ao_loc[lsh];
                shls[2] = ksh;
                shls[3] = lsh;
                /* off-diagonal ij blocks: full rectangles within the triangle */
                for (jsh = 0; jsh < ish; jsh++) {
                        dj = ao_loc[jsh+1] - ao_loc[jsh];
                        shls[1] = jsh;
                        DISTR_INTS_BY(s4_copy, s4_set0, ao_loc[ish]+1);
                }
                /* diagonal ij block: only j <= i elements */
                jsh = ish;
                dj = di;
                shls[1] = jsh;
                DISTR_INTS_BY(s4_copy_ieqj, s4_set0_ieqj, ao_loc[ish]+1);
                eri += nao2 * dk * dl;
        }
}
/*
 * Fill eri for bra shell ish with kl-symmetry (s2kl): kl shell pairs are
 * packed triangularly (kl = k*(k+1)/2 + l) while ij blocks stay full
 * rectangles.  Diagonal ksh==lsh blocks store only their dk*(dk+1)/2
 * unique kl elements.
 */
void AO2MOfill_nr_s2kl(int (*intor)(), int (*fprescreen)(),
                       double *eri, double *buf,
                       int nkl, int ish, struct _AO2MOEnvs *envs)
{
        const int nao = envs->nao;
        const size_t nao2 = nao * nao;
        const int *ao_loc = envs->ao_loc;
        const int klsh_start = envs->klsh_start;
        const int klsh_end = klsh_start + envs->klsh_count;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int ioff = ao_loc[ish] * nao;
        int kl, jsh, ksh, lsh, dj, dk, dl;
        int icomp;
        int shls[4];
        double *pbuf = buf;
        double *peri;
        shls[0] = ish;
        for (kl = klsh_start; kl < klsh_end; kl++) {
                // kl = k * (k+1) / 2 + l ; invert via the quadratic formula
                // (1e-7 guards against floating-point round-down of sqrt)
                ksh = (int)(sqrt(2*kl+.25) - .5 + 1e-7);
                lsh = kl - ksh * (ksh+1) / 2;
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dl = ao_loc[lsh+1] - ao_loc[lsh];
                shls[2] = ksh;
                shls[3] = lsh;
                if (ksh == lsh) {
                        /* diagonal kl pair: keep only l <= k integrals */
                        for (jsh = 0; jsh < envs->nbas; jsh++) {
                                dj = ao_loc[jsh+1] - ao_loc[jsh];
                                shls[1] = jsh;
                                DISTR_INTS_BY(s2kl_copy_keql, s2kl_set0_keql, nao);
                        }
                        eri += nao2 * dk*(dk+1)/2;
                } else {
                        /* off-diagonal kl pair: full dk*dl block */
                        for (jsh = 0; jsh < envs->nbas; jsh++) {
                                dj = ao_loc[jsh+1] - ao_loc[jsh];
                                shls[1] = jsh;
                                DISTR_INTS_BY(s1_copy, s1_set0, nao);
                        }
                        eri += nao2 * dk * dl;
                } }
}
/*
 * Fill eri for bra shell ish with full 4-fold symmetry (s4): both the ij
 * and kl indices are triangularly packed.  Combines the s2ij jsh <= ish
 * loop structure with the s2kl triangular kl decoding; the doubly-diagonal
 * (ksh==lsh, jsh==ish) case uses the _keql_ieqj copy/zero variants.
 */
void AO2MOfill_nr_s4(int (*intor)(), int (*fprescreen)(),
                     double *eri, double *buf,
                     int nkl, int ish, struct _AO2MOEnvs *envs)
{
        const int nao = envs->nao;
        const size_t nao2 = nao * (nao+1) / 2;
        const int *ao_loc = envs->ao_loc;
        const int klsh_start = envs->klsh_start;
        const int klsh_end = klsh_start + envs->klsh_count;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        /* offset of row ao_loc[ish] in the packed ij triangle */
        const int ioff = ao_loc[ish] * (ao_loc[ish]+1) / 2;
        int kl, jsh, ksh, lsh, dj, dk, dl;
        int icomp;
        int shls[4];
        double *pbuf = buf;
        double *peri;
        shls[0] = ish;
        for (kl = klsh_start; kl < klsh_end; kl++) {
                // kl = k * (k+1) / 2 + l ; invert via the quadratic formula
                ksh = (int)(sqrt(2*kl+.25) - .5 + 1e-7);
                lsh = kl - ksh * (ksh+1) / 2;
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dl = ao_loc[lsh+1] - ao_loc[lsh];
                shls[2] = ksh;
                shls[3] = lsh;
                if (ksh == lsh) {
                        /* diagonal kl pair */
                        for (jsh = 0; jsh < ish; jsh++) {
                                dj = ao_loc[jsh+1] - ao_loc[jsh];
                                shls[1] = jsh;
                                DISTR_INTS_BY(s4_copy_keql, s4_set0_keql,
                                              ao_loc[ish]+1);
                        }
                        /* diagonal ij block on top of diagonal kl pair */
                        jsh = ish;
                        dj = di;
                        shls[1] = ish;
                        DISTR_INTS_BY(s4_copy_keql_ieqj, s4_set0_keql_ieqj,
                                      ao_loc[ish]+1);
                        eri += nao2 * dk*(dk+1)/2;
                } else {
                        /* off-diagonal kl pair */
                        for (jsh = 0; jsh < ish; jsh++) {
                                dj = ao_loc[jsh+1] - ao_loc[jsh];
                                shls[1] = jsh;
                                DISTR_INTS_BY(s4_copy, s4_set0, ao_loc[ish]+1);
                        }
                        jsh = ish;
                        dj = di;
                        shls[1] = ish;
                        DISTR_INTS_BY(s4_copy_ieqj, s4_set0_ieqj, ao_loc[ish]+1);
                        eri += nao2 * dk * dl;
                } }
}
/*
* ************************************************
* s1, s2ij, s2kl, s4 here to label the AO symmetry
*/
/*
 * Transform one row of the first half-transformation when the AO ij pair
 * carries no symmetry (s1): each vin row is a full nao*nao square.
 * The fmmm kernel is first probed (OUTPUTIJ) for its output row size,
 * then invoked on row `row_id`.
 */
void AO2MOtranse1_nr_s1(int (*fmmm)(), int row_id,
                        double *vout, double *vin, double *buf,
                        struct _AO2MOEnvs *envs)
{
        const size_t npair_out = (*fmmm)(NULL, NULL, buf, envs, OUTPUTIJ);
        const size_t npair_in = envs->nao * envs->nao;
        (*fmmm)(vout + npair_out*row_id, vin + npair_in*row_id, buf, envs, 0);
}
/*
 * Transform one row whose AO ij pair is triangular-packed (s2ij):
 * expand the packed lower triangle into a full square scratch buffer
 * with NPdunpack_tril, then hand the square to the fmmm kernel.
 */
void AO2MOtranse1_nr_s2ij(int (*fmmm)(), int row_id,
                          double *vout, double *vin, double *buf,
                          struct _AO2MOEnvs *envs)
{
        const int n = envs->nao;
        const size_t npair_out = (*fmmm)(NULL, NULL, buf, envs, OUTPUTIJ);
        const size_t npair_in = n*(n+1)/2;       /* packed input row size */
        NPdunpack_tril(n, vin + npair_in*row_id, buf, 0);
        (*fmmm)(vout + npair_out*row_id, buf, buf + n*n, envs, 0);
}
/* s2 is an alias for s2ij: the ij pair of vin is triangular-packed. */
void AO2MOtranse1_nr_s2(int (*fmmm)(), int row_id,
                        double *vout, double *vin, double *buf,
                        struct _AO2MOEnvs *envs)
{
        AO2MOtranse1_nr_s2ij(fmmm, row_id, vout, vin, buf, envs);
}
/* s2kl symmetry affects only the kl (row) index; each row is still a
 * full nao*nao ij square, so the s1 row transform applies unchanged. */
void AO2MOtranse1_nr_s2kl(int (*fmmm)(), int row_id,
                          double *vout, double *vin, double *buf,
                          struct _AO2MOEnvs *envs)
{
        AO2MOtranse1_nr_s1(fmmm, row_id, vout, vin, buf, envs);
}
/* s4 rows have the same triangular ij packing as s2ij. */
void AO2MOtranse1_nr_s4(int (*fmmm)(), int row_id,
                        double *vout, double *vin, double *buf,
                        struct _AO2MOEnvs *envs)
{
        AO2MOtranse1_nr_s2ij(fmmm, row_id, vout, vin, buf, envs);
}
/*
* ************************************************
* s1, s2ij, s2kl, s4 here to label the AO symmetry
*/
/*
 * Second half-transformation, s1 row layout: probe the fmmm kernel for
 * both its output (OUTPUTIJ) and input (INPUT_IJ) row sizes, then
 * transform row `row_id` of vin into vout.
 */
void AO2MOtranse2_nr_s1(int (*fmmm)(), int row_id,
                        double *vout, double *vin, double *buf,
                        struct _AO2MOEnvs *envs)
{
        const size_t npair_out = (*fmmm)(NULL, NULL, buf, envs, OUTPUTIJ);
        const size_t npair_in = (*fmmm)(NULL, NULL, buf, envs, INPUT_IJ);
        (*fmmm)(vout + npair_out*row_id, vin + npair_in*row_id, buf, envs, 0);
}
/* s2ij rows carry no kl-style packing in the second transformation;
 * identical handling to s1. */
void AO2MOtranse2_nr_s2ij(int (*fmmm)(), int row_id,
                          double *vout, double *vin, double *buf,
                          struct _AO2MOEnvs *envs)
{
        AO2MOtranse2_nr_s1(fmmm, row_id, vout, vin, buf, envs);
}
/*
 * Second half-transformation for rows stored as packed lower triangles:
 * unpack the triangle into a full nao x nao square, then run the fmmm
 * kernel on the square.
 */
void AO2MOtranse2_nr_s2kl(int (*fmmm)(), int row_id,
                          double *vout, double *vin, double *buf,
                          struct _AO2MOEnvs *envs)
{
        const int n = envs->nao;
        const size_t npair_out = (*fmmm)(NULL, NULL, buf, envs, OUTPUTIJ);
        const size_t npair_in = (*fmmm)(NULL, NULL, buf, envs, INPUT_IJ);
        NPdunpack_tril(n, vin + npair_in*row_id, buf, 0);
        (*fmmm)(vout + npair_out*row_id, buf, buf + n*n, envs, 0);
}
void AO2MOtranse2_nr_s2(int (*fmmm)(), int row_id,
double *vout, double *vin, double *buf,
struct _AO2MOEnvs *envs)
{
AO2MOtranse2_nr_s2kl(fmmm, row_id, vout, vin, buf, envs);
}
void AO2MOtranse2_nr_s4(int (*fmmm)(), int row_id,
double *vout, double *vin, double *buf,
struct _AO2MOEnvs *envs)
{
AO2MOtranse2_nr_s2kl(fmmm, row_id, vout, vin, buf, envs);
}
/*
* ************************************************
* sort (shell-based) integral blocks then transform
*/
/*
 * Sort a shell-blocked s1 integral row into AO (square) order, then
 * transform it.  The input row is a concatenation of di x dj blocks,
 * one per (ish,jsh) shell pair in row-major shell order; each block is
 * scattered to its (ao_loc[ish], ao_loc[jsh]) corner of the nao x nao
 * scratch square before the fmmm kernel runs on the square.
 */
void AO2MOsortranse2_nr_s1(int (*fmmm)(), int row_id,
                           double *vout, double *vin, double *buf,
                           struct _AO2MOEnvs *envs)
{
        const int nao = envs->nao;
        const int *ao_loc = envs->ao_loc;
        const size_t npair_out = (*fmmm)(NULL, NULL, buf, envs, OUTPUTIJ);
        const size_t npair_in = (*fmmm)(NULL, NULL, buf, envs, INPUT_IJ);
        int ish, jsh, di, dj, i, j;
        double *dst;
        const double *src = vin + npair_in * row_id;
        for (ish = 0; ish < envs->nbas; ish++) {
                di = ao_loc[ish+1] - ao_loc[ish];
                for (jsh = 0; jsh < envs->nbas; jsh++) {
                        dj = ao_loc[jsh+1] - ao_loc[jsh];
                        dst = buf + ao_loc[ish] * nao + ao_loc[jsh];
                        for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        dst[i*nao+j] = src[i*dj+j];
                                }
                        }
                        src += di * dj;   /* next contiguous shell block */
                }
        }
        (*fmmm)(vout + npair_out*row_id, buf, buf + nao*nao, envs, 0);
}
/* s2ij shell-sorted rows share the s1 layout here; delegate to s1. */
void AO2MOsortranse2_nr_s2ij(int (*fmmm)(), int row_id,
                             double *vout, double *vin, double *buf,
                             struct _AO2MOEnvs *envs)
{
        AO2MOsortranse2_nr_s1(fmmm, row_id, vout, vin, buf, envs);
}
/*
 * Sort a shell-blocked triangular row into the lower triangle of a square
 * nao x nao buffer, then transform it with fmmm.
 * Input layout per row: for each ish, full di x dj rectangles for every
 * jsh < ish, followed by a packed lower-triangular di*(di+1)/2 block for
 * jsh == ish.  Only the lower triangle of buf is written; presumably the
 * s2-style fmmm kernels read just that triangle — verify against the
 * AO2MOmmm_nr_s2_* implementations.
 */
void AO2MOsortranse2_nr_s2kl(int (*fmmm)(), int row_id,
                             double *vout, double *vin, double *buf,
                             struct _AO2MOEnvs *envs)
{
        int nao = envs->nao;
        int *ao_loc = envs->ao_loc;
        size_t ij_pair = (*fmmm)(NULL, NULL, buf, envs, OUTPUTIJ);
        size_t nao2 = (*fmmm)(NULL, NULL, buf, envs, INPUT_IJ);
        int ish, jsh, di, dj;
        int i, j, ij;
        double *pbuf;
        vin += nao2 * row_id;
        for (ish = 0; ish < envs->nbas; ish++) {
                di = ao_loc[ish+1] - ao_loc[ish];
                /* off-diagonal shell pairs: full di x dj rectangles */
                for (jsh = 0; jsh < ish; jsh++) {
                        dj = ao_loc[jsh+1] - ao_loc[jsh];
                        pbuf = buf + ao_loc[ish] * nao + ao_loc[jsh];
                        for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        pbuf[i*nao+j] = vin[i*dj+j];
                                }
                        }
                        vin += di * dj;
                }
                /* lower triangle block when ish == jsh */
                pbuf = buf + ao_loc[ish] * nao + ao_loc[ish];
                for (ij = 0, i = 0; i < di; i++) {
                        for (j = 0; j <= i; j++, ij++) {
                                pbuf[i*nao+j] = vin[ij];
                        }
                }
                vin += di * (di+1) / 2;
        }
        (*fmmm)(vout+ij_pair*row_id, buf, buf+nao*nao, envs, 0);
}
/* s2 is an alias for s2kl: triangular shell-sorted row layout. */
void AO2MOsortranse2_nr_s2(int (*fmmm)(), int row_id,
                           double *vout, double *vin, double *buf,
                           struct _AO2MOEnvs *envs)
{
        AO2MOsortranse2_nr_s2kl(fmmm, row_id, vout, vin, buf, envs);
}
/* s4 rows use the same triangular shell-sorted layout; delegate. */
void AO2MOsortranse2_nr_s4(int (*fmmm)(), int row_id,
                           double *vout, double *vin, double *buf,
                           struct _AO2MOEnvs *envs)
{
        AO2MOsortranse2_nr_s2kl(fmmm, row_id, vout, vin, buf, envs);
}
/*
* ************************************************
* combine ftrans and fmmm
*/
/*
 * Convenience wrappers binding a specific ftrans routine to a specific
 * fmmm kernel.  The `nop` argument only matches the generic ftrans
 * signature and is ignored.  Naming: s1/s2 = AO symmetry of the input
 * rows, sort* = shell-sorted input, iltj/igtj/s2 = presumably the
 * relative sizes/symmetry of the two MO index ranges handled by the
 * corresponding AO2MOmmm_nr_* kernel — confirm against its definition.
 */
void AO2MOtrans_nr_s1_iltj(void *nop, int row_id,
                           double *vout, double *eri, double *buf,
                           struct _AO2MOEnvs *envs)
{
        AO2MOtranse2_nr_s1(AO2MOmmm_nr_s1_iltj, row_id, vout, eri, buf, envs);
}
void AO2MOtrans_nr_s1_igtj(void *nop, int row_id,
                           double *vout, double *eri, double *buf,
                           struct _AO2MOEnvs *envs)
{
        AO2MOtranse2_nr_s1(AO2MOmmm_nr_s1_igtj, row_id, vout, eri, buf, envs);
}
/* shell-sorted s1 variants */
void AO2MOtrans_nr_sorts1_iltj(void *nop, int row_id,
                               double *vout, double *eri, double *buf,
                               struct _AO2MOEnvs *envs)
{
        AO2MOsortranse2_nr_s1(AO2MOmmm_nr_s1_iltj, row_id, vout, eri, buf,envs);
}
void AO2MOtrans_nr_sorts1_igtj(void *nop, int row_id,
                               double *vout, double *eri, double *buf,
                               struct _AO2MOEnvs *envs)
{
        AO2MOsortranse2_nr_s1(AO2MOmmm_nr_s1_igtj, row_id, vout, eri, buf,envs);
}
/* triangular-packed (s2) input rows */
void AO2MOtrans_nr_s2_iltj(void *nop, int row_id,
                           double *vout, double *eri, double *buf,
                           struct _AO2MOEnvs *envs)
{
        AO2MOtranse2_nr_s2kl(AO2MOmmm_nr_s2_iltj, row_id, vout, eri, buf, envs);
}
void AO2MOtrans_nr_s2_igtj(void *nop, int row_id,
                           double *vout, double *eri, double *buf,
                           struct _AO2MOEnvs *envs)
{
        AO2MOtranse2_nr_s2kl(AO2MOmmm_nr_s2_igtj, row_id, vout, eri, buf, envs);
}
void AO2MOtrans_nr_s2_s2(void *nop, int row_id,
                         double *vout, double *eri, double *buf,
                         struct _AO2MOEnvs *envs)
{
        AO2MOtranse2_nr_s2kl(AO2MOmmm_nr_s2_s2, row_id, vout, eri, buf, envs);
}
/* shell-sorted triangular (s2) input rows */
void AO2MOtrans_nr_sorts2_iltj(void *nop, int row_id,
                               double *vout, double *eri, double *buf,
                               struct _AO2MOEnvs *envs)
{
        AO2MOsortranse2_nr_s2kl(AO2MOmmm_nr_s2_iltj, row_id, vout, eri, buf, envs);
}
void AO2MOtrans_nr_sorts2_igtj(void *nop, int row_id,
                               double *vout, double *eri, double *buf,
                               struct _AO2MOEnvs *envs)
{
        AO2MOsortranse2_nr_s2kl(AO2MOmmm_nr_s2_igtj, row_id, vout, eri, buf, envs);
}
void AO2MOtrans_nr_sorts2_s2(void *nop, int row_id,
                             double *vout, double *eri, double *buf,
                             struct _AO2MOEnvs *envs)
{
        AO2MOsortranse2_nr_s2kl(AO2MOmmm_nr_s2_s2, row_id, vout, eri, buf,envs);
}
/*
* ************************************************
* Denoting 2e integrals (ij|kl),
* transform ij for ksh_start <= k shell < ksh_end.
* The transformation C_pi C_qj (pq|k*) coefficients are stored in
* mo_coeff, C_pi and C_qj are offset by i_start and i_count, j_start and j_count
*
* The output eri is an 2D array, ordered as (kl-AO-pair,ij-MO-pair) in
* C-order. Transposing is needed before calling AO2MOnr_e2_drv.
* eri[ncomp,nkl,mo_i,mo_j]
*/
/*
 * First half-transformation driver: compute the AO integral blocks for
 * the requested kl shell range into a temporary eri_ao buffer, then run
 * the ij transformation on every kl row.
 * eri_ao holds nkl*ncomp rows of nao*nao doubles; the output eri is
 * ordered (kl-AO-pair, ij-MO-pair) as documented above.
 *
 * Fix: the allocation was previously checked only by assert(), which
 * compiles to nothing under NDEBUG and would let a failed malloc fall
 * through to a NULL dereference.  Now bail out explicitly on failure.
 */
void AO2MOnr_e1_drv(int (*intor)(), void (*fill)(), void (*ftrans)(), int (*fmmm)(),
                    double *eri, double *mo_coeff,
                    int klsh_start, int klsh_count, int nkl, int ncomp,
                    int *orbs_slice, int *ao_loc,
                    CINTOpt *cintopt, CVHFOpt *vhfopt,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
        int nao = ao_loc[nbas];
        double *eri_ao = malloc(sizeof(double) * nao*nao*nkl*ncomp);
        assert(eri_ao);
        if (eri_ao == NULL) {
                /* with NDEBUG the assert above is a no-op; returning here
                 * avoids dereferencing NULL inside the fill stage */
                return;
        }
        AO2MOnr_e1fill_drv(intor, fill, eri_ao, klsh_start, klsh_count,
                           nkl, ncomp, ao_loc, cintopt, vhfopt,
                           atm, natm, bas, nbas, env);
        AO2MOnr_e2_drv(ftrans, fmmm, eri, eri_ao, mo_coeff,
                       nkl*ncomp, nao, orbs_slice, ao_loc, nbas);
        free(eri_ao);
}
/*
 * Second half-transformation driver: apply ftrans (which delegates to
 * the fmmm kernel) to each of the nij rows of vin, writing vout.
 * orbs_slice = {i_start, i_stop, j_start, j_stop} defines the bra/ket
 * MO ranges.  Rows are distributed dynamically over OpenMP threads,
 * each with its own scratch buffer.
 */
void AO2MOnr_e2_drv(void (*ftrans)(), int (*fmmm)(),
                    double *vout, double *vin, double *mo_coeff,
                    int nij, int nao, int *orbs_slice, int *ao_loc, int nbas)
{
        struct _AO2MOEnvs envs;
        envs.bra_start = orbs_slice[0];
        envs.bra_count = orbs_slice[1] - orbs_slice[0];
        envs.ket_start = orbs_slice[2];
        envs.ket_count = orbs_slice[3] - orbs_slice[2];
        envs.nao = nao;
        envs.nbas = nbas;
        envs.ao_loc = ao_loc;
        envs.mo_coeff = mo_coeff;
#pragma omp parallel default(none) \
        shared(ftrans, fmmm, vout, vin, nij, envs, nao, orbs_slice)
{
        int i;
        int i_count = envs.bra_count;
        int j_count = envs.ket_count;
        /* per-thread scratch, sized for an unpack buffer plus the partial
         * transform.  NOTE(review): the malloc result is not checked; a
         * failed allocation would crash inside ftrans.  A guard cannot
         * simply skip the loop, since every thread must encounter the
         * same OpenMP worksharing construct. */
        double *buf = malloc(sizeof(double) * (nao+i_count) * (nao+j_count));
#pragma omp for schedule(dynamic)
        for (i = 0; i < nij; i++) {
                (*ftrans)(fmmm, i, vout, vin, buf, &envs);
        }
        free(buf);
}
}
/*
* The size of eri is ncomp*nkl*nao*nao, note the upper triangular part
* may not be filled
*/
/*
 * Compute AO integrals (ij|kl) for the kl shell range: each OpenMP
 * thread takes whole ish row shells (dynamic, chunk 1) and calls `fill`.
 * eri must provide ncomp*nkl*nao*nao doubles; depending on the fill
 * routine's symmetry, the upper-triangular part may be left unfilled.
 */
void AO2MOnr_e1fill_drv(int (*intor)(), void (*fill)(), double *eri,
                        int klsh_start, int klsh_count, int nkl, int ncomp,
                        int *ao_loc, CINTOpt *cintopt, CVHFOpt *vhfopt,
                        int *atm, int natm, int *bas, int nbas, double *env)
{
        int i;
        int nao = ao_loc[nbas];
        int dmax = 0;   /* widest shell; determines scratch size */
        for (i= 0; i< nbas; i++) {
                dmax = MAX(dmax, ao_loc[i+1]-ao_loc[i]);
        }
        /* positional initialization: field order must match the
         * declaration of struct _AO2MOEnvs; the four zeros are presumably
         * the bra/ket slice fields, unused by the fill stage — confirm
         * against the struct definition */
        struct _AO2MOEnvs envs = {natm, nbas, atm, bas, env, nao,
                                  klsh_start, klsh_count, 0, 0, 0, 0,
                                  ncomp, ao_loc, NULL, cintopt, vhfopt};
        int (*fprescreen)();
        if (vhfopt) {
                fprescreen = vhfopt->fprescreen;
        } else {
                fprescreen = CVHFnoscreen;      /* no screening */
        }
#pragma omp parallel default(none) \
        shared(fill, fprescreen, eri, envs, intor, nkl, nbas, dmax, ncomp)
{
        int ish;
        /* per-thread scratch large enough for the biggest shell quartet.
         * NOTE(review): malloc result is unchecked before use. */
        double *buf = malloc(sizeof(double)*dmax*dmax*dmax*dmax*ncomp);
#pragma omp for schedule(dynamic, 1)
        for (ish = 0; ish < nbas; ish++) {
                (*fill)(intor, fprescreen, eri, buf, nkl, ish, &envs);
        }
        free(buf);
}
}
|
nstream.c | /*
Copyright (c) 2013, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
/**********************************************************************
NAME: nstream
PURPOSE: To compute memory bandwidth when adding a vector of a given
number of double precision values to the scalar multiple of
another vector of the same length, and storing the result in
a third vector.
USAGE: The program takes as input the number of threads, the number
of iterations to loop over the triad vectors, the length of the
vectors, and the offset between vectors
<progname> <# threads> <# iterations> <vector length> <offset>
The output consists of diagnostics to make sure the
algorithm worked, and of timing statistics.
FUNCTIONS CALLED:
Other than OpenMP or standard C functions, the following
external functions are used in this program:
wtime()
bail_out()
checkTRIADresults()
NOTES: Bandwidth is determined as the number of words read, plus the
number of words written, times the size of the words, divided
by the execution time. For a vector length of N, the total
number of words read and written is 3*N*sizeof(double).
HISTORY: This code is loosely based on the Stream benchmark by John
McCalpin, but does not follow all the Stream rules. Hence,
reported results should not be associated with Stream in
external publications
REVISION: Modified by Tim Mattson to handle OpenMP correctly
REVISION: Modified by Rob Van der Wijngaart, December 2005, to
parameterize vector size and offsets through compiler flags.
Also removed all Stream cases except TRIAD.
REVISION: Modified by Rob Van der Wijngaart, May 2006, to introduce
dependence between successive triad operations. This is
necessary to avoid dead code elimination
**********************************************************************/
#include <par-res-kern_general.h>
#include <par-res-kern_omp.h>
#define DEFAULTMAXLENGTH 2000000
#ifdef MAXLENGTH
#if MAXLENGTH > 0
#define N MAXLENGTH
#else
#define N DEFAULTMAXLENGTH
#endif
#else
#define N DEFAULTMAXLENGTH
#endif
#ifdef STATIC_ALLOCATION
/* use static so the array lives in static storage (data/BSS segment) rather than on the stack */
static double a[N];
#else
static double * RESTRICT a;
#endif
static double * RESTRICT b;
static double * RESTRICT c;
#define SCALAR 3.0
static int checkTRIADresults(int, long int);
/*
 * Driver for the OpenMP STREAM-triad variant: parse arguments, set up
 * the three vectors (a = b + scalar*c), time `iterations` triad sweeps,
 * verify the result and report bandwidth.
 * Fix: corrected the user-facing typo "Incvalid array offset".
 */
int main(int argc, char **argv)
{
  long int j, k;             /* loop indices                                */
  double   scalar;           /* constant used in Triad operation            */
  int      iterations;       /* number of times vector loop gets repeated   */
  long int length,           /* total vector length                         */
           offset;           /* offset between vectors a and b, and b and c */
  double   bytes;            /* memory IO size                              */
  size_t   space;            /* memory used for all vectors                 */
  double   nstream_time,     /* timing parameters                           */
           avgtime = 0.0,
           maxtime = 0.0,
           mintime = 366.0*24.0*3600.0; /* set the minimum time to a
                             large value; one leap year should be enough    */
  int      nthread_input;    /* requested thread count                      */
  int      nthread;          /* actual thread count                         */
  int      num_error=0;      /* flag that signals that requested and
                                obtained numbers of threads are the same    */

  /********************************************************************
   * process and test input parameters
   ********************************************************************/
  if (argc != 5){
    printf("Usage: %s <# threads> <# iterations> <vector length> <offset>\n", *argv);
    exit(EXIT_FAILURE);
  }

  nthread_input = atoi(*++argv);
  iterations    = atoi(*++argv);
  length        = atol(*++argv);
  offset        = atol(*++argv);

  if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
    printf("ERROR: Invalid number of threads: %d\n", nthread_input);
    exit(EXIT_FAILURE);
  }
  if (iterations < 1) {
    printf("ERROR: Invalid number of iterations: %d\n", iterations);
    exit(EXIT_FAILURE);
  }
  if (length < 0) {
    printf("ERROR: Invalid vector length: %ld\n", length);
    exit(EXIT_FAILURE);
  }
  if (offset < 0) {
    /* typo fix: message previously read "Incvalid" */
    printf("ERROR: Invalid array offset: %ld\n", offset);
    exit(EXIT_FAILURE);
  }

#ifdef STATIC_ALLOCATION
  /* a[] is statically sized; the three padded vectors must fit in it */
  if ((3*length + 2*offset) > N) {
    printf("ERROR: vector length/offset %ld/%ld too ", length, offset);
    printf("large; increase MAXLENGTH in Makefile or decrease vector length\n");
    exit(EXIT_FAILURE);
  }
#endif

  omp_set_num_threads(nthread_input);

#ifndef STATIC_ALLOCATION
  /* one allocation holds all three vectors, laid out a | b | c with
     `offset` doubles of padding between consecutive vectors */
  space = (3*length + 2*offset)*sizeof(double);
  a = (double *) malloc(space);
  if (!a) {
    printf("ERROR: Could not allocate %ld words for vectors\n",
           3*length+2*offset);
    exit(EXIT_FAILURE);
  }
#endif
  b = a + length + offset;
  c = b + length + offset;

  #pragma omp parallel private(j,k)
  {
  #pragma omp master
  {
  nthread = omp_get_num_threads();
  printf("OpenMP stream triad: A = B + scalar*C\n");
  if (nthread != nthread_input) {
    num_error = 1;
    printf("ERROR: number of requested threads %d does not equal ",
           nthread_input);
    printf("number of spawned threads %d\n", nthread);
  }
  else {
    printf("Number of threads = %i;\n",nthread_input);
    printf("Vector length = %ld\n", length);
    printf("Offset = %ld\n", offset);
    printf("Number of iterations = %d\n", iterations);
  }
  }
  bail_out(num_error);

  #pragma omp for
  #pragma vector always
  for (j=0; j<length; j++) {
    a[j] = 0.0;
    b[j] = 2.0;
    c[j] = 2.0;
  }

  /* --- MAIN LOOP --- repeat Triad iterations times --- */
  scalar = SCALAR;
  for (k=0; k<iterations; k++) {
    #pragma omp barrier
    #pragma omp master
    {
    nstream_time = wtime();
    }

    #pragma omp for
    #pragma vector always
    for (j=0; j<length; j++) a[j] = b[j]+scalar*c[j];

    #pragma omp master
    if (k>0 || iterations==1) { /* skip the first (warm-up) iteration */
      nstream_time = wtime() - nstream_time;
      avgtime = avgtime + nstream_time;
      mintime = MIN(mintime, nstream_time);
      maxtime = MAX(maxtime, nstream_time);
    }
    /* insert a dependency between iterations to avoid dead-code elimination */
    #pragma omp for
    #pragma vector always
    for (j=0; j<length; j++) b[j] = a[j];
  }
  } /* end of OpenMP parallel region */

  /*********************************************************************
  ** Analyze and output results.
  *********************************************************************/
  bytes = 3.0 * sizeof(double) * length;  /* 2 reads + 1 write per element */
  if (checkTRIADresults(iterations, length)) {
    avgtime = avgtime/(double)(MAX(iterations-1,1));
    printf("Rate (MB/s): %lf, Avg time (s): %lf, Min time (s): %lf",
           1.0E-06 * bytes/mintime, avgtime, mintime);
    printf(", Max time (s): %lf\n", maxtime);
  }
  else exit(EXIT_FAILURE);
  return 0;
}
/*
 * Verify the triad result by replaying the per-element recurrence in
 * scalar form: every element follows the same update, so one scalar
 * replay predicts each a[j], and length * aj is the expected checksum.
 * Returns 1 when the checksum matches within epsilon, 0 otherwise.
 */
int checkTRIADresults (int iterations, long int length) {
  double aj, bj, cj, scalar, asum;
  double epsilon = 1.e-8;   /* relative tolerance */
  long int j,k;

  /* reproduce initialization */
  aj = 0.0;
  bj = 2.0;
  cj = 2.0;

  /* now execute timing loop */
  scalar = SCALAR;
  for (k=0; k<iterations; k++) {
    aj = bj+scalar*cj;
    bj = aj;   /* matches the b[j] = a[j] dependency copy in main */
  }
  aj = aj * (double) (length);   /* expected sum of `length` equal elements */

  asum = 0.0;
  for (j=0; j<length; j++) asum += a[j];

#ifdef VERBOSE
  printf ("Results Comparison: \n");
  printf (" Expected checksum: %f\n",aj);
  printf (" Observed checksum: %f\n",asum);
#endif

  /* NOTE(review): for length == 0 both aj and asum are 0, so this is
   * 0/0 (NaN); the comparison is then false and validation "passes" —
   * confirm that is intended */
  if (ABS(aj-asum)/asum > epsilon) {
    printf ("Failed Validation on output array\n");
#ifndef VERBOSE
    printf (" Expected checksum: %f \n",aj);
    printf (" Observed checksum: %f \n",asum);
#endif
    return (0);
  }
  else {
    printf ("Solution Validates\n");
    return (1);
  }
}
|
task_early_fulfill.c | // RUN: %libomp-compile -fopenmp-version=50 && env OMP_NUM_THREADS='3' \
// RUN: %libomp-run | %sort-threads | FileCheck %s
// As of gcc 10.1, gcc still does not support the detach clause on the task construct.
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8, gcc-9, gcc-10
// clang supports detach clause since version 11.
// UNSUPPORTED: clang-10, clang-9, clang-8, clang-7
// icc compiler does not support detach clause.
// UNSUPPORTED: icc
#include "callback.h"
#include <omp.h>
int main() {
#pragma omp parallel
#pragma omp master
  {
    omp_event_handle_t event;
    // if(0) makes the detachable task undeferred, so the event is
    // fulfilled inside the task body before the task completes — the
    // "early fulfill" path this test exercises
#pragma omp task detach(event) if (0)
    { omp_fulfill_event(event); }
    // taskwait must not hang: the event was already fulfilled
#pragma omp taskwait
  }
  return 0;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_schedule'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin:
// CHECK-SAME: parent_task_id=[[PARENT_TASK_ID:[0-9]+]],
// CHECK-SAME: parent_task_frame.exit=[[NULL]],
// CHECK-SAME: parent_task_frame.reenter=0x{{[0-f]+}},
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]],
// CHECK-SAME: requested_team_size=3,
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID]],
// CHECK-SAME: parent_task_frame.exit=0x{{[0-f]+}},
// CHECK-SAME: parent_task_frame.reenter=0x{{[0-f]+}},
// CHECK-SAME: new_task_id=[[TASK_ID:[0-9]+]],
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[IMPLICIT_TASK_ID]],
// CHECK-SAME: second_task_id=[[TASK_ID]],
// CHECK-SAME: prior_task_status=ompt_task_switch=7
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[TASK_ID]],
// CHECK-SAME: second_task_id=18446744073709551615,
// CHECK-SAME: prior_task_status=ompt_task_early_fulfill=5
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[TASK_ID]],
// CHECK-SAME: second_task_id=[[IMPLICIT_TASK_ID]],
// CHECK-SAME: prior_task_status=ompt_task_complete=1
|
GB_binop__ne_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ne_int64
// A.*B function (eWiseMult): GB_AemultB__ne_int64
// A*D function (colscale): GB_AxD__ne_int64
// D*A function (rowscale): GB_DxB__ne_int64
// C+=B function (dense accum): GB_Cdense_accumB__ne_int64
// C+=b function (dense accum): GB_Cdense_accumb__ne_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ne_int64
// C=scalar+B GB_bind1st__ne_int64
// C=scalar+B' GB_bind1st_tran__ne_int64
// C=A+scalar GB_bind2nd__ne_int64
// C=A'+scalar GB_bind2nd_tran__ne_int64
// C type: bool
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x != y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_INT64 || GxB_NO_NE_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator.
// The loop body comes from the shared template, specialized by the GB_*
// macros defined near the top of this file (GB_BINOP is aij != bij).
GrB_Info GB_Cdense_ewise3_noaccum__ne_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// The template is compiled out (#if 0) for this operator — presumably
// NE is not usable as an accumulator — so this is a stub that reports
// success without doing any work.
GrB_Info GB_Cdense_accumB__ne_int64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// Like GB_Cdense_accumB above, the body is compiled out (#if 0) for this
// operator; the function is a success-reporting stub.
GrB_Info GB_Cdense_accumb__ne_int64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
// Cx is bool because the NE "product" aij != djj is boolean.
GrB_Info GB_AxD__ne_int64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__ne_int64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, applying the NE operator where the
// patterns of A and B intersect (GraphBLAS eWiseAdd: union of patterns).
GrB_Info GB_AaddB__ne_int64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (GraphBLAS eWiseMult: intersection
// of the patterns of A and B), with the NE operator.
GrB_Info GB_AemultB__ne_int64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for every entry p, with scalar x bound as the
// first operand.  Cx and Bx may be aliased; each entry is read before
// its (possibly overlapping) output slot is written.
GrB_Info GB_bind1st__ne_int64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *z = (bool *) Cx_output ;
    int64_t xval = (*((int64_t *) x_input)) ;
    int64_t *b = (int64_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        int64_t bval = b [k] ;
        z [k] = (xval != bval) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y) for every entry p, with scalar y bound as the
// second operand.  Cx and Ax may be aliased.
GrB_Info GB_bind2nd__ne_int64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *z = (bool *) Cx_output ;
    int64_t *a = (int64_t *) Ax_input ;
    int64_t yval = (*((int64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        int64_t aval = a [k] ;
        z [k] = (aval != yval) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it writes
// Cx [pC] = (x != Ax [pA]) while C = op (x, A') is being built.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int64_t aij = Ax [pA] ;      \
    Cx [pC] = (x != aij) ;    \
}
// Transpose A and apply z = (x != aij) with the scalar bound first.
GrB_Info GB_bind1st_tran__ne_int64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent generated kernels in this file
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it writes
// Cx [pC] = (Ax [pA] != y) while C = op (A', y) is being built.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int64_t aij = Ax [pA] ;      \
    Cx [pC] = (aij != y) ;    \
}
// Transpose A and apply z = (aij != y) with the scalar bound second.
GrB_Info GB_bind2nd_tran__ne_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
special_random_ops.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#ifndef LIBND4J_SPECIAL_RANDOM_OPS_H
#define LIBND4J_SPECIAL_RANDOM_OPS_H
#include <ops/random_ops.h>
#include <helpers/shape.h>
namespace randomOps {
//////////////////////////////////////////////////////////////////////
/**
 * Choice: fills Z with values drawn from X, where element f of X is picked
 * with the probability stored in Y (inverse-CDF scan over the cumulative
 * sum of Y).  Host (specialOp) and CUDA (specialOpCuda) variants implement
 * the same algorithm.
 */
template<typename T>
class Choice {
public:
method_idx
method_X
method_XY
static const bool requiresSpecial = true;
#ifdef __CUDACC__
__device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
/**
* X holds data,
* Y holds probabilities
* Z will hold results
*/
// TODO: we probably might want to skip this sum, and state that probabilities array should be real probabilities, i.e. should sum to 1.0
//T probSum = extraArguments[0];
__shared__ Nd4jLong xLength;
__shared__ Nd4jLong yLength;
__shared__ Nd4jLong zLength;
__shared__ Nd4jLong xEWS;
__shared__ Nd4jLong yEWS;
__shared__ Nd4jLong zEWS;
__shared__ nd4j::random::RandomBuffer *buffer;
__shared__ unsigned char *cB;
__shared__ unsigned char *dB;
__shared__ nd4j::random::RandomBuffer *devBuffer;
// thread 0 copies the RNG state pointer and shape metadata into shared memory
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
buffer = (nd4j::random::RandomBuffer *) shmem;
cB = shmem;
devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
dB = reinterpret_cast<unsigned char *> (state);
xLength = shape::length(xShapeBuffer);
yLength = shape::length(yShapeBuffer);
zLength = shape::length(zShapeBuffer);
xEWS = shape::elementWiseStride(xShapeBuffer);
yEWS = shape::elementWiseStride(yShapeBuffer);
zEWS = shape::elementWiseStride(zShapeBuffer);
}
__syncthreads();
// using this loop instead of memcpy
for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
cB[e] = dB[e];
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (zEWS >= 1 && xEWS >= 1 && yEWS >= 1) {
// fast path: all buffers have a positive element-wise stride
for (Nd4jLong e = tid; e < zLength; e+=blockDim.x * gridDim.x) {
T prob = buffer->relativeT<T>(e);
T cumProb = (T) 0.0f;
// scan the cumulative probabilities until prob falls inside a bucket
for (Nd4jLong f = 0; f < yLength; f++) {
T relProb = y[f * yEWS];
cumProb += relProb;
if (prob <= cumProb || f == yLength - 1) {
z[e * zEWS] = x[f * xEWS];
// f += yLength terminates the scan (acts as a loop break)
f += yLength;
}
__syncthreads();
}
__syncthreads();
}
} else {
// slow path: arbitrary strides, resolved per element via ind2sub/getOffset
Nd4jLong xCoord[MAX_RANK];
Nd4jLong yCoord[MAX_RANK];
Nd4jLong zCoord[MAX_RANK];
__shared__ int xRank;
__shared__ int yRank;
__shared__ int zRank;
__shared__ Nd4jLong *xShape;
__shared__ Nd4jLong *yShape;
__shared__ Nd4jLong *zShape;
__shared__ Nd4jLong *xStride;
__shared__ Nd4jLong *yStride;
__shared__ Nd4jLong *zStride;
if (threadIdx.x == 0) {
xRank = shape::rank(xShapeBuffer);
yRank = shape::rank(yShapeBuffer);
zRank = shape::rank(zShapeBuffer);
xShape = shape::shapeOf(xShapeBuffer);
yShape = shape::shapeOf(yShapeBuffer);
zShape = shape::shapeOf(zShapeBuffer);
xStride = shape::stride(xShapeBuffer);
yStride = shape::stride(yShapeBuffer);
zStride = shape::stride(zShapeBuffer);
}
__syncthreads();
for (Nd4jLong i = tid; i < zLength; i+=blockDim.x * gridDim.x) {
shape::ind2sub(zRank, zShape, i, zCoord);
auto zOffset2 = shape::getOffset(0, zShape, zStride, zCoord, zRank);
T prob = buffer->relativeT<T>(i);
T cumProb = (T) 0.0f;
for (Nd4jLong f = 0; f < yLength; f++) {
// NOTE(review): 'i' is used to index Y here, where the scan variable 'f'
// looks intended (the fast path uses f * yEWS) — verify against upstream.
shape::ind2sub(yRank, yShape, i, yCoord);
auto yOffset2 = shape::getOffset(0, yShape, yStride, yCoord, yRank);
T relProb = y[yOffset2];
cumProb += relProb;
if (prob <= cumProb || f == yLength - 1) {
shape::ind2sub(xRank, xShape, f, xCoord);
auto xOffset2 = shape::getOffset(0, xShape, xStride, xCoord, xRank);
z[zOffset2] = x[xOffset2];
f += yLength;
}
__syncthreads();
}
__syncthreads();
}
}
__syncthreads();
// advance the device RNG so the next op draws fresh numbers
devBuffer->rewind(zLength);
}
#endif
static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
/**
* X holds data,
* Y holds probabilities
* Z will hold results
*/
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
// TODO: we probably might want to skip this sum, and state that probabilities array should be real probabilities, i.e. should sum to 1.0
//T probSum = extraArguments[0];
Nd4jLong yLength = shape::length(yShapeBuffer);
Nd4jLong zLength = shape::length(zShapeBuffer);
auto xEWS = shape::elementWiseStride(xShapeBuffer);
auto yEWS = shape::elementWiseStride(yShapeBuffer);
auto zEWS = shape::elementWiseStride(zShapeBuffer);
// size the thread team to the amount of work
int elementsPerThread = zLength / TAD_THRESHOLD;
int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
_threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());
if (zEWS >= 1 && xEWS >= 1 && yEWS >= 1) {
// fast path: all buffers have a positive element-wise stride
#pragma omp parallel for num_threads(_threads) if (_threads > 1) schedule(guided)
for (Nd4jLong e = 0; e < zLength; e++) {
T prob = buffer->relativeT<T>(e);
T cumProb = (T) 0.0f;
for (Nd4jLong f = 0; f < yLength; f++) {
T relProb = y[f * yEWS];
cumProb += relProb;
if (prob <= cumProb || f == yLength - 1) {
z[e * zEWS] = x[f * xEWS];
// f += yLength terminates the scan (acts as a loop break)
f += yLength;
}
}
}
} else {
// slow path: arbitrary strides, resolved per element via ind2sub/getOffset
Nd4jLong xCoord[MAX_RANK];
Nd4jLong yCoord[MAX_RANK];
Nd4jLong zCoord[MAX_RANK];
int xRank = shape::rank(xShapeBuffer);
int yRank = shape::rank(yShapeBuffer);
int zRank = shape::rank(zShapeBuffer);
auto xShape = shape::shapeOf(xShapeBuffer);
auto yShape = shape::shapeOf(yShapeBuffer);
auto zShape = shape::shapeOf(zShapeBuffer);
auto xStride = shape::stride(xShapeBuffer);
auto yStride = shape::stride(yShapeBuffer);
auto zStride = shape::stride(zShapeBuffer);
#pragma omp parallel for num_threads(_threads) if (_threads > 1) schedule(guided)
for (Nd4jLong i = 0; i < zLength; i++) {
shape::ind2sub(zRank, zShape, i, zCoord);
auto zOffset2 = shape::getOffset(0, zShape, zStride, zCoord, zRank);
T prob = buffer->relativeT<T>(i);
T cumProb = (T) 0.0f;
for (Nd4jLong f = 0; f < yLength; f++) {
// NOTE(review): 'i' is used to index Y here, where the scan variable 'f'
// looks intended (the fast path uses f * yEWS) — verify against upstream.
shape::ind2sub(yRank, yShape, i, yCoord);
auto yOffset2 = shape::getOffset(0, yShape, yStride, yCoord, yRank);
T relProb = y[yOffset2];
cumProb += relProb;
if (prob <= cumProb || f == yLength - 1) {
shape::ind2sub(xRank, xShape, f, xCoord);
Nd4jLong xOffset2 = shape::getOffset(0, xShape, xStride, xCoord, xRank);
z[zOffset2] = x[xOffset2];
f += yLength;
}
}
}
}
// update rng state
buffer->rewindH(zLength);
}
};
//////////////////////////////////////////////////////////////////////
/**
* This Op produces random values within specified boundaries. Distribuion is Gaussian
*/
/**
 * GaussianDistribution: fills Z with normally distributed values generated by
 * the Box–Muller transform.  extraArguments[0] is the mean, extraArguments[1]
 * the standard deviation; when Y != Z, the per-element mean is read from Y.
 */
template<typename T>
class GaussianDistribution {
public:
method_XY
method_X
method_idx
static const bool requiresSpecial = true;
#ifdef __CUDACC__
__device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
__shared__ T epsilon;
__shared__ T two_pi;
__shared__ Nd4jLong zLength;
__shared__ Nd4jLong zEWS;
__shared__ Nd4jLong yEWS;
__shared__ T mean;
__shared__ T stddev;
__shared__ int step;
__shared__ T *tZ;
__shared__ nd4j::random::RandomBuffer *buffer;
__shared__ unsigned char *cB;
__shared__ unsigned char *dB;
__shared__ nd4j::random::RandomBuffer *devBuffer;
// thread 0 stages RNG state and parameters in shared memory
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(shmem);
cB = shmem;
devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
dB = reinterpret_cast<unsigned char *> (state);
tZ = reinterpret_cast<T *>(shmem + sizeof(nd4j::random::RandomBuffer));
zLength = shape::length(zShapeBuffer);
zEWS = shape::elementWiseStride(zShapeBuffer);
yEWS = shape::elementWiseStride(yShapeBuffer);
epsilon = static_cast<T>(1e-5);
two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846);
mean = extraArguments[0];
stddev = extraArguments[1];
step = (blockDim.x * gridDim.x);
}
__syncthreads();
// using this loop instead of memcpy
for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
cB[e] = dB[e];
}
__syncthreads();
Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x;
// Box–Muller consumes uniforms in pairs: even elements use cos, odd use sin,
// with the pair of uniforms shared through tZ in shared memory.
for (Nd4jLong e = tid; e < zLength; e += step) {
// we need to get random values
tZ[threadIdx.x] = buffer->relativeT<T>(e, epsilon, static_cast<T>(1.0f));
// fix for "next rng value"
if (e + 1 >= zLength && e % 2 == 0) {
// NOTE(review): tZ[threadIdx.x+1] assumes the shared buffer holds at least
// blockDim.x + 1 T slots — confirm against the kernel launch configuration.
tZ[threadIdx.x+1] = buffer->relativeT<T>(e+1, epsilon, static_cast<T>(1.0f));
}
T realMean = y == z ? mean : y[e * yEWS];
__syncthreads();
if (e % 2 == 0)
z[e *zEWS] = (nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(tZ[threadIdx.x])) * nd4j::math::nd4j_cos<T>(two_pi * tZ[threadIdx.x+1])) * stddev + realMean;
else
z[e *zEWS] = (nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(tZ[threadIdx.x-1])) * nd4j::math::nd4j_sin<T>(two_pi * tZ[threadIdx.x])) * stddev + realMean;
__syncthreads();
}
__syncthreads();
// advance the device RNG so the next op draws fresh numbers
devBuffer->rewind(zLength);
}
#endif
static inline void
specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
const T two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846);
auto zLength = shape::length(zShapeBuffer);
auto yEWS = shape::elementWiseStride(yShapeBuffer);
auto zEWS = shape::elementWiseStride(zShapeBuffer);
// size the thread team to the amount of work
int elementsPerThread = zLength / TAD_THRESHOLD;
int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
_threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());
int span = (zLength / _threads) + 8;
// we're enforcing even chunks, since it's mandatory for this algorithm
span -= span % 2;
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
T mean = extraArguments[0];
T stddev = extraArguments[1];
#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread)
{
int tid = omp_get_thread_num();
Nd4jLong start = span * tid;
Nd4jLong end = span * (tid + 1);
if (end > zLength) end = zLength;
T z0, z1;
T u0, u1;
T lnU0;
bool generated = false;
// each pair of uniforms (u0, u1) yields two normals (z0, z1):
// z0 is consumed on the generating iteration, z1 on the next one
for (Nd4jLong e = start; e < end; e++) {
if (!generated) {
/*
* Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries
*/
u0 = buffer->relativeT<T>(e, static_cast<T>(1e-5f), static_cast<T>(1.0f));
u1 = buffer->relativeT<T>((e + 1), static_cast<T>(1e-5f), static_cast<T>(1.0f));
lnU0 = nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(u0));
z0 = lnU0 * nd4j::math::nd4j_cos<T>(two_pi * u1);
z1 = lnU0 * nd4j::math::nd4j_sin<T>(two_pi * u1);
generated = true;
T realMean = y == z ? mean : y[e * yEWS];
z[e * zEWS] = z0 * stddev + realMean;
} else {
T realMean = y == z ? mean : y[e * yEWS];
z[e * zEWS] = z1 * stddev + realMean;
generated = false;
}
}
}
// update rng state
buffer->rewindH(zLength);
}
};
//////////////////////////////////////////////////////////////////////
/**
* This Op produces random values within [0..N], Distribuion is binomial
*/
/**
 * BinomialDistribution: each output element counts the successes over
 * `trials` Bernoulli draws.  extraArguments[0] is the trial count,
 * extraArguments[1] the default success probability; when Y != Z the
 * probability for trial t is read from Y[(t-1)].
 */
template<typename T>
class BinomialDistribution {
public:
method_XY
method_X
method_idx
static const bool requiresSpecial = true;
#ifdef __CUDACC__
__device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
int trials = (int) extraArguments[0];
T prob = extraArguments[1];
__shared__ Nd4jLong zLength;
__shared__ int yEWS;
__shared__ int zEWS;
__shared__ nd4j::random::RandomBuffer *buffer;
__shared__ unsigned char *cB;
__shared__ unsigned char *dB;
__shared__ nd4j::random::RandomBuffer *devBuffer;
// thread 0 stages RNG state and shape metadata in shared memory
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(shmem);
cB = shmem;
devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *>(state);
dB = reinterpret_cast<unsigned char *> (state);
zLength = shape::length(zShapeBuffer);
yEWS = shape::elementWiseStride(yShapeBuffer);
zEWS = shape::elementWiseStride(zShapeBuffer);
}
__syncthreads();
// using this loop instead of memcpy
for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
cB[e] = dB[e];
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong e = tid; e < zLength; e += blockDim.x * gridDim.x) {
int success = 0;
for (int t = 1; t <= trials; t++) {
T randVal = buffer->relativeT<T>((e+1) * t);
if (y != z) {
// we're using external probs
prob = y[(t-1) * yEWS];
}
if (randVal < prob)
success++;
}
// we need this, to eliminate excessive code branching in runtime
__syncthreads();
// if trials is set to 0, effectively we just have successful memset
z[e * zEWS] = static_cast<T>(success);
}
__syncthreads();
// advance the device RNG by the number of draws actually consumed
if (trials > 0)
devBuffer->rewind(zLength * trials);
}
#endif
static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
int trials = (int) extraArguments[0];
Nd4jLong zLength = shape::length(zShapeBuffer);
auto yEWS = shape::elementWiseStride(yShapeBuffer);
auto zEWS = shape::elementWiseStride(zShapeBuffer);
// size the thread team to the amount of work
int elementsPerThread = zLength / TAD_THRESHOLD;
int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
_threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());
int span = (zLength / _threads) + 8;
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread)
{
int tid = omp_get_thread_num();
Nd4jLong start = span * tid;
Nd4jLong end = span * (tid + 1);
if (end > zLength) end = zLength;
// prob is thread-local so per-trial overrides don't race
T prob = extraArguments[1];
for (Nd4jLong e = start; e < end; e++) {
int success = 0;
for (int t = 1; t <= trials; t++) {
T randVal = buffer->relativeT<T>((e+1) * t);
if (y != z) {
// we're using external probs
prob = y[(t-1) * yEWS];
}
if (randVal < prob)
success++;
}
// if trials is set to 0, effectively we just have successful memset
z[e * zEWS] = static_cast<T>(success);
}
}
// update rng state
if (trials > 0)
buffer->rewindH(zLength * trials);
}
};
//////////////////////////////////////////////////////////////////////
/**
* This Op produces random values within [0..N], Distribuion is binomial
*/
/**
 * BinomialDistributionEx: like BinomialDistribution, but when Y != Z the
 * success probability is taken per output element (Y[e]) rather than per
 * trial.  extraArguments[0] is the trial count, extraArguments[1] the
 * default probability.
 */
template<typename T>
class BinomialDistributionEx {
public:
method_XY
method_X
method_idx
static const bool requiresSpecial = true;
#ifdef __CUDACC__
__device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
int trials = (int) extraArguments[0];
T prob = extraArguments[1];
__shared__ Nd4jLong zLength;
__shared__ int yEWS;
__shared__ int zEWS;
__shared__ nd4j::random::RandomBuffer *buffer;
__shared__ unsigned char *cB;
__shared__ unsigned char *dB;
__shared__ nd4j::random::RandomBuffer *devBuffer;
// thread 0 stages RNG state and shape metadata in shared memory
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
buffer = (nd4j::random::RandomBuffer *) shmem;
cB = shmem;
devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
dB = reinterpret_cast<unsigned char *> (state);
zLength = shape::length(zShapeBuffer);
yEWS = shape::elementWiseStride(yShapeBuffer);
zEWS = shape::elementWiseStride(zShapeBuffer);
}
__syncthreads();
// using this loop instead of memcpy
for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
cB[e] = dB[e];
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong e = tid; e < zLength; e += blockDim.x * gridDim.x) {
int success = 0;
for (int t = 1; t <= trials; t++) {
T randVal = buffer->relativeT<T>((e+1) * t);
if (y != z) {
// we're using external probs, indexed per output element
prob = y[e * yEWS];
}
if (randVal < prob)
success++;
}
// we need this, to eliminate excessive code branching in runtime
__syncthreads();
// if trials is set to 0, effectively we just have successful memset
z[e * zEWS] = (T) success;
}
__syncthreads();
// advance the device RNG by the number of draws actually consumed
if (trials > 0)
devBuffer->rewind(zLength * trials);
}
#endif
static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
int trials = (int) extraArguments[0];
Nd4jLong zLength = shape::length(zShapeBuffer);
auto yEWS = shape::elementWiseStride(yShapeBuffer);
auto zEWS = shape::elementWiseStride(zShapeBuffer);
// size the thread team to the amount of work
int elementsPerThread = zLength / TAD_THRESHOLD;
int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
_threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());
auto span = (zLength / _threads) + 8;
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread)
{
int tid = omp_get_thread_num();
Nd4jLong start = span * tid;
Nd4jLong end = span * (tid + 1);
if (end > zLength) end = zLength;
// prob is thread-local so per-element overrides don't race
T prob = extraArguments[1];
for (Nd4jLong e = start; e < end; e++) {
int success = 0;
for (int t = 1; t <= trials; t++) {
T randVal = buffer->relativeT<T>((e+1) * t);
if (y != z) {
// we're using external probs, indexed per output element
prob = y[e * yEWS];
}
if (randVal < prob)
success++;
}
// if trials is set to 0, effectively we just have successful memset
z[e * zEWS] = static_cast<T>(success);
}
}
// update rng state
if (trials > 0)
buffer->rewindH(zLength * trials);
}
};
//////////////////////////////////////////////////////////////////////
// This Op produces random Gaussian values within [mean-2*stddev,mean+2*stddev]
/**
 * TruncatedNormalDistribution: Gaussian values restricted (by rejection
 * sampling) to [mean - 2*stddev, mean + 2*stddev].  Each Box–Muller pair
 * produces the samples for element e and its mirror e + middle; the pair
 * is redrawn until both samples fall inside the truncation window.
 */
template<typename T>
class TruncatedNormalDistribution {
public:
method_XY
method_X
method_idx
static const bool requiresSpecial = true;
#ifdef __CUDACC__
__device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
__shared__ T epsilon;
__shared__ T two_pi;
__shared__ Nd4jLong zLength;
__shared__ Nd4jLong zEWS;
__shared__ Nd4jLong yEWS;
__shared__ T mean;
__shared__ T stddev;
__shared__ int step;
__shared__ T *tZ;
__shared__ nd4j::random::RandomBuffer *buffer;
__shared__ unsigned char *cB;
__shared__ unsigned char *dB;
__shared__ nd4j::random::RandomBuffer *devBuffer;
// thread 0 stages RNG state and parameters in shared memory
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(shmem);
cB = shmem;
devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
dB = reinterpret_cast<unsigned char *> (state);
tZ = reinterpret_cast<T *>(shmem + sizeof(nd4j::random::RandomBuffer));
zLength = shape::length(zShapeBuffer);
zEWS = shape::elementWiseStride(zShapeBuffer);
yEWS = shape::elementWiseStride(yShapeBuffer);
epsilon = static_cast<T>(1e-6f);
two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846);
mean = extraArguments[0];
stddev = extraArguments[1];
step = (blockDim.x * gridDim.x);
}
__syncthreads();
// using this loop instead of memcpy
for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
cB[e] = dB[e];
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// each iteration produces two samples: element e and element e + middle
int middle = zLength % 2 == 0 ? zLength / 2 : zLength / 2 + 1;
T result0, result1, u0, u1, z0, z1, uT, uP;
// ds = half-width of the truncation window (2 * |stddev|)
T ds = nd4j::math::nd4j_abs<T>(stddev) * static_cast<T>(2.0f);
for (Nd4jLong e = tid; e < middle; e += step) {
// we need to get random values
Nd4jLong generation0 = 0;
auto epm = e + middle;
T realMean0 = y == z ? mean : y[e * yEWS];
T realMean1 = y == z ? mean : y[epm * yEWS];
T aRealMean0 = nd4j::math::nd4j_abs<T>(realMean0);
T aRealMean1 = nd4j::math::nd4j_abs<T>(realMean1);
// rejection loop: redraw the Box–Muller pair (offset by generation0)
// until both candidate samples land inside the window
do {
u0 = buffer->relativeT<T>(e + generation0, epsilon, static_cast<T>(1.0f));
u1 = buffer->relativeT<T>(epm + generation0, epsilon, static_cast<T>(1.0f));
uT = nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(u0));
uP = two_pi * u1;
z0 = uT * nd4j::math::nd4j_cos<T>(uP);
z1 = uT * nd4j::math::nd4j_sin<T>(uP);
result0 = z0 * stddev + realMean0;
result1 = z1 * stddev + realMean1;
generation0 += zLength;
} while (ds < aRealMean0 + nd4j::math::nd4j_abs<T>(result0) || aRealMean1 + nd4j::math::nd4j_abs<T>(result1) > ds);
z[e * zEWS] = result0;
// when zLength is odd, the last mirror index is out of range
if((epm) < zLength)
z[epm * zEWS] = result1;
}
__syncthreads();
// advance the device RNG so the next op draws fresh numbers
devBuffer->rewind(zLength);
}
#endif
static inline void
specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
const T two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846);
Nd4jLong zLength = shape::length(zShapeBuffer);
auto yEWS = shape::elementWiseStride(yShapeBuffer);
auto zEWS = shape::elementWiseStride(zShapeBuffer);
// each iteration produces two samples: element e and element e + middle
auto middle = zLength % 2 == 0 ? zLength / 2 : zLength / 2 + 1;
// size the thread team to the amount of work
int elementsPerThread = middle / TAD_THRESHOLD;
int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
_threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());
int span = (middle / _threads) + 8;
// we're enforcing even chunks, since it's mandatory for this algorithm
span -= span % 2;
nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
T mean = extraArguments[0];
T stddev = extraArguments[1];
#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread)
{
int tid = omp_get_thread_num();
Nd4jLong start = span * tid;
Nd4jLong end = span * (tid + 1);
if (end > middle) {
end = middle;
}
T z0, z1;
T u0, u1;
T result0, result1, lnu0, lnu1;
// ds = half-width of the truncation window (2 * |stddev|)
T ds = nd4j::math::nd4j_abs<T>(stddev) * (T) 2.0f;
for (Nd4jLong e = start; e < end; e++) {
/*
* Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries
*/
Nd4jLong generation0 = 0;
auto epm = e + middle;
T realMean0 = y == z ? mean : y[e * yEWS];
T realMean1 = y == z ? mean : y[epm * yEWS];
T aRealMean0 = nd4j::math::nd4j_abs<T>(realMean0);
T aRealMean1 = nd4j::math::nd4j_abs<T>(realMean1);
// rejection loop: redraw the Box–Muller pair (offset by generation0)
// until both candidate samples land inside the window
do {
u0 = buffer->relativeT<T>(e + generation0, static_cast<T>(1e-6f), static_cast<T>(1.0f));
u1 = buffer->relativeT<T>((epm + generation0), static_cast<T>(1e-6f), static_cast<T>(1.0f));
lnu0 = nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(u0));
lnu1 = two_pi * u1;
z0 = lnu0 * nd4j::math::nd4j_cos<T>(lnu1);
z1 = lnu0 * nd4j::math::nd4j_sin<T>(lnu1);
result0 = z0 * stddev + realMean0;
result1 = z1 * stddev + realMean1;
generation0 += zLength;
} while (aRealMean0 + nd4j::math::nd4j_abs<T>(result0) > ds || aRealMean1 + nd4j::math::nd4j_abs<T>(result1) > ds);
z[e*zEWS] = result0;
// when zLength is odd, the last mirror index is out of range
if(epm < zLength)
z[epm * zEWS] = result1;
}
}
// update rng state
buffer->rewindH(zLength);
}
};
//////////////////////////////////////////////////////////////////////
// This Op produces random Log-normal distribution
/**
 * LogNormalDistribution: fills Z with exp(N(mean, stddev)) values — a
 * Box–Muller Gaussian draw passed through nd4j_exp.  extraArguments[0] is
 * the mean, extraArguments[1] the standard deviation; when Y != Z the
 * per-element mean is read from Y.
 */
template<typename T>
class LogNormalDistribution {
public:
method_XY
method_X
method_idx
static const bool requiresSpecial = true;
#ifdef __CUDACC__
__device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
__shared__ T epsilon;
__shared__ T two_pi;
__shared__ Nd4jLong zLength;
__shared__ Nd4jLong zEWS;
__shared__ Nd4jLong yEWS;
__shared__ T mean;
__shared__ T stddev;
__shared__ int step;
__shared__ T *tZ;
__shared__ nd4j::random::RandomBuffer *buffer;
__shared__ unsigned char *cB;
__shared__ unsigned char *dB;
__shared__ nd4j::random::RandomBuffer *devBuffer;
// thread 0 stages RNG state and parameters in shared memory
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(shmem);
cB = shmem;
devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
dB = reinterpret_cast<unsigned char *> (state);
tZ = reinterpret_cast<T*>(shmem + sizeof(nd4j::random::RandomBuffer));
zLength = shape::length(zShapeBuffer);
zEWS = shape::elementWiseStride(zShapeBuffer);
yEWS = shape::elementWiseStride(yShapeBuffer);
epsilon = static_cast<T>(1e-5);
two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846);
mean = extraArguments[0];
stddev = extraArguments[1];
step = (blockDim.x * gridDim.x);
}
__syncthreads();
// using this loop instead of memcpy
for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
cB[e] = dB[e];
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Box–Muller consumes uniforms in pairs: even elements use cos, odd use sin,
// with the pair of uniforms shared through tZ in shared memory
for (Nd4jLong e = tid; e < zLength; e += step) {
// we need to get random values
tZ[threadIdx.x] = buffer->relativeT<T>(e, epsilon, static_cast<T>(1.0f));
// fix for "next rng value"
if (e + 1 >= zLength && e % 2 == 0) {
tZ[threadIdx.x+1] = buffer->relativeT<T>(e+1, epsilon, static_cast<T>(1.0f));
}
T realMean = y == z ? mean : y[e * yEWS];
__syncthreads();
if (e % 2 == 0)
z[e *zEWS] = nd4j::math::nd4j_exp<T>((nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(tZ[threadIdx.x])) * nd4j::math::nd4j_cos<T>(two_pi * tZ[threadIdx.x+1])) * stddev + realMean);
else
z[e *zEWS] = nd4j::math::nd4j_exp<T>((nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(tZ[threadIdx.x-1])) * nd4j::math::nd4j_sin<T>(two_pi * tZ[threadIdx.x])) * stddev + realMean);
__syncthreads();
}
__syncthreads();
// advance the device RNG so the next op draws fresh numbers
devBuffer->rewind(zLength);
}
#endif
static inline void
specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
const T two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846);
Nd4jLong zLength = shape::length(zShapeBuffer);
auto yEWS = shape::elementWiseStride(yShapeBuffer);
auto zEWS = shape::elementWiseStride(zShapeBuffer);
// size the thread team to the amount of work
int elementsPerThread = zLength / TAD_THRESHOLD;
int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
_threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());
int span = (zLength / _threads) + 8;
// we're enforcing even chunks, since it's mandatory for this algorithm
span -= span % 2;
auto buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
T mean = extraArguments[0];
T stddev = extraArguments[1];
#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread)
{
int tid = omp_get_thread_num();
Nd4jLong start = span * tid;
Nd4jLong end = span * (tid + 1);
if (end > zLength) end = zLength;
T z0, z1;
T u0, u1;
T lnU0;
bool generated = false;
// each pair of uniforms (u0, u1) yields two normals (z0, z1):
// z0 is consumed on the generating iteration, z1 on the next one
for (Nd4jLong e = start; e < end; e++) {
if (!generated) {
/*
* Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries
*/
u0 = buffer->relativeT<T>(e, static_cast<T>(1e-5f), static_cast<T>(1.0f));
u1 = buffer->relativeT<T>((e + 1), static_cast<T>(1e-5f), static_cast<T>(1.0f));
lnU0 = nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(u0));
z0 = lnU0 * nd4j::math::nd4j_cos<T>(two_pi * u1);
z1 = lnU0 * nd4j::math::nd4j_sin<T>(two_pi * u1);
generated = true;
T realMean = y == z ? mean : y[e * yEWS];
z[e * zEWS] = nd4j::math::nd4j_exp<T>(z0 * stddev + realMean);
} else {
T realMean = y == z ? mean : y[e * yEWS];
z[e * zEWS] = nd4j::math::nd4j_exp<T>(z1 * stddev + realMean);
generated = false;
}
}
}
// update rng state
buffer->rewindH(zLength);
}
};
}
#endif //LIBND4J_SPECIAL_RANDOM_OPS_H
|
example3.c | #include "emf_mie_ms.h"
#include <sys/stat.h>
#include <errno.h>
#include <png.h>
// Aggregates the image-output settings plus the sampled field data and
// the per-component amplitude maxima used for color scaling.
typedef struct image_data{
char dir_name[64]; // directory name to output image
int scale; // number for enlarge the output image
int ca; // multiplier for x-axis
int m; // sampling number
double rang; // range of sampling
int ts; // time step per cycle
double complex *ve,*vh; // electromagnetic field data
double me[3],mh[3]; // maximum amplitude of each field component
}IMD;
void directory_name(char *src,char *nn);
void make_directory(char *dir_name);
void eh_field_x(IMD *id,MSPD *sp);
void eh_field_y(IMD *id,MSPD *sp);
void eh_field_z(IMD *id,MSPD *sp);
void output_field(char *pl,IMD *id,MSPD *sp);
// color table
png_byte ct1[9][3]={{0x00,0x00,0x90},{0x00,0x0f,0xff},{0x00,0x90,0xff},{0x0f,0xff,0xee},
{0xff,0xff,0xff},{0xff,0xee,0x00},{0xff,0x70,0x00},{0xee,0x00,0x00},{0x7f,0x00,0x00}};
/*
png_byte ct1[9][3]={{0x00,0x00,0x90},{0x00,0x0f,0xff},{0x00,0x90,0xff},{0x0f,0xff,0xee},
{0x90,0xff,0x70},{0xff,0xee,0x00},{0xff,0x70,0x00},{0xee,0x00,0x00},{0x7f,0x00,0x00}};
*/
// Entry point: reads the multi-sphere scattering data file given as argv[1],
// samples the total E/H field on the y=0 and z=0 planes, and writes the
// resulting field images into "<basename>_images/".
// NOTE(review): argv[1] is used without an argc check — confirm callers
// always supply a data-file argument.
int main(int argc,char *argv[])
{
MSPD msp;
IMD id;
read_dat_ms(argv[1],&msp); // read data file
print_data_ms(&msp); // print data
directory_name(argv[1],id.dir_name); // remove file-extension from argv[1] and add "_images"
id.scale=1; // number for enlarge the output image
id.m=200; // sampling number
id.rang=4.0*msp.bm.lambda_0; // range of sampling
id.ts=40; // time step per cycle
id.ca=2; // multiplier for x-axis (width)
make_directory(id.dir_name);
// field buffers: m rows x (m*ca) columns x 3 components
id.ve=(double complex *)m_alloc2(id.m*id.m*id.ca*3,sizeof(double complex),"example3.c, ve");
id.vh=(double complex *)m_alloc2(id.m*id.m*id.ca*3,sizeof(double complex),"example3.c, vh");
// y=0 plane
eh_field_y(&id,&msp);
output_field("xz",&id,&msp);
// z=0 plane
eh_field_z(&id,&msp);
output_field("xy",&id,&msp);
free(id.ve);
free(id.vh);
free_ms(&msp);
return 0;
}
/*
 * Build the output directory name from the input data-file name: strip the
 * file extension (text after the last '.') if present and append "_images".
 * The result is written to nn, which the caller declares as char[64].
 * Fix: replaces the unbounded sprintf calls with snprintf and the
 * strncpy (which does not guarantee NUL-termination) with memcpy into a
 * zero-initialized buffer; behavior is otherwise unchanged.
 */
void directory_name(char *src,char *nn)
{
  size_t s1,s2;
  char *sd,fo[64]={},buf[54]={};

  s1=strlen(src);
  if(s1>54){ // buf holds at most 53 chars + NUL; keep the original limit
    printf("example3.c, directory_name(), directory name is too long. exit...\n");
    exit(1);
  }
  snprintf(fo,sizeof(fo),"%s",src); // bounded copy of the source name
  sd=strrchr(fo,'.');
  if(sd!=NULL){
    s2=strlen(sd);
    memcpy(buf,src,s1-s2); // buf was zeroed, so it stays NUL-terminated
    snprintf(fo,sizeof(fo),"%s_images",buf);
  }
  snprintf(nn,64,"%s",fo); // nn is IMD.dir_name, declared char[64]
}
/*
 * Create the output directory with mode rwx for user and group.
 * An already-existing directory is not an error; any other mkdir()
 * failure terminates the program.
 * Fix: the error message now goes to stderr, ends with a newline, and
 * reports the directory name and errno reason (the original printed a
 * bare, newline-less message to stdout and discarded errno).
 */
void make_directory(char *dir_name)
{
  int ret;

  ret=mkdir(dir_name,S_IRWXU|S_IRWXG);
  if(ret!=0 && errno!=EEXIST){
    fprintf(stderr,"failed to make directory %s: %s. Exit...\n",dir_name,strerror(errno));
    exit(1);
  }
}
/*
 * Sample the total electromagnetic field on the y=0 (x-z) plane.
 * Complex field values go to id->ve / id->vh; the per-component amplitude
 * maxima are accumulated into id->me / id->mh for later color scaling.
 */
void eh_field_y(IMD *id,MSPD *sp)
{
  double complex e[3],h[3];
  double x[3],dr;
  int i,j,d;

  dr=id->rang*2.0/(double)(id->m-1); // grid spacing
  for(d=0;d<3;d++){
    id->me[d]=0.0;
    id->mh[d]=0.0;
  }

  x[1]=0.0; // fixed: sampling plane is y=0
  #pragma omp parallel for schedule(dynamic) firstprivate(x) private(j,d,e,h)
  for(i=0;i<id->m;i++){
    x[2]=id->rang-(double)i*dr;          // z runs top-to-bottom
    for(j=0;j<id->m*id->ca;j++){
      x[0]=-id->rang+(double)j*dr;       // x runs left-to-right
      total_EH_ms(e,h,x,sp);             // total field at this point
      // maxima are shared state, so the updates are serialized
      #pragma omp critical
      for(d=0;d<3;d++){
        double ae=cabs(e[d]),ah=cabs(h[d]);
        if(ae>id->me[d]) id->me[d]=ae;
        if(ah>id->mh[d]) id->mh[d]=ah;
      }
      int ofs=(i*id->m*id->ca+j)*3;      // row-major, 3 components per pixel
      for(d=0;d<3;d++){
        id->ve[ofs+d]=e[d];
        id->vh[ofs+d]=h[d];
      }
    }
  }
}
/* Sample the total E/H field on the z=0 plane.
   Identical to eh_field_y() except rows sweep the y coordinate;
   overwrites id->ve / id->vh and the maxima id->me / id->mh. */
void eh_field_z(IMD *id,MSPD *sp)
{
  double complex e[3],h[3];
  double x[3],dr;
  int i,j,d;
  dr=id->rang*2.0/(double)(id->m-1); // grid spacing
  for(i=0;i<3;i++){ // reset running maxima
    id->me[i]=0.0;
    id->mh[i]=0.0;
  }
  // z=0 plane
  x[2]=0.0;
  #pragma omp parallel for schedule(dynamic) firstprivate(x) private(j,d,e,h)
  for(i=0;i<id->m;i++){
    x[1]=id->rang-(double)i*dr; // y coordinate of this row
    for(j=0;j<id->m*id->ca;j++){
      x[0]=-id->rang+(double)j*dr; // x coordinate of this column
      total_EH_ms(e,h,x,sp); // total field
      // maxima are shared across threads, so update them under a lock
      #pragma omp critical
      for(d=0;d<3;d++){
        if(cabs(e[d])>id->me[d]) id->me[d]=cabs(e[d]);
        if(cabs(h[d])>id->mh[d]) id->mh[d]=cabs(h[d]);
      }
      for(d=0;d<3;d++){ // store the sampled complex field
        id->ve[i*id->m*id->ca*3+j*3+d]=e[d];
        id->vh[i*id->m*id->ca*3+j*3+d]=h[d];
      }
    }
  }
}
/* Render one sampled plane: write id->ts PNG frames (the field rotated by
   the phase factor exp(-i*omega*t) per frame), a text file listing the
   color-bar value ranges, and the color-bar image.
   pl: filename prefix ("xz" or "xy") identifying the plane. */
void output_field(char *pl,IMD *id,MSPD *sp)
{
  void output_png(int nt,double complex cet,char *pl,IMD *id);
  void output_color_bar(IMD *id);
  FILE *fp;
  char fn[128];
  double dt;
  int n;
  // Time increment per frame. NOTE(review): dt = lambda_0/ts suggests time
  // is measured in units where one period equals lambda_0 — confirm.
  dt=sp->bm.lambda_0/(double)id->ts;
  // Frames are independent (each writes its own files), so render in parallel.
  #pragma omp parallel for schedule(dynamic)
  for(n=0;n<id->ts;n++){
    output_png(n,cexp(-I*sp->bm.omega*dt*(double)n),pl,id);
  }
  // print info
  sprintf(fn,"%s/%s_info.txt",id->dir_name,pl);
  fp=fopen(fn,"wt");
  if(fp==NULL){
    printf("Failed to open the %s file. Exit...\n",fn);
    exit(1);
  }
  fprintf(fp,"the range of color bar\n");
  fprintf(fp,"Ex is %8e to %8e\n",-id->me[0],id->me[0]);
  fprintf(fp,"Ey is %8e to %8e\n",-id->me[1],id->me[1]);
  fprintf(fp,"Ez is %8e to %8e\n",-id->me[2],id->me[2]);
  fprintf(fp,"Hx is %8e to %8e\n",-id->mh[0],id->mh[0]);
  fprintf(fp,"Hy is %8e to %8e\n",-id->mh[1],id->mh[1]);
  fprintf(fp,"Hz is %8e to %8e\n",-id->mh[2],id->mh[2]);
  fclose(fp);
  // output color bar image
  output_color_bar(id);
}
/* Write one animation frame: six PNGs (Ex,Ey,Ez,Hx,Hy,Hz) for the plane
   identified by prefix pl, at time-step index nt. cet is the complex phase
   factor applied before taking the real part; values are normalized by the
   per-component maxima id->me / id->mh recorded during sampling.
   NOTE(review): png_destroy_write_struct() is never called, so the png/info
   structs leak (6 per frame); no png_jmpbuf error handler is installed
   either, so a libpng write error terminates the process. */
void output_png(int nt,double complex cet,char *pl,IMD *id)
{
  int color_rgb(double x,png_byte *r,png_byte *g,png_byte *b); // -1 <= x <= 1
  FILE *fep[3],*fhp[3];
  char fname[256],sf[3]={'x','y','z'};
  int j,i,sj,si,d,m,scale;
  png_uint_32 width,height;
  png_structp png_e[3],png_h[3];
  png_infop info_e[3],info_h[3];
  png_bytepp pd_e[3],pd_h[3];
  png_byte r,g,b;
  m=id->m;
  scale=id->scale;
  // each field sample becomes a (scale+1) x (scale+1) pixel block
  width =m*(scale+1)*id->ca;
  height=m*(scale+1);
  for(d=0;d<3;d++){ // per component: create writers, open files, allocate rows
    png_e[d] =png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    info_e[d]=png_create_info_struct(png_e[d]);
    sprintf(fname,"%s/%s_E%c_%03d.png",id->dir_name,pl,sf[d],nt);
    fep[d]=fopen(fname,"wb");
    if(fep[d]==NULL){
      printf("Failed to open the %s file. Exit...\n",fname);
      exit(1);
    }
    png_h[d] =png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    info_h[d]=png_create_info_struct(png_h[d]);
    sprintf(fname,"%s/%s_H%c_%03d.png",id->dir_name,pl,sf[d],nt);
    fhp[d]=fopen(fname,"wb");
    if(fhp[d]==NULL){
      printf("Failed to open the %s file. Exit...\n",fname);
      exit(1);
    }
    png_init_io(png_e[d],fep[d]);
    png_set_IHDR(png_e[d],info_e[d],width,height,8,PNG_COLOR_TYPE_RGB,PNG_INTERLACE_NONE,
    PNG_COMPRESSION_TYPE_DEFAULT,PNG_FILTER_TYPE_DEFAULT);
    pd_e[d]=(png_bytepp)png_malloc(png_e[d],sizeof(png_bytep)*height);
    png_set_rows(png_e[d],info_e[d],pd_e[d]);
    png_init_io(png_h[d],fhp[d]);
    png_set_IHDR(png_h[d],info_h[d],width,height,8,PNG_COLOR_TYPE_RGB,PNG_INTERLACE_NONE,
    PNG_COMPRESSION_TYPE_DEFAULT,PNG_FILTER_TYPE_DEFAULT);
    pd_h[d]=(png_bytepp)png_malloc(png_h[d],sizeof(png_byte)*height);
    png_set_rows(png_h[d],info_h[d],pd_h[d]);
    for(j=0;j<height;j++){ // one RGB row buffer per image row
      pd_e[d][j]=(png_bytep)png_malloc(png_e[d],sizeof(png_byte)*width*3);
      pd_h[d][j]=(png_bytep)png_malloc(png_h[d],sizeof(png_byte)*width*3);
    }
  }
  // fill pixels: map normalized Re(cet * field) through the color table,
  // replicating each sample into its (scale+1)^2 pixel block
  for(i=0;i<m;i++){
    for(j=0;j<m*id->ca;j++){
      for(d=0;d<3;d++){
        color_rgb(creal(cet*id->ve[i*m*id->ca*3+j*3+d])/id->me[d],&r,&g,&b);
        for(si=0;si<=scale;si++){
          for(sj=0;sj<=scale;sj++){
            pd_e[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+0]=r;
            pd_e[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+1]=g;
            pd_e[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+2]=b;
          }
        }
        color_rgb(creal(cet*id->vh[i*m*id->ca*3+j*3+d])/id->mh[d],&r,&g,&b);
        for(si=0;si<=scale;si++){
          for(sj=0;sj<=scale;sj++){
            pd_h[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+0]=r;
            pd_h[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+1]=g;
            pd_h[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+2]=b;
          }
        }
      }
    }
  }
  for(d=0;d<3;d++){ // write images, then release row buffers and close files
    png_write_png(png_e[d],info_e[d],PNG_TRANSFORM_IDENTITY,NULL);
    png_write_png(png_h[d],info_h[d],PNG_TRANSFORM_IDENTITY,NULL);
    for(j=0;j<height;j++){
      png_free(png_e[d],pd_e[d][j]);
      png_free(png_h[d],pd_h[d][j]);
    }
    png_free(png_e[d],pd_e[d]);
    png_free(png_h[d],pd_h[d]);
    fclose(fep[d]);
    fclose(fhp[d]);
  }
}
/* Write a vertical color-bar PNG ("color_bar.png") into the output
   directory: full image height, width = height/16, colors running from
   +max (top) to -max (bottom) through the same table used for the frames.
   NOTE(review): as in output_png(), the png struct is never destroyed. */
void output_color_bar(IMD *id)
{
  int color_rgb(double x,png_byte *r,png_byte *g,png_byte *b); // -1 <= x <= 1
  FILE *fp;
  char fname[128];
  int j,i;
  png_uint_32 width,height;
  png_structp png;
  png_infop info;
  png_bytepp pdata;
  png_byte r,g,b;
  sprintf(fname,"%s/color_bar.png",id->dir_name);
  height=id->m*(id->scale+1); // match the frame height
  width=height/16;
  png = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
  info= png_create_info_struct(png);
  fp=fopen(fname,"wb");
  if(fp==NULL){
    printf("Failed to open the %s file. Exit...\n",fname);
    exit(1);
  }
  png_init_io(png, fp);
  png_set_IHDR(png,info,width,height,8,PNG_COLOR_TYPE_RGB,PNG_INTERLACE_NONE,
  PNG_COMPRESSION_TYPE_DEFAULT,PNG_FILTER_TYPE_DEFAULT);
  pdata=(png_bytepp)png_malloc(png, sizeof(png_bytep)*height);
  png_set_rows(png,info,pdata);
  for(j=0;j<height;j++){ // one RGB row buffer per image row
    pdata[j]=(png_bytep)png_malloc(png,sizeof(png_byte)*width*3);
  }
  for(i=0;i<height;i++){
    // row i maps linearly from +1 (top) to -1 (bottom)
    color_rgb(1.0-(2.0/(double)height)*(double)i,&r,&g,&b);
    for(j=0;j<width;j++){
      pdata[i][j*3+0]=r;
      pdata[i][j*3+1]=g;
      pdata[i][j*3+2]=b;
    }
  }
  png_write_png(png, info, PNG_TRANSFORM_IDENTITY, NULL);
  for(j=0;j<height;j++){
    png_free(png,pdata[j]);
  }
  png_free(png,pdata);
  fclose(fp);
}
/* Map x in [-1,1] to an RGB color by piecewise-linear interpolation
   through the 9-entry table ct1[]. Out-of-range x yields black and
   returns -1; otherwise returns 0.
   The input is quantized to n in [0, 2^24]; each of the 8 table segments
   spans nc = 2^21 quanta, so interpolation fractions are exact powers
   of two and the arithmetic matches the original ladder bit-for-bit. */
int color_rgb(double x,png_byte *r,png_byte *g,png_byte *b) // -1 <= x <= 1
{
  unsigned int n,nc,seg,off;
  double f;
  if(x<-1.0 || x>1.0){
    *r=0x00; *g=0x00; *b=0x00;
    return -1;
  }
  n=(unsigned int)floor(pow(2,23)*(x+1.0));
  nc=(unsigned int)pow(2,21);
  seg=n/nc+1; // segment index, 1..8 while n < 8*nc
  if(seg>8){  // x at (or numerically above) the top end of the scale
    *r=ct1[8][0]; *g=ct1[8][1]; *b=ct1[8][2];
    return 0;
  }
  off=n-nc*(seg-1);          // offset within the segment
  f=(double)off/(double)nc;  // interpolation fraction in [0,1), exact
  *r=(png_byte)floor((double)ct1[seg-1][0]+f*(double)(ct1[seg][0]-ct1[seg-1][0]));
  *g=(png_byte)floor((double)ct1[seg-1][1]+f*(double)(ct1[seg][1]-ct1[seg-1][1]));
  *b=(png_byte)floor((double)ct1[seg-1][2]+f*(double)(ct1[seg][2]-ct1[seg-1][2]));
  return 0;
}
|
Alloc.h | /* iPIC3D was originally developed by Stefano Markidis and Giovanni Lapenta.
* This release was contributed by Alec Johnson and Ivy Bo Peng.
* Publications that use results from iPIC3D need to properly cite
* 'S. Markidis, G. Lapenta, and Rizwan-uddin. "Multi-scale simulations of
* plasma with iPIC3D." Mathematics and Computers in Simulation 80.7 (2010): 1509-1519.'
*
* Copyright 2015 KTH Royal Institute of Technology
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IPIC_ALLOC_H
#define IPIC_ALLOC_H
#include <cstddef> // for alignment stuff
#include "asserts.h" // for assert_le, assert_lt
#include "arraysfwd.h"
//#include "arrays.h" // fixed-dimension arrays
/*
Array classes developed by
Alec Johnson,
consolidating arrays developed by
Reger Ferrer, Vicenç Beltran, and Florentino Sainz
and earlier arrays defined by
Jorge Amaya and Stefano Markidis.
For examples of use of this class,
see test_arrays.cpp
Compiler options:
-DCHECK_BOUNDS: check bounds when performing array access
(major performance penalty).
-DFLAT_ARRAYS: use calculated 1d subscript to dereference
even for arr[i][j][k] notation.
-DCHAINED_ARRAYS: use hierarchy of pointers to dereference
even for arr.get(i,j,k) notation.
By default, chained pointers are used for arr[i][j][k]
notation (unless -DCHECK_BOUNDS is turned on, in which case
we don't care about performance anyway), and calculated 1d
subscript is used for arr.get(i,j,k) notation.
An alternative would have been use boost arrays. Use of our
own array class allows flexibility for our choice of array
implementation, including the possibility of using boost
for the implementation, while avoiding boost as an external
dependency. On some systems, it may be preferable to use
native arrays with hard-coded dimensions; this could suit us
well, since all arrays are approximately the same size, but
would require a recompile when changing the maximum array size.
Rather than using these templates directly, the typedefs
declared in "arraysfwd.h" should be used:
* const_arr3_double = const_array_ref3<double>
* arr3_double = array_ref3<double>
* array3_double = array3<double>
The point is that we do not want to hard-code the fact that
we are using templates, and we may well wish to eliminate use
of templates in the future. (Alternatives are to use the
preprocessor or to have separate implementations for each
type (double, int, possibly float) if we go to use of mixed
precision). Support for templates is notoriously buggy in
compilers, particularly when it comes to inheritance, and I
in fact had to eliminate inheriting from the base_arr class
and use the "protected" hack below in order to get this
code to compile on the latest intel compiler (2013) and on
g++ 4.0 (2005); g++ 4.2 (2007) compiled (but unfortunately,
for my g++ 4.2, iPic3D suffered from stack frame corruption.)
//
Note that the directive
#if defined(FLAT_ARRAYS) || defined(CHECK_BOUNDS)
appears not only here but also in arraysfwd.h
*/
// Allocation alignment (bytes) for all array data.
#define ALIGNMENT (64)
// Intel compiler: allocate via _mm_malloc and tell the optimizer about the
// alignment; other compilers: plain new[]/delete[] with no alignment hints.
#ifdef __INTEL_COMPILER
#define ALLOC_ALIGNED __attribute__((aligned(ALIGNMENT)))
#define ASSUME_ALIGNED(X) __assume_aligned(X, ALIGNMENT)
#define ALIGNED(X) __assume_aligned(X, ALIGNMENT)
#define AlignedAlloc(T, NUM) \
(T *const __restrict__)(_mm_malloc(sizeof(T)*NUM, ALIGNMENT))
#define AlignedFree(S) (_mm_free(S))
#else
#define ALLOC_ALIGNED
#define ASSUME_ALIGNED(X)
#define ALIGNED(X)
#define AlignedFree(S) (delete[] S)
#define AlignedAlloc(T, NUM) (new T[NUM])
#endif
// True when pointer p lies on an N-byte boundary.
inline bool is_aligned(void *p, int N)
{
  return (reinterpret_cast<unsigned long>(p) % N) == 0;
}
// Runtime alignment assertion (used in debug paths).
#define assert_aligned(X, N) assert(is_aligned(X, N));
// Compile with -DCHECK_BOUNDS to turn on bounds checking.
//#define CHECK_BOUNDS
#ifdef CHECK_BOUNDS
// verify 0 <= n < S on every subscript (major performance penalty)
#define check_bounds(n,S) {assert_le(0, n); assert_lt(n, S);}
#else
#define check_bounds(n,S)
#endif
/*** begin Array classes with flexible dimensions ***/
// methods to allocate arrays.
// These are a succinct equivalent of Jorge's earler methods,
// except for the use of AlignedAlloc in place of new.
//
// Allocate a flat, aligned array of sz1 elements.
// Ownership passes to the caller; release with delArray1().
template < class type >
inline type * newArray1(size_t sz1)
{
  return AlignedAlloc(type, sz1);
}
// Allocate a 2D array as one contiguous data block of sz1*sz2 elements
// plus a row-pointer table into it. Release with delArray2().
template < class type >
inline type ** newArray2(size_t sz1, size_t sz2)
{
  type **rows = AlignedAlloc(type*, sz1); // new type *[sz1];
  type *data = newArray1<type>(sz1*sz2);
  for (size_t i = 0; i < sz1; i++)
    rows[i] = data + i*sz2;
  return rows;
}
// Allocate a 3D array: a top-level pointer table over a contiguous
// 2D pointer hierarchy built by newArray2(). Release with delArray3().
template < class type >
inline type *** newArray3(size_t sz1, size_t sz2, size_t sz3)
{
  type ***top = AlignedAlloc(type**, sz1); // new type **[sz1];
  type **mid = newArray2<type>(sz1*sz2, sz3);
  for (size_t i = 0; i < sz1; i++)
    top[i] = mid + i*sz2;
  return top;
}
// Allocate a 4D array: a top-level pointer table over a contiguous
// 3D pointer hierarchy built by newArray3(). Release with delArray4().
template <class type>
inline type **** newArray4(size_t sz1, size_t sz2, size_t sz3, size_t sz4)
{
  type ****top = AlignedAlloc(type***, sz1); //(new type ***[sz1]);
  type ***mid = newArray3<type>(sz1*sz2, sz3, sz4);
  for (size_t i = 0; i < sz1; i++)
    top[i] = mid + i*sz2;
  return top;
}
// build chained pointer hierarchy for pre-existing bottom level
//
// Wrap an existing flat buffer `in` (length sz1*sz2*sz3*sz4) with a freshly
// allocated 3-level pointer hierarchy; `in` itself is not copied or owned.
template <class type>
inline type **** newArray4(type * in, size_t sz1, size_t sz2, size_t sz3, size_t sz4)
{
  type****arr = newArray3<type*>(sz1,sz2,sz3);
  type**arr2 = **arr; // bottom pointer level is contiguous, fill it flat
  type *ptr = in;
  size_t szarr2 = sz1*sz2*sz3;
  for(size_t i=0;i<szarr2;i++) {
    arr2[i] = ptr;
    ptr += sz4;
  }
  return arr;
}
// Wrap an existing flat buffer `in` (length sz1*sz2*sz3) with a freshly
// allocated 2-level pointer hierarchy; `in` is not copied or owned.
template <class type>
inline type *** newArray3(type * in, size_t sz1, size_t sz2, size_t sz3)
{
  type***arr = newArray2<type*>(sz1,sz2);
  type**arr2 = *arr; // bottom pointer level is contiguous, fill it flat
  type *ptr = in;
  size_t szarr2 = sz1*sz2;
  for(size_t i=0;i<szarr2;i++) {
    arr2[i] = ptr;
    ptr += sz3;
  }
  return arr;
}
// Wrap an existing flat buffer `in` (length sz1*sz2) with a freshly
// allocated row-pointer table; `in` is not copied or owned.
template <class type>
inline type ** newArray2(type * in, size_t sz1, size_t sz2)
{
  type**arr = newArray1<type*>(sz1);
  type *ptr = in;
  for(size_t i=0;i<sz1;i++) {
    arr[i] = ptr;
    ptr += sz2;
  }
  return arr;
}
// methods to deallocate arrays
//
// Each level frees its contiguous child block (arr[0]) and then its own
// pointer table; matches the allocation pattern of newArrayN.
template < class type > inline void delArray1(type * arr)
{ AlignedFree(arr); }
template < class type > inline void delArray2(type ** arr)
{ delArray1(arr[0]); AlignedFree(arr); }
template < class type > inline void delArray3(type *** arr)
{ delArray2(arr[0]); AlignedFree(arr); }
template < class type > inline void delArray4(type **** arr)
{ delArray3(arr[0]); AlignedFree(arr); }
//
// versions with dummy dimensions (for backwards compatibility)
//
// The size arguments are accepted and ignored.
template <class type> inline void delArr1(type * arr)
{ delArray1(arr); }
template <class type> inline void delArr2(type ** arr, size_t sz1)
{ delArray2(arr); }
template <class type> inline void delArr3(type *** arr, size_t sz1, size_t sz2)
{ delArray3(arr); }
// Dummy-dimension wrapper (backwards compatibility); sizes are ignored.
// BUG FIX: this previously called delArray3(arr), which freed only the
// pointer tables and leaked the bottom-level data block that newArray4
// allocates (the largest allocation).
template <class type> inline void delArr4(type **** arr,
  size_t sz1, size_t sz2, size_t sz3)
{ delArray4(arr); }
namespace iPic3D
{
// underlying 1-dimensional array class for arrays
//
// Holds the flat data block (either allocated here or adopted).
// Lifetime is managed manually: the destructor does NOT free; callers
// (or the arrayN wrappers) must call free() exactly once for owned data.
template <class type>
class base_arr
{
 private:
  size_t size; // element count
 protected:
  type* const __restrict__ arr; // flat data block
 public:
  const type* get_arr()const{return arr;}
  base_arr(size_t s) : size(s), arr(AlignedAlloc(type, s)) {} // allocating ctor
  base_arr(type* in, size_t s) : size(s), arr(in) {}          // adopting ctor
  ~base_arr(){}
  // NOTE(review): truncates size_t to int for very large arrays
  int get_size() { return size; }
  void free() { AlignedFree(arr); }
  void setall(type val){
  // #pragma omp for
  for(size_t i=0;i<size;i++) arr[i]=val;
  }
  type* fetch_arr(){return arr;}
};
// classes to dereference arrays.
//
// array_fetchN is essentially a dumbed-down version of ArrN with
// an index shift applied to the underlying array. The purpose
// of array_fetchN is to allow elements of multidimensional arrays
// to be accessed with a calculated one-dimensional index while
// using chained operator[] syntax (e.g. myarr[i][j]), i.e. the
// same syntax as is used for native or nested arrays. This
// implementation is likely to be slow unless optimization is
// turned on, allowing the compiler to figure out that the whole
// chain of calls to the operator[] methods and to the array_fetchN
// constructors reduces to computing a one-dimensional subscript
// used to access a one-dimensional array.
//
// Unfortunately, though the intel compiler allows it, the ISO
// C++ standard evidently does not allow a class to convert
// itself to an object with a method that exists in the class,
// and g++ enforces this. Specifically, ISO C++ considers
// it ambiguous to have conversion to chained pointer and
// to have operator[] that returns the following classes.
// In particular, our array classes cannot have both of the
// following:
// - automatic conversion to chained pointer and
// - operator[].
//
#if defined(FLAT_ARRAYS) || defined(CHECK_BOUNDS)
#warning "arrays are flat"
// 1D accessor for flat-array chained subscripting (arr[i][j][k] syntax):
// carries the underlying array, a precomputed element shift, and the
// extent S1 used only for bounds checking.
template <class type>
class array_fetch1
{
  type* __restrict__ arr;
  size_t S1;
  size_t shift;
 public:
  inline array_fetch1() : arr(0), shift(0), S1(0) {}
  inline array_fetch1(type*const arr_, size_t k, size_t s1) :
    arr(arr_), shift(k), S1(s1)
  {}
  inline array_fetch1& operator=(const array_fetch1& in)
  {
    arr = in.arr;
    S1 = in.S1;
    shift = in.shift;
    // BUG FIX: the return statement was missing, so using the result of
    // an assignment chain read an undefined value (UB).
    return *this;
  }
  inline type& operator[](size_t n1){
    check_bounds(n1, S1);
    ALIGNED(arr);
    return arr[shift+n1];
  }
  //operator type*(){ return &arr[shift]; }
  //inline type* fetch_arr(){return arr;}
};
// 2D accessor: operator[] peels one dimension and returns an
// array_fetch1 with the accumulated shift folded in.
template <class type>
class array_fetch2
{
  type* const __restrict__ arr;
  const size_t shift;
  const size_t S2, S1;
 public:
  inline array_fetch2(type*const arr_, size_t k, size_t s2, size_t s1) :
    arr(arr_), shift(k), S2(s2), S1(s1)
  {}
  inline array_fetch1<type> operator[](size_t n2){
    check_bounds(n2,S2);
    return array_fetch1<type>(arr, (shift+n2)*S1, S1);
  }
};
// 3D accessor: same peeling scheme, one level up.
template <class type>
class array_fetch3
{
  type* const __restrict__ arr;
  const size_t shift;
  const size_t S3, S2, S1;
 public:
  inline array_fetch3(type*const arr_, size_t k, size_t s3, size_t s2, size_t s1) :
    arr(arr_), shift(k), S3(s3), S2(s2), S1(s1)
  {}
  inline array_fetch2<type> operator[](size_t n3){
    check_bounds(n3, S3);
    return array_fetch2<type>(arr, (shift+n3)*S2, S2, S1);
  }
};
// const versions
// Read-only counterparts of array_fetchN, returned by the const
// operator[] of the const_array_refN classes.
template <class type>
class const_array_get1
{
  type const* const __restrict__ arr;
  const size_t S1;
  const size_t shift;
 public:
  inline const_array_get1(type const*const arr_, size_t k, size_t s1) :
    arr(arr_), shift(k), S1(s1)
  {}
  inline const type& operator[](size_t n1)const{
    check_bounds(n1, S1);
    ALIGNED(arr);
    return arr[shift+n1];
  }
  //operator type const*()const{return arr;}
};
template <class type>
class const_array_get2
{
  type const*const __restrict__ arr;
  const size_t shift;
  const size_t S2, S1;
 public:
  inline const_array_get2(type const*const arr_, size_t k, size_t s2, size_t s1) :
    arr(arr_), shift(k), S2(s2), S1(s1)
  {}
  inline const const_array_get1<type> operator[](size_t n2)const{
    check_bounds(n2,S2);
    return const_array_get1<type>(arr, (shift+n2)*S1, S1);
  }
};
template <class type>
class const_array_get3
{
  type const*const __restrict__ arr;
  const size_t shift;
  const size_t S3, S2, S1;
 public:
  const_array_get3(type const*const arr_, size_t k, size_t s3, size_t s2, size_t s1) :
    arr(arr_), shift(k), S3(s3), S2(s2), S1(s1)
  {}
  inline const const_array_get2<type> operator[](size_t n3)const{
    check_bounds(n3, S3);
    return const_array_get2<type>(arr, (shift+n3)*S2, S2, S1);
  }
};
#else
//#warning "arrays are not flat
#endif // FLAT_ARRAYS
// ArrN corresponds to multi_array_ref in the boost library.
//
// ArrN can adopt an array allocated by newArrN
//
// The purpose of these classes is to provide more efficient
// and more regulated access to array elements. The idea is to
// maintain backward compatibility while allowing us to move
// toward a proper array abstraction.
//
// The user of ArrN is responsible for memory management.
// The ArrayN classes are the version of this class
// with automatic deallocation.
//
// Examples:
//
// Using constructor to create array:
// {
// array_ref2 arr<int>(16, 16);
// arr[1][2] = 5;
// arr.free();
// }
// Using ArrN to adopt an array allocated by newArrN
// {
// int** array = newArray2<int>(16,16)
// array_ref2 arr(array,16,16); // adopt array
// arr[1][2] = 5;
// assert_eq(arr[1][2],array[1][2]);
// // arr.free(); // should not do both this and next line.
// delArray2<int>(array);
// }
//
// proposed improvements:
// - allow shifting of the base:
// - need "double shift" in each class
// - need to implement "arr3.set_bases(b1,b2,b3);"
// which calculates "shift".
// - need "const size_t b1, b2, b3;" for beginning indices
// to allow bounds checking. Should not incur run-time
// penalty, but it so then condition on CHECK_BOUNDS.
// - methods that use parallel arithmetic for omp and vectorized code
// 1D array reference: owns nothing automatically (call free() to release
// data allocated by the allocating constructor). Unlike the 2D/3D/4D
// classes it does not inherit base_arr.
template <class type>
class array_ref1
{
 private: // data
  const size_t S1;
  type* const __restrict__ arr;
 public:
  ~array_ref1() { }
  void free() { AlignedFree(arr); }
  array_ref1(size_t s1) : // allocating ctor
    S1(s1),
    arr(AlignedAlloc(type, s1))
  { }
  array_ref1(type* in, // adopting ctor (caller keeps ownership)
    size_t s1) :
    S1(s1),
    arr(in)
  { }
  inline type& operator[](size_t n1){
    check_bounds(n1, S1);
    ALIGNED(arr);
    return arr[n1];
  }
  inline size_t getidx(size_t n1) const
  {
    check_bounds(n1, S1);
    return n1;
  }
  const type& get(size_t n1) const
  { ALIGNED(arr); return arr[getidx(n1)]; }
  // NOTE(review): n2 is unused — looks like a copy-paste from the 2D
  // class; confirm whether any caller relies on the two-argument form.
  type& fetch(size_t n2,size_t n1) const
  { ALIGNED(arr); return arr[getidx(n1)]; }
  void set(size_t n1, type value)
  { ALIGNED(arr); arr[getidx(n1)] = value; }
};
// Read-only 2D array reference (corresponds to boost multi_array_ref):
// flat data in base_arr plus a chained row-pointer table arr2 for
// operator[][] access. Memory management is manual (see array_ref2).
template <class type>
class const_array_ref2 : public base_arr<type>
{
 public:
  using base_arr<type>::arr;
  using base_arr<type>::get_arr;
 protected: // data
  size_t size;
  const size_t S2,S1;
  type*const*const arr2;
 public:
  ~const_array_ref2(){}
  // allocating constructor: contiguous data + row-pointer table
  const_array_ref2(size_t s2, size_t s1) :
    size(s2*s1),
    base_arr<type>(s2*s1),
    S2(s2), S1(s1),
    arr2(newArray2<type>(arr,s2,s1))
  { }
  // adopting constructor: wrap an existing chained array
  const_array_ref2(type*const* in,
    size_t s2, size_t s1) :
    size(s2*s1), //arr(**in),
    base_arr<type>(*in, s2*s1),
    S2(s2), S1(s1),
    arr2(in)
  { }
  int get_size() const { return size; }
  size_t dim1() const { return S2; }
  size_t dim2() const { return S1; }
#if defined(FLAT_ARRAYS) || defined(CHECK_BOUNDS)
  const const_array_get1<type> operator[](size_t n2)const{
    check_bounds(n2, S2);
    return const_array_get1<type>(arr, n2*S1, S1);
  }
#else
  // make operator[] dereference via chained pointer
  operator type**(){ return (type**) arr2; }
  //inline const const_array_get1<type> operator[](size_t n2)const{
  // return const_array_get1<type>(arr2[n2]);
  //}
  //inline const type* operator[](size_t n2)const{
  // return arr2[n2];
  //}
#endif
  void check_idx_bounds(size_t n2, size_t n1) const
  {
    check_bounds(n2, S2);
    check_bounds(n1, S1);
  }
  inline size_t getidx(size_t n2, size_t n1) const
  { check_idx_bounds(n2,n1); return n2*S1+n1; }
#ifdef CHAINED_ARRAYS
  const type& get(size_t n2,size_t n1) const
  { check_idx_bounds(n2,n1); return arr2[n2][n1]; }
 protected: // hack: not in const_array_ref3 due to icpc compile error
  type& fetch(size_t n2,size_t n1) const
  { check_idx_bounds(n2,n1); return arr2[n2][n1]; }
  void set(size_t n2,size_t n1, type value)
  { check_idx_bounds(n2,n1); arr2[n2][n1] = value; }
#else
  const type& get(size_t n2,size_t n1) const
  { ALIGNED((type*)arr); return arr[getidx(n2,n1)]; }
 protected: // hack: not in const_array_ref3 due to icpc compile error
  type& fetch(size_t n2,size_t n1) const
  { ALIGNED((type*)arr); return arr[getidx(n2,n1)]; }
  void set(size_t n2,size_t n1, type value)
  { ALIGNED((type*)arr); arr[getidx(n2,n1)] = value; }
#endif
 public:
  // BUG FIX: previously returned const double** regardless of the element
  // type; now returns the properly typed pointer table (identical for
  // type=double, correct for every other instantiation).
  const type** get_arr2(){return (const type**) arr2;}
};
// Mutable 2D array reference: adds write access and free() on top of
// const_array_ref2. free() releases both the pointer table and the data.
template <class type>
class array_ref2 : public const_array_ref2<type>
{
  //using base_arr<type>::arr;
  using const_array_ref2<type>::size;
  using const_array_ref2<type>::arr;
  using const_array_ref2<type>::S2;
  using const_array_ref2<type>::S1;
  using const_array_ref2<type>::arr2;
  using const_array_ref2<type>::getidx;
 public:
  using base_arr<type>::get_arr;
 public:
  ~array_ref2(){}
  array_ref2(size_t s2, size_t s1) :
    const_array_ref2<type>(s2,s1)
  { }
  array_ref2(type*const* in,
    size_t s2, size_t s1) :
    const_array_ref2<type>(in,s2,s1)
  { }
  void free(){ delArray2<type>((type***)arr2); }
#if defined(FLAT_ARRAYS) || defined(CHECK_BOUNDS)
  inline array_fetch1<type> operator[](size_t n2){
    check_bounds(n2, S2);
    return array_fetch1<type>(arr, n2*S1, S1);
  }
#else
  // make operator[] dereference via chained pointer
  operator type**(){ return (type**) arr2; }
  //inline array_fetch1<type> operator[](size_t n2){
  // return array_fetch1<type>(arr2[n2]);
  //}
  //inline type* operator[](size_t n2){
  // return arr2[n2];
  //}
#endif
  type& fetch(size_t n2,size_t n1) const
  { return const_array_ref2<type>::fetch(n2,n1); }
  void set(size_t n2,size_t n1, type value)
  { const_array_ref2<type>::set(n2,n1, value); }
  void setall(type val){
  // #pragma omp for
  for(size_t i=0;i<size;i++) arr[i]=val;
  }
  type** fetch_arr2(){ return (type**) arr2; }
};
// Read-only 3D array reference: flat data in base_arr plus a 2-level
// chained pointer hierarchy arr3. Memory management is manual.
template <class type>
class const_array_ref3 : public base_arr<type>
{
 public:
  using base_arr<type>::arr;
  using base_arr<type>::get_arr;
 protected: // data
  size_t size;
  const size_t S3,S2,S1;
  //type* const __restrict__ arr;
  type*const*const*const arr3;
 public:
  ~const_array_ref3(){}
  // allocating constructor
  const_array_ref3(size_t s3, size_t s2, size_t s1) :
    size(s3*s2*s1), //arr(AlignedAlloc(type, size)),
    base_arr<type>(s3*s2*s1),
    S3(s3), S2(s2), S1(s1),
    arr3(newArray3<type>(arr,s3,s2,s1))
  { }
  // adopting constructor: wrap an existing chained array
  const_array_ref3(type*const*const* in,
    size_t s3, size_t s2, size_t s1) :
    size(s3*s2*s1), //arr(**in),
    base_arr<type>(**in, s3*s2*s1),
    S3(s3), S2(s2), S1(s1),
    arr3(in)
  { }
  int get_size() const { return size; }
  size_t dim1() const { return S3; }
  size_t dim2() const { return S2; }
  size_t dim3() const { return S1; }
#if defined(FLAT_ARRAYS) || defined(CHECK_BOUNDS)
  const const_array_get2<type> operator[](size_t n3)const{
    check_bounds(n3, S3);
    return const_array_get2<type>(arr, n3*S2, S2, S1);
  }
#else
  // make operator[] dereference via chained pointer
  operator type***(){ return (type***) arr3; }
  //inline const const_array_get2<type> operator[](size_t n3)const{
  // return const_array_get2<type>(arr3[n3]);
  //}
  //inline type*const* operator[](size_t n3)const{
  // return arr3[n3];
  //}
#endif
  void check_idx_bounds(size_t n3, size_t n2, size_t n1) const
  {
    check_bounds(n3, S3);
    check_bounds(n2, S2);
    check_bounds(n1, S1);
  }
  inline size_t getidx(size_t n3, size_t n2, size_t n1) const
  { check_idx_bounds(n3,n2,n1); return (n3*S2+n2)*S1+n1; }
#ifdef CHAINED_ARRAYS
  const type& get(size_t n3,size_t n2,size_t n1) const
  { check_idx_bounds(n3,n2,n1); return arr3[n3][n2][n1]; }
 protected: // hack: not in const_array_ref3 due to icpc compile error
  type& fetch(size_t n3,size_t n2,size_t n1) const
  { check_idx_bounds(n3,n2,n1); return arr3[n3][n2][n1]; }
  void set(size_t n3,size_t n2,size_t n1, type value)
  { check_idx_bounds(n3,n2,n1); arr3[n3][n2][n1] = value; }
#else
  const type& get(size_t n3,size_t n2,size_t n1) const
  { ALIGNED((type*)arr); return arr[getidx(n3,n2,n1)]; }
 protected: // hack: not in const_array_ref3 due to icpc compile error
  type& fetch(size_t n3,size_t n2,size_t n1) const
  { ALIGNED((type*)arr); return arr[getidx(n3,n2,n1)]; }
  void set(size_t n3,size_t n2,size_t n1, type value)
  { ALIGNED((type*)arr); arr[getidx(n3,n2,n1)] = value; }
#endif
 public:
  // BUG FIX: previously returned const double*** regardless of the element
  // type; now returns the properly typed pointer hierarchy.
  const type*** get_arr3(){return (const type***) arr3;}
};
// Mutable 3D array reference: adds write access and free() on top of
// const_array_ref3. free() releases the pointer hierarchy and the data.
template <class type>
class array_ref3 : public const_array_ref3<type>
{
  //using base_arr<type>::arr;
  using const_array_ref3<type>::size;
  using const_array_ref3<type>::arr;
  using const_array_ref3<type>::S3;
  using const_array_ref3<type>::S2;
  using const_array_ref3<type>::S1;
  using const_array_ref3<type>::arr3;
  using const_array_ref3<type>::getidx;
 public:
  using base_arr<type>::get_arr;
 public:
  ~array_ref3(){}
  array_ref3(size_t s3, size_t s2, size_t s1) :
    const_array_ref3<type>(s3,s2,s1)
  { }
  array_ref3(type*const*const* in,
    size_t s3, size_t s2, size_t s1) :
    const_array_ref3<type>(in,s3,s2,s1)
  { }
  void free(){ delArray3<type>((type***)arr3); }
#if defined(FLAT_ARRAYS) || defined(CHECK_BOUNDS)
  inline array_fetch2<type> operator[](size_t n3){
    check_bounds(n3, S3);
    return array_fetch2<type>(arr, n3*S2, S2, S1);
  }
#else
  // make operator[] dereference via chained pointer
  operator type***(){ return (type***) arr3; }
  // unfortunately ISO C++ considers this ambiguous:
  //inline array_fetch2<type> operator[](size_t n3){
  // return array_fetch2<type>((type**)arr3[n3]);
  //}
  //inline type** operator[](size_t n3){
  // return (type**)arr3[n3];
  //}
#endif
  type& fetch(size_t n3,size_t n2,size_t n1) const
  { return const_array_ref3<type>::fetch(n3,n2,n1); }
  void set(size_t n3,size_t n2,size_t n1, type value)
  { const_array_ref3<type>::set(n3,n2,n1, value); }
  void setall(type val){
  // #pragma omp for
  for(size_t i=0;i<size;i++) arr[i]=val;
  }
  type*** fetch_arr3(){ return (type***) arr3; }
};
// inheriting from base_arr<type> causes problems in g++ 4.0 (2005).
// Read-only 4D array reference: flat data in base_arr plus a 3-level
// chained pointer hierarchy arr4. Memory management is manual.
template <class type>
class const_array_ref4 : public base_arr<type>
{
 public:
  using base_arr<type>::arr;
  using base_arr<type>::get_arr;
 protected: // data
  size_t size;
  const size_t S4,S3,S2,S1;
  type*const*const*const*const arr4;
 public:
  ~const_array_ref4(){}
  // allocating constructor
  const_array_ref4(size_t s4, size_t s3, size_t s2, size_t s1) :
    size(s4*s3*s2*s1), //arr(AlignedAlloc(type, size)),
    base_arr<type>(s4*s3*s2*s1),
    S4(s4), S3(s3), S2(s2), S1(s1),
    arr4(newArray4<type>((type*)get_arr(),s4,s3,s2,s1))
  { }
  // adopting constructor: wrap an existing chained array
  const_array_ref4(type*const*const*const* in,
    size_t s4, size_t s3, size_t s2, size_t s1) :
    size(s4*s3*s2*s1), //arr(***in),
    base_arr<type>(***in, s4*s3*s2*s1),
    S4(s4), S3(s3), S2(s2), S1(s1),
    arr4(in)
  { }
  int get_size() const { return size; }
  //const size_t* dims()const{ return _dims; }
  size_t dim1() const { return S4; }
  size_t dim2() const { return S3; }
  size_t dim3() const { return S2; }
  size_t dim4() const { return S1; }
#if defined(FLAT_ARRAYS) || defined(CHECK_BOUNDS)
  const const_array_get3<type> operator[](size_t n4)const{
    check_bounds(n4, S4);
    return const_array_get3<type>(arr, n4*S3, S3, S2, S1);
  }
#else
  // make operator[] dereference via chained pointer
  operator type****(){ return (type****) arr4; }
  // unfortunately ISO C++ considers this ambiguous
  //inline const_array_get3<type> operator[](size_t n3){
  // return const_array_get3<type>(arr4[n3]);
  //}
  //inline type*const*const* operator[](size_t n3)const{
  // return arr4[n3];
  //}
#endif
  void check_idx_bounds(size_t n4, size_t n3, size_t n2, size_t n1) const
  {
    check_bounds(n4, S4);
    check_bounds(n3, S3);
    check_bounds(n2, S2);
    check_bounds(n1, S1);
  }
  inline size_t getidx(size_t n4, size_t n3, size_t n2, size_t n1) const
  { check_idx_bounds(n4,n3,n2,n1); return ((n4*S3+n3)*S2+n2)*S1+n1; }
#ifdef CHAINED_ARRAYS
  const type& get(size_t n4,size_t n3,size_t n2,size_t n1) const
  { check_idx_bounds(n4,n3,n2,n1); return arr4[n4][n3][n2][n1]; }
 protected: // hack: not in const_array_ref4 due to icpc compile error
  type& fetch(size_t n4,size_t n3,size_t n2,size_t n1) const
  { check_idx_bounds(n4,n3,n2,n1); return arr4[n4][n3][n2][n1]; }
  void set(size_t n4,size_t n3,size_t n2,size_t n1, type value)
  { check_idx_bounds(n4,n3,n2,n1); arr4[n4][n3][n2][n1] = value; }
#else
  const type& get(size_t n4,size_t n3,size_t n2,size_t n1) const
  { ALIGNED((type*)arr); return arr[getidx(n4,n3,n2,n1)]; }
 protected: // hack: not in const_array_ref4 due to icpc compile error
  type& fetch(size_t n4,size_t n3,size_t n2,size_t n1) const
  { ALIGNED((type*)arr); return arr[getidx(n4,n3,n2,n1)]; }
  void set(size_t n4,size_t n3,size_t n2,size_t n1, type value)
  { ALIGNED((type*)arr); arr[getidx(n4,n3,n2,n1)] = value; }
#endif
 protected:
  void setall(type val)
  {
  // #pragma omp for
  // FIX: loop index was int, a signed/unsigned mismatch against size_t
  // size that could overflow for very large arrays.
  for(size_t i=0;i<size;i++) arr[i]=val;
  }
 public:
  // BUG FIX: previously returned const double**** regardless of the element
  // type; now returns the properly typed pointer hierarchy.
  const type**** get_arr4(){return (const type****) arr4;}
};
// Mutable 4D array reference: adds write access and free() on top of
// const_array_ref4. free() releases the pointer hierarchy and the data.
template <class type>
class array_ref4 : public const_array_ref4<type>
{
  using const_array_ref4<type>::arr;
  using const_array_ref4<type>::S4;
  using const_array_ref4<type>::S3;
  using const_array_ref4<type>::S2;
  using const_array_ref4<type>::S1;
  using const_array_ref4<type>::arr4;
  using const_array_ref4<type>::getidx;
 public: // this did not work unless I made the using statment public.
  using const_array_ref4<type>::get_size;
  using base_arr<type>::get_arr;
 public:
  ~array_ref4(){}
  array_ref4(size_t s4, size_t s3, size_t s2, size_t s1) :
    const_array_ref4<type>(s4,s3,s2,s1)
  { }
  array_ref4(type*const*const*const* in,
    size_t s4, size_t s3, size_t s2, size_t s1) :
    const_array_ref4<type>(in,s4,s3,s2,s1)
  { }
#if defined(FLAT_ARRAYS) || defined(CHECK_BOUNDS)
  inline array_fetch3<type> operator[](size_t n4){
    check_bounds(n4, S4);
    return array_fetch3<type>(arr, n4*S3, S3, S2, S1);
  }
#else
  operator type****(){ return (type****) arr4; }
  // unfortunately ISO C++ considers this ambiguous
  //inline array_fetch3<type> operator[](size_t n4){
  // return array_fetch3<type>((type***)arr4[n4]);
  //}
#endif
  type& fetch(size_t n4,size_t n3,size_t n2,size_t n1) const
  { return const_array_ref4<type>::fetch(n4,n3,n2,n1); }
  void set(size_t n4,size_t n3,size_t n2,size_t n1, type value)
  { const_array_ref4<type>::set(n4,n3,n2,n1, value); }
  void free(){ delArray4<type>((type****)arr4); }
  type**** fetch_arr4(){ return (type****) arr4; }
  void setall(type val) { const_array_ref4<type>::setall(val); }
};
// Versions of array classes which automatically free memory
// (corresponding to multi_array in the boost library).
//
// Note that the nonempty destructor kills performance
// unless compiling with -fno-exceptions
// Owning 1-D array (cf. boost::multi_array): releases its storage when
// destroyed.  The nonempty destructor can hurt performance unless
// compiling with -fno-exceptions.
template <class type>
struct array1 : public array_ref1<type>
{
    array1(size_t s1) : array_ref1<type>(s1) {}
    ~array1() { array_ref1<type>::free(); }
};
// Owning 2-D array: releases its storage when destroyed.
template <class type>
struct array2 : public array_ref2<type>
{
    array2(size_t s2, size_t s1) : array_ref2<type>(s2, s1) {}
    ~array2() { array_ref2<type>::free(); }
};
// Owning 3-D array: releases its storage when destroyed.
template <class type>
struct array3 : public array_ref3<type>
{
    array3(size_t s3, size_t s2, size_t s1) : array_ref3<type>(s3, s2, s1) {}
    ~array3() { array_ref3<type>::free(); }
};
// Owning 4-D array: releases its storage when destroyed.
template <class type>
struct array4 : public array_ref4<type>
{
    array4(size_t s4, size_t s3, size_t s2, size_t s1)
        : array_ref4<type>(s4, s3, s2, s1)
    {
    }
    ~array4() { array_ref4<type>::free(); }
};
template < class type >
inline const type**** get_arr4(const_array_ref4<type>& in)
{ return in.get_arr4(); }
template < class type >
inline type**** fetch_arr4(array_ref4<type>& in)
{ return in.fetch_arr4(); }
template < class type >
inline const type*** get_arr3(const_array_ref3<type>& in)
{ return in.get_arr3(); }
template < class type >
inline type*** fetch_arr3(array_ref3<type>& in)
{ return in.fetch_arr3(); }
template < class type >
inline const type** get_arr2(const_array_ref2<type>& in)
{ return in.get_arr2(); }
template < class type >
inline type** fetch_arr2(array_ref2<type>& in)
{ return in.fetch_arr2(); }
template < class type >
inline type* fetch_arr(array_ref1<type>& in)
{ return in.get_arr(); }
template < class type >
inline type* fetch_arr(array_fetch1<type>& in)
{ return in.fetch_arr(); }
}
// Unfortunately we cannot make an arr_fetch3<type> automatically
// convert itself to a type***, since it overrides its methods,
// so the user must use an explicit conversion routine to obtain
// the raw type*** pointer.
// Convenience wrappers: allocate a raw multi-dimensional array of the
// given element type and sizes (thin aliases for the newArrayN<type>
// allocation function templates).
#define newArr4(type,sz1,sz2,sz3,sz4) newArray4<type>((sz1),(sz2),(sz3),(sz4))
#define newArr3(type,sz1,sz2,sz3) newArray3<type>((sz1),(sz2),(sz3))
#define newArr2(type,sz1,sz2) newArray2<type>((sz1),(sz2))
/*** end Array classes with flexible dimensions ***/
#endif
|
client_utils.h | // Copyright (c) 2020 - present Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#ifndef CLIENT_UTILS_H
#define CLIENT_UTILS_H
#include <algorithm>
#include <complex>
#include <iostream>
#include <mutex>
#include <numeric>
#include <omp.h>
#include <random>
#include <tuple>
#include <vector>
#include "../shared/printbuffer.h"
#include "rocfft.h"
#include <hip/hip_runtime_api.h>
static const size_t ONE_GiB = 1 << 30;
// Determine the size of the data type given the precision and type.
template <typename Tsize>
inline Tsize var_size(const rocfft_precision precision, const rocfft_array_type type)
{
size_t var_size = 0;
switch(precision)
{
case rocfft_precision_single:
var_size = sizeof(float);
break;
case rocfft_precision_double:
var_size = sizeof(double);
break;
}
switch(type)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_hermitian_interleaved:
var_size *= 2;
break;
default:
break;
}
return var_size;
}
// Container class for test parameters describing one FFT problem.
// All lengths and strides are row-major.  The isize/osize fields must be
// populated (e.g. via compute_isize()/compute_osize()) before querying
// buffer sizes.
class rocfft_params
{
public:
    // All parameters are row-major.
    std::vector<size_t>     length;
    std::vector<size_t>     istride;
    std::vector<size_t>     ostride;
    size_t                  nbatch         = 1;
    rocfft_precision        precision      = rocfft_precision_double;
    rocfft_transform_type   transform_type = rocfft_transform_type_complex_forward;
    rocfft_result_placement placement      = rocfft_placement_inplace;
    size_t                  idist          = 0;
    size_t                  odist          = 0;
    rocfft_array_type       itype          = rocfft_array_type_complex_interleaved;
    rocfft_array_type       otype          = rocfft_array_type_complex_interleaved;
    std::vector<size_t>     ioffset        = {0, 0};
    std::vector<size_t>     ooffset        = {0, 0};
    std::vector<size_t>     isize;
    std::vector<size_t>     osize;

    // run testing load/store callbacks
    bool                    run_callbacks   = false;
    static constexpr double load_cb_scalar  = 0.457813941;
    static constexpr double store_cb_scalar = 0.391504938;

    // Given an array type, return the name as a string.
    std::string array_type_name(const rocfft_array_type type) const
    {
        switch(type)
        {
        case rocfft_array_type_complex_interleaved:
            return "rocfft_array_type_complex_interleaved";
        case rocfft_array_type_complex_planar:
            return "rocfft_array_type_complex_planar";
        case rocfft_array_type_real:
            return "rocfft_array_type_real";
        case rocfft_array_type_hermitian_interleaved:
            return "rocfft_array_type_hermitian_interleaved";
        case rocfft_array_type_hermitian_planar:
            return "rocfft_array_type_hermitian_planar";
        case rocfft_array_type_unset:
            return "rocfft_array_type_unset";
        }
        return "";
    }

    // Convert to string for output (one field per separator).
    std::string str(const std::string& separator = ", ") const
    {
        std::stringstream ss;
        ss << "length:";
        for(auto i : length)
            ss << " " << i;
        ss << separator;
        ss << "istride:";
        for(auto i : istride)
            ss << " " << i;
        ss << separator;
        ss << "idist: " << idist << separator;
        ss << "ostride:";
        for(auto i : ostride)
            ss << " " << i;
        ss << separator;
        ss << "odist: " << odist << separator;
        ss << "batch: " << nbatch << separator;
        ss << "isize:";
        for(auto i : isize)
            ss << " " << i;
        ss << separator;
        ss << "osize:";
        for(auto i : osize)
            ss << " " << i;
        ss << separator;
        ss << "ioffset:";
        for(auto i : ioffset)
            ss << " " << i;
        ss << separator;
        ss << "ooffset:";
        for(auto i : ooffset)
            ss << " " << i;
        ss << separator;
        if(placement == rocfft_placement_inplace)
            ss << "in-place";
        else
            ss << "out-of-place";
        ss << separator;
        ss << array_type_name(itype) << " -> " << array_type_name(otype) << separator;
        if(precision == rocfft_precision_single)
            ss << "single-precision";
        else
            ss << "double-precision";
        ss << separator;
        ss << "ilength:";
        for(const auto i : ilength())
            ss << " " << i;
        ss << separator;
        ss << "olength:";
        for(const auto i : olength())
            ss << " " << i;
        ss << separator;
        ss << "ibuffer_size:";
        for(const auto i : ibuffer_sizes())
            ss << " " << i;
        ss << separator;
        ss << "obuffer_size:";
        for(const auto i : obuffer_sizes())
            ss << " " << i;
        ss << separator;
        return ss.str();
    }

    // Stream output operator (for gtest, etc).
    friend std::ostream& operator<<(std::ostream& stream, const rocfft_params& params)
    {
        stream << params.str();
        return stream;
    }

    // Dimension of the transform.
    size_t dim() const
    {
        return length.size();
    }

    // Input length: for real inverse transforms the innermost dimension
    // holds Hermitian-reduced data of size N/2+1.
    std::vector<size_t> ilength() const
    {
        auto ilength = length;
        if(transform_type == rocfft_transform_type_real_inverse)
            ilength[dim() - 1] = ilength[dim() - 1] / 2 + 1;
        return ilength;
    }

    // Output length: for real forward transforms the innermost dimension
    // holds Hermitian-reduced data of size N/2+1.
    std::vector<size_t> olength() const
    {
        auto olength = length;
        if(transform_type == rocfft_transform_type_real_forward)
            olength[dim() - 1] = olength[dim() - 1] / 2 + 1;
        return olength;
    }

    // Number of separate buffers implied by an array type:
    // interleaved/real formats use one, planar formats use two.
    size_t nbuffer(const rocfft_array_type type) const
    {
        switch(type)
        {
        case rocfft_array_type_real:
        case rocfft_array_type_complex_interleaved:
        case rocfft_array_type_hermitian_interleaved:
            return 1;
        case rocfft_array_type_complex_planar:
        case rocfft_array_type_hermitian_planar:
            return 2;
        case rocfft_array_type_unset:
            return 0;
        }
        // Fix: control previously flowed off the end of this non-void
        // function (undefined behavior) for an out-of-range enum value.
        return 0;
    }

    // Number of input buffers
    size_t nibuffer() const
    {
        return nbuffer(itype);
    }

    // Number of output buffers
    size_t nobuffer() const
    {
        return nbuffer(otype);
    }

    // Compute the farthest point (in elements) from the original pointer.
    // Assumes stride has at least as many entries as length.
    size_t compute_ptrdiff(const std::vector<size_t>& length,
                           const std::vector<size_t>& stride,
                           const size_t               nbatch,
                           const size_t               dist) const
    {
        size_t val = 0;
        if(!length.empty())
        {
            val = 1;
            // size_t index avoids the signed/unsigned comparison the
            // previous int counter produced.
            for(size_t i = 0; i < length.size(); ++i)
            {
                val += (length[i] - 1) * stride[i];
            }
            val += (nbatch - 1) * dist;
        }
        return val;
    }

    // Required element count for each input buffer (including offsets).
    auto compute_isize() const
    {
        auto                il  = ilength();
        size_t              val = compute_ptrdiff(il, istride, nbatch, idist);
        std::vector<size_t> isize(nibuffer());
        for(size_t i = 0; i < isize.size(); ++i)
        {
            isize[i] = val + ioffset[i];
        }
        return isize;
    }

    // Required element count for each output buffer (including offsets).
    auto compute_osize() const
    {
        auto                ol  = olength();
        size_t              val = compute_ptrdiff(ol, ostride, nbatch, odist);
        std::vector<size_t> osize(nobuffer());
        for(size_t i = 0; i < osize.size(); ++i)
        {
            osize[i] = val + ooffset[i];
        }
        return osize;
    }

    // Byte sizes of the input buffers; requires isize to be populated.
    std::vector<size_t> ibuffer_sizes() const
    {
        std::vector<size_t> ibuffer_sizes;
        // In-place real-to-complex transforms need to have enough space in the input buffer to
        // accommodate the output, which is slightly larger.
        if(placement == rocfft_placement_inplace
           && transform_type == rocfft_transform_type_real_forward)
        {
            return obuffer_sizes();
        }
        if(isize.empty())
            return ibuffer_sizes;
        switch(itype)
        {
        case rocfft_array_type_complex_planar:
        case rocfft_array_type_hermitian_planar:
            ibuffer_sizes.resize(2);
            break;
        default:
            ibuffer_sizes.resize(1);
        }
        for(unsigned i = 0; i < ibuffer_sizes.size(); i++)
        {
            ibuffer_sizes[i] = isize[i] * var_size<size_t>(precision, itype);
        }
        return ibuffer_sizes;
    }

    // Byte sizes of the output buffers; requires osize to be populated.
    std::vector<size_t> obuffer_sizes() const
    {
        std::vector<size_t> obuffer_sizes;
        if(osize.empty())
            return obuffer_sizes;
        switch(otype)
        {
        case rocfft_array_type_complex_planar:
        case rocfft_array_type_hermitian_planar:
            obuffer_sizes.resize(2);
            break;
        default:
            obuffer_sizes.resize(1);
        }
        for(unsigned i = 0; i < obuffer_sizes.size(); i++)
        {
            obuffer_sizes[i] = osize[i] * var_size<size_t>(precision, otype);
        }
        return obuffer_sizes;
    }

    // Estimate the amount of host memory needed, in bytes.
    size_t needed_ram(const int verbose) const
    {
        // Host input, output, and input copy: 3 buffers, all contiguous.
        // Also: FFTW dummy input, and an input copy for shared futures. Total 5.
        size_t needed_ram
            = 5
              * std::accumulate(
                  length.begin(), length.end(), static_cast<size_t>(1), std::multiplies<size_t>());
        // GPU input buffer:
        needed_ram += std::inner_product(
            length.begin(), length.end(), istride.begin(), static_cast<size_t>(0));
        // GPU output buffer:
        needed_ram += std::inner_product(
            length.begin(), length.end(), ostride.begin(), static_cast<size_t>(0));
        // Account for precision and data type: complex transforms carry
        // two scalars per element.
        if(transform_type != rocfft_transform_type_real_forward
           && transform_type != rocfft_transform_type_real_inverse)
        {
            needed_ram *= 2;
        }
        switch(precision)
        {
        case rocfft_precision_single:
            needed_ram *= 4;
            break;
        case rocfft_precision_double:
            needed_ram *= 8;
            break;
        }
        needed_ram *= nbatch;
        if(verbose)
        {
            std::cout << "required host memory (GiB): " << needed_ram / ONE_GiB << std::endl;
        }
        return needed_ram;
    }

    // Column-major getters (reversed copies of the row-major fields):
    std::vector<size_t> ilength_cm() const
    {
        auto ilength_cm = ilength();
        std::reverse(std::begin(ilength_cm), std::end(ilength_cm));
        return ilength_cm;
    }
    std::vector<size_t> olength_cm() const
    {
        auto olength_cm = olength();
        std::reverse(std::begin(olength_cm), std::end(olength_cm));
        return olength_cm;
    }
    std::vector<size_t> length_cm() const
    {
        auto length_cm = length;
        std::reverse(std::begin(length_cm), std::end(length_cm));
        return length_cm;
    }
    std::vector<size_t> istride_cm() const
    {
        auto istride_cm = istride;
        std::reverse(std::begin(istride_cm), std::end(istride_cm));
        return istride_cm;
    }
    std::vector<size_t> ostride_cm() const
    {
        auto ostride_cm = ostride;
        std::reverse(std::begin(ostride_cm), std::end(ostride_cm));
        return ostride_cm;
    }

    // Return true if the given GPU parameters would produce a valid transform.
    bool valid(const int verbose) const
    {
        if(ioffset.size() < nibuffer() || ooffset.size() < nobuffer())
            return false;

        // Check that in-place transforms have the same input and output stride:
        if(placement == rocfft_placement_inplace)
        {
            const auto stridesize = std::min(istride.size(), ostride.size());
            bool       samestride = true;
            for(size_t i = 0; i < stridesize; ++i)
            {
                if(istride[i] != ostride[i])
                    samestride = false;
            }
            if((transform_type == rocfft_transform_type_complex_forward
                || transform_type == rocfft_transform_type_complex_inverse)
               && !samestride)
            {
                // In-place transforms require identical input and output strides.
                if(verbose)
                {
                    std::cout << "istride:";
                    for(const auto& i : istride)
                        std::cout << " " << i;
                    std::cout << " ostride0:";
                    for(const auto& i : ostride)
                        std::cout << " " << i;
                    std::cout << " differ; skipped for in-place transforms: skipping test"
                              << std::endl;
                }
                return false;
            }

            if((transform_type == rocfft_transform_type_complex_forward
                || transform_type == rocfft_transform_type_complex_inverse)
               && (idist != odist))
            {
                // In-place transforms require identical distance
                if(verbose)
                {
                    std::cout << "idist:" << idist << " odist:" << odist
                              << " differ; skipped for in-place transforms: skipping test"
                              << std::endl;
                }
                return false;
            }

            if((transform_type == rocfft_transform_type_real_forward
                || transform_type == rocfft_transform_type_real_inverse)
               && (istride.empty() || ostride.empty() || istride.back() != 1
                   || ostride.back() != 1))
            {
                // In-place real/complex transforms require unit strides.
                // Fix: guard against empty stride vectors before calling
                // .back(), which was previously undefined behavior.
                if(verbose && !istride.empty() && !ostride.empty())
                {
                    std::cout
                        << "istride.back(): " << istride.back()
                        << " ostride.back(): " << ostride.back()
                        << " must be unitary for in-place real/complex transforms: skipping test"
                        << std::endl;
                }
                return false;
            }

            if((itype == rocfft_array_type_complex_interleaved
                && otype == rocfft_array_type_complex_planar)
               || (itype == rocfft_array_type_complex_planar
                   && otype == rocfft_array_type_complex_interleaved))
            {
                if(verbose)
                {
                    std::cout << "In-place c2c transforms require identical io types; skipped.\n";
                }
                return false;
            }

            // Check offsets
            switch(transform_type)
            {
            case rocfft_transform_type_complex_forward:
            case rocfft_transform_type_complex_inverse:
                for(size_t i = 0; i < nibuffer(); ++i)
                {
                    if(ioffset[i] != ooffset[i])
                        return false;
                }
                break;
            case rocfft_transform_type_real_forward:
                if(ioffset[0] != 2 * ooffset[0])
                    return false;
                break;
            case rocfft_transform_type_real_inverse:
                if(2 * ioffset[0] != ooffset[0])
                    return false;
                break;
            }
        }

        // The parameters are valid.
        return true;
    }
};
// This is used with the program_options class so that the user can type an integer on the
// command line and we store into an enum varaible
template <typename _Elem, typename _Traits>
std::basic_istream<_Elem, _Traits>& operator>>(std::basic_istream<_Elem, _Traits>& stream,
rocfft_array_type& atype)
{
unsigned tmp;
stream >> tmp;
atype = rocfft_array_type(tmp);
return stream;
}
// similarly for transform type
template <typename _Elem, typename _Traits>
std::basic_istream<_Elem, _Traits>& operator>>(std::basic_istream<_Elem, _Traits>& stream,
rocfft_transform_type& ttype)
{
unsigned tmp;
stream >> tmp;
ttype = rocfft_transform_type(tmp);
return stream;
}
// Total iteration count for 1-, 2-, and 3-D index spaces.
template <typename T1>
size_t count_iters(const T1& len)
{
    return len;
}
template <typename T1>
size_t count_iters(const std::tuple<T1, T1>& len)
{
    return std::get<0>(len) * std::get<1>(len);
}
template <typename T1>
size_t count_iters(const std::tuple<T1, T1, T1>& len)
{
    return std::get<0>(len) * std::get<1>(len) * std::get<2>(len);
}
// Decide how many partitions to break an iteration problem into.
// Without OpenMP support this is always 1.
template <typename T1>
static size_t compute_partition_count(T1 length)
{
#ifdef BUILD_CLIENTS_TESTS_OPENMP
    // Too many threads cause contention, which slows things down --
    // particularly noticeable with the mix_3D tests -- so cap the count.
    static const size_t MAX_PARTITIONS = 8;
    // Don't bother threading problem sizes that are too small: each
    // thread should get at least this many iterations.
    static const size_t MIN_ITERS_PER_THREAD = 2048;
    const size_t        iters               = count_iters(length);
    const size_t        hw_threads
        = std::min(MAX_PARTITIONS, static_cast<size_t>(omp_get_num_procs()));
    if(hw_threads == 0)
        return 1;
    // Either use the whole CPU, or roughly ceil(iters/iters_per_thread).
    // NOTE(review): an exact ceiling would be (iters + MIN - 1) / MIN;
    // the original adds one extra -- preserved here.
    return std::min(hw_threads, (iters + MIN_ITERS_PER_THREAD + 1) / MIN_ITERS_PER_THREAD);
#else
    return 1;
#endif
}
// Break a scalar length into num_parts pieces, returning
// [(start0, end0), (start1, end1), ...].  The last piece absorbs any
// remainder when the length does not divide evenly.
// Returns an empty vector for a zero length or zero parts.
template <typename T1>
std::vector<std::pair<T1, T1>> partition_base(const T1& length, size_t num_parts)
{
    static_assert(std::is_integral<T1>::value, "Integral required.");

    std::vector<std::pair<T1, T1>> ret;
    // Fix: a zero length (or zero parts) previously divided by zero and
    // called ret.back() on an empty vector -- both undefined behavior.
    if(length == 0 || num_parts == 0)
        return ret;

    // Make sure we don't exceed the length.  The explicit template
    // argument keeps std::min well-formed when T1 is not size_t.
    num_parts = std::min<size_t>(length, num_parts);
    ret.resize(num_parts);

    const T1 partition_size = length / num_parts;
    T1       cur_partition  = 0;
    for(size_t i = 0; i < num_parts; ++i, cur_partition += partition_size)
    {
        ret[i].first  = cur_partition;
        ret[i].second = cur_partition + partition_size;
    }
    // The last partition might not divide evenly; fix it up.
    ret.back().second = length;
    return ret;
}
// Returns pairs of (startindex, endindex) covering a 1-D length.
template <typename T1>
std::vector<std::pair<T1, T1>> partition_rowmajor(const T1& length)
{
    const auto nparts = compute_partition_count(length);
    return partition_base(length, nparts);
}
// Partition on the leftmost component of the tuple, for row-major indexing.
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>>
    partition_rowmajor(const std::tuple<T1, T1>& length)
{
    const auto parts = partition_base(std::get<0>(length), compute_partition_count(length));
    std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>> ret(parts.size());
    for(size_t i = 0; i < parts.size(); ++i)
    {
        // Each piece spans [start, end) in dim 0 and the full extent in dim 1.
        ret[i].first  = std::make_tuple(parts[i].first, static_cast<T1>(0));
        ret[i].second = std::make_tuple(parts[i].second, std::get<1>(length));
    }
    return ret;
}
// Partition on the leftmost component of a 3-tuple, for row-major indexing.
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>>
    partition_rowmajor(const std::tuple<T1, T1, T1>& length)
{
    const auto parts = partition_base(std::get<0>(length), compute_partition_count(length));
    std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>> ret(parts.size());
    for(size_t i = 0; i < parts.size(); ++i)
    {
        // Each piece spans [start, end) in dim 0 and the full extent in dims 1-2.
        ret[i].first  = std::make_tuple(parts[i].first, static_cast<T1>(0), static_cast<T1>(0));
        ret[i].second = std::make_tuple(parts[i].second, std::get<1>(length), std::get<2>(length));
    }
    return ret;
}
// Returns pairs of (startindex, endindex) covering a 1-D length.
template <typename T1>
std::vector<std::pair<T1, T1>> partition_colmajor(const T1& length)
{
    const auto nparts = compute_partition_count(length);
    return partition_base(length, nparts);
}
// Partition on the rightmost component of the tuple, for col-major indexing.
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>>
    partition_colmajor(const std::tuple<T1, T1>& length)
{
    const auto parts = partition_base(std::get<1>(length), compute_partition_count(length));
    std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>> ret(parts.size());
    for(size_t i = 0; i < parts.size(); ++i)
    {
        // Each piece spans [start, end) in dim 1 and the full extent in dim 0.
        ret[i].first  = std::make_tuple(static_cast<T1>(0), parts[i].first);
        ret[i].second = std::make_tuple(std::get<0>(length), parts[i].second);
    }
    return ret;
}
// Partition on the rightmost component of a 3-tuple, for col-major indexing.
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>>
    partition_colmajor(const std::tuple<T1, T1, T1>& length)
{
    const auto parts = partition_base(std::get<2>(length), compute_partition_count(length));
    std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>> ret(parts.size());
    for(size_t i = 0; i < parts.size(); ++i)
    {
        // Each piece spans [start, end) in dim 2 and the full extent in dims 0-1.
        ret[i].first  = std::make_tuple(static_cast<T1>(0), static_cast<T1>(0), parts[i].first);
        ret[i].second = std::make_tuple(std::get<0>(length), std::get<1>(length), parts[i].second);
    }
    return ret;
}
// Linear index for a 1-D position and stride, plus a base offset.
template <typename T1, typename T2>
size_t compute_index(T1 length, T2 stride, size_t base)
{
    static_assert(std::is_integral<T1>::value, "Integral required.");
    static_assert(std::is_integral<T2>::value, "Integral required.");
    return base + length * stride;
}
// Linear index for a 2-D position and stride, plus a base offset.
template <typename T1, typename T2>
size_t
    compute_index(const std::tuple<T1, T1>& length, const std::tuple<T2, T2>& stride, size_t base)
{
    static_assert(std::is_integral<T1>::value, "Integral required.");
    static_assert(std::is_integral<T2>::value, "Integral required.");
    size_t idx = base;
    idx += std::get<0>(length) * std::get<0>(stride);
    idx += std::get<1>(length) * std::get<1>(stride);
    return idx;
}
// Linear index for a 3-D position and stride, plus a base offset.
template <typename T1, typename T2>
size_t compute_index(const std::tuple<T1, T1, T1>& length,
                     const std::tuple<T2, T2, T2>& stride,
                     size_t                        base)
{
    static_assert(std::is_integral<T1>::value, "Integral required.");
    static_assert(std::is_integral<T2>::value, "Integral required.");
    size_t idx = base;
    idx += std::get<0>(length) * std::get<0>(stride);
    idx += std::get<1>(length) * std::get<1>(stride);
    idx += std::get<2>(length) * std::get<2>(stride);
    return idx;
}
// Given a row-major length vector, compute the full stride vector.
// The optional stride0 argument supplies strides for the trailing
// (fastest-moving) dimensions; remaining strides are derived from the
// lengths.  The optional rcpadding argument pads the innermost dimension
// to 2*(N/2+1) elements, as required for in-place multi-dimensional
// real/complex transforms.
template <typename T1>
inline std::vector<T1> compute_stride(const std::vector<T1>&     length,
                                      const std::vector<size_t>& stride0   = std::vector<size_t>(),
                                      const bool                 rcpadding = false)
{
    // Fix: use size_t throughout instead of mixing int and size_t, which
    // previously produced implementation-defined conversions in the loop
    // bounds; also guard the degenerate empty-length case.
    const size_t dim = length.size();

    std::vector<T1> stride(dim);
    if(dim == 0)
        return stride;

    size_t dimoffset = 0;
    if(stride0.empty())
    {
        // Set the contiguous stride:
        stride[dim - 1] = 1;
        dimoffset       = 1;
    }
    else
    {
        // Copy the input values to the end of the stride array:
        for(size_t i = 0; i < stride0.size(); ++i)
        {
            stride[dim - stride0.size() + i] = stride0[i];
        }
    }

    if(stride0.size() < dim)
    {
        // Compute any remaining values by accumulating lengths outward.
        for(size_t i = dim - dimoffset - stride0.size(); i-- > 0;)
        {
            auto lengthip1 = length[i + 1];
            if(rcpadding && i == dim - 2)
            {
                // Pad the innermost dimension for in-place real/complex.
                lengthip1 = 2 * (lengthip1 / 2 + 1);
            }
            stride[i] = stride[i + 1] * lengthip1;
        }
    }
    return stride;
}
// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches. The input and output
// types are identical.
// Copy a strided, batched buffer to another buffer of the same value type.
// whole_length describes the index space (scalar, 2- or 3-tuple);
// istride/idist and ostride/odist give per-dimension strides and the
// inter-batch distance; ioffset[0]/ooffset[0] are added to every index.
// NOTE(review): relies on increment_rowmajor() (defined elsewhere) to
// walk the index space in row-major order.
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_1to1(const Tval* input,
Tval* output,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
// Fast path: when layouts match, the output index equals the input index.
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
// One chunk of the index space per OpenMP thread.
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const auto odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
output[odx + ooffset[0]] = input[idx + ioffset[0]];
} while(increment_rowmajor(index, length));
}
}
}
// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches. The input type is
// planar and the output type is complex interleaved.
// Copy a strided, batched planar input (two real buffers: real and
// imaginary components) into a single complex-interleaved output buffer.
// ioffset[0]/ioffset[1] offset the two input planes; ooffset[0] offsets
// the output.
// NOTE(review): relies on increment_rowmajor() (defined elsewhere).
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_2to1(const Tval* input0,
const Tval* input1,
std::complex<Tval>* output,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
// Fast path: when layouts match, the output index equals the input index.
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
// One chunk of the index space per OpenMP thread.
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const auto odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
output[odx + ooffset[0]]
= std::complex<Tval>(input0[idx + ioffset[0]], input1[idx + ioffset[1]]);
} while(increment_rowmajor(index, length));
}
}
}
// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches. The input type is
// complex interleaved and the output type is planar.
// Copy a strided, batched complex-interleaved input buffer into a planar
// output (two real buffers: output0 receives the real parts, output1 the
// imaginary parts).  ooffset[0]/ooffset[1] offset the two output planes.
// NOTE(review): relies on increment_rowmajor() (defined elsewhere).
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_1to2(const std::complex<Tval>* input,
Tval* output0,
Tval* output1,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
// Fast path: when layouts match, the output index equals the input index.
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
// One chunk of the index space per OpenMP thread.
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const auto odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
output0[odx + ooffset[0]] = input[idx + ioffset[0]].real();
output1[odx + ooffset[1]] = input[idx + ioffset[0]].imag();
} while(increment_rowmajor(index, length));
}
}
}
// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches. The input type given
// by itype, and the output type is given by otype.
// Copy batched, strided data between buffers whose array types are given
// by itype and otype, dispatching to the 1to1/1to2/2to1 helpers above.
// Throws std::runtime_error for unsupported type combinations.
template <typename Tallocator1,
typename Tallocator2,
typename Tint1,
typename Tint2,
typename Tint3>
inline void copy_buffers(const std::vector<std::vector<char, Tallocator1>>& input,
std::vector<std::vector<char, Tallocator2>>& output,
const Tint1& length,
const size_t nbatch,
const rocfft_precision precision,
const rocfft_array_type itype,
const Tint2& istride,
const size_t idist,
const rocfft_array_type otype,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
if(itype == otype)
{
// Same layout on both sides: element-for-element copies.
switch(itype)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_hermitian_interleaved:
switch(precision)
{
case rocfft_precision_single:
copy_buffers_1to1(reinterpret_cast<const std::complex<float>*>(input[0].data()),
reinterpret_cast<std::complex<float>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case rocfft_precision_double:
copy_buffers_1to1(reinterpret_cast<const std::complex<double>*>(input[0].data()),
reinterpret_cast<std::complex<double>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
break;
case rocfft_array_type_real:
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
// Real and planar data: copy each component buffer independently.
// NOTE(review): int idx vs input.size() is a signed/unsigned
// comparison -- harmless for realistic buffer counts.
for(int idx = 0; idx < input.size(); ++idx)
{
switch(precision)
{
case rocfft_precision_single:
copy_buffers_1to1(reinterpret_cast<const float*>(input[idx].data()),
reinterpret_cast<float*>(output[idx].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case rocfft_precision_double:
copy_buffers_1to1(reinterpret_cast<const double*>(input[idx].data()),
reinterpret_cast<double*>(output[idx].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
}
break;
default:
throw std::runtime_error("Invalid data type");
break;
}
}
else if((itype == rocfft_array_type_complex_interleaved
&& otype == rocfft_array_type_complex_planar)
|| (itype == rocfft_array_type_hermitian_interleaved
&& otype == rocfft_array_type_hermitian_planar))
{
// copy 1to2: interleaved input, planar output.
switch(precision)
{
case rocfft_precision_single:
copy_buffers_1to2(reinterpret_cast<const std::complex<float>*>(input[0].data()),
reinterpret_cast<float*>(output[0].data()),
reinterpret_cast<float*>(output[1].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case rocfft_precision_double:
copy_buffers_1to2(reinterpret_cast<const std::complex<double>*>(input[0].data()),
reinterpret_cast<double*>(output[0].data()),
reinterpret_cast<double*>(output[1].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
}
else if((itype == rocfft_array_type_complex_planar
&& otype == rocfft_array_type_complex_interleaved)
|| (itype == rocfft_array_type_hermitian_planar
&& otype == rocfft_array_type_hermitian_interleaved))
{
// copy 2 to 1: planar input, interleaved output.
switch(precision)
{
case rocfft_precision_single:
copy_buffers_2to1(reinterpret_cast<const float*>(input[0].data()),
reinterpret_cast<const float*>(input[1].data()),
reinterpret_cast<std::complex<float>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case rocfft_precision_double:
copy_buffers_2to1(reinterpret_cast<const double*>(input[0].data()),
reinterpret_cast<const double*>(input[1].data()),
reinterpret_cast<std::complex<double>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
}
else
{
throw std::runtime_error("Invalid input and output types.");
}
}
// unroll arbitrary-dimension copy_buffers into specializations for 1-, 2-, 3-dimensions
// Unroll the arbitrary-dimension copy_buffers into the specialized 1-,
// 2- and 3-dimensional overloads above (scalar / 2-tuple / 3-tuple
// length and stride arguments).  Dimensions above 3 are unsupported and
// abort.
template <typename Tallocator1,
typename Tallocator2,
typename Tint1,
typename Tint2,
typename Tint3>
inline void copy_buffers(const std::vector<std::vector<char, Tallocator1>>& input,
std::vector<std::vector<char, Tallocator2>>& output,
const std::vector<Tint1>& length,
const size_t nbatch,
const rocfft_precision precision,
const rocfft_array_type itype,
const std::vector<Tint2>& istride,
const size_t idist,
const rocfft_array_type otype,
const std::vector<Tint3>& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
switch(length.size())
{
case 1:
return copy_buffers(input,
output,
length[0],
nbatch,
precision,
itype,
istride[0],
idist,
otype,
ostride[0],
odist,
ioffset,
ooffset);
case 2:
return copy_buffers(input,
output,
std::make_tuple(length[0], length[1]),
nbatch,
precision,
itype,
std::make_tuple(istride[0], istride[1]),
idist,
otype,
std::make_tuple(ostride[0], ostride[1]),
odist,
ioffset,
ooffset);
case 3:
return copy_buffers(input,
output,
std::make_tuple(length[0], length[1], length[2]),
nbatch,
precision,
itype,
std::make_tuple(istride[0], istride[1], istride[2]),
idist,
otype,
std::make_tuple(ostride[0], ostride[1], ostride[2]),
odist,
ioffset,
ooffset);
default:
abort();
}
}
// Compute the L-infinity and L-2 distance between two buffers with strides istride and
// length idist between batches to a buffer with strides ostride and length odist between
// batches. Both buffers are of complex type.
// Holds the L-2 and L-infinity norms of a buffer difference.
struct VectorNorms
{
    double l_2   = 0.0;
    double l_inf = 0.0;
};
// Compute the L-infinity and L-2 distance between two strided, batched
// complex buffers.  Indices of elements exceeding linf_cutoff are
// recorded (as (batch, index) pairs) in linf_failures under a mutex.
// NOTE(review): the cutoff test compares the *running max* cur_linf
// rather than the individual rdiff/idiff, so once one element exceeds
// the cutoff, every subsequent element in that partition is also
// recorded -- confirm whether that is intended.
template <typename Tcomplex, typename Tint1, typename Tint2, typename Tint3>
inline VectorNorms distance_1to1_complex(const Tcomplex* input,
const Tcomplex* output,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
std::vector<std::pair<size_t, size_t>>& linf_failures,
const double linf_cutoff,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
double linf = 0.0;
double l2 = 0.0;
// Serializes appends to linf_failures from parallel threads.
std::mutex linf_failure_lock;
// Fast path: when layouts match, the output index equals the input index.
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
// One chunk of the index space per OpenMP thread; linf is max-reduced
// and l2 sum-reduced across threads.
auto partitions = partition_colmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
double cur_linf = 0.0;
double cur_l2 = 0.0;
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const auto idx = compute_index(index, istride, idx_base);
const auto odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
// Real-component difference:
const double rdiff
= std::abs(output[odx + ooffset[0]].real() - input[idx + ioffset[0]].real());
cur_linf = std::max(rdiff, cur_linf);
if(cur_linf > linf_cutoff)
{
std::pair<size_t, size_t> fval(b, idx);
linf_failure_lock.lock();
linf_failures.push_back(fval);
linf_failure_lock.unlock();
}
cur_l2 += rdiff * rdiff;
// Imaginary-component difference:
const double idiff
= std::abs(output[odx + ooffset[0]].imag() - input[idx + ioffset[0]].imag());
cur_linf = std::max(idiff, cur_linf);
if(cur_linf > linf_cutoff)
{
std::pair<size_t, size_t> fval(b, idx);
linf_failure_lock.lock();
linf_failures.push_back(fval);
linf_failure_lock.unlock();
}
cur_l2 += idiff * idiff;
} while(increment_rowmajor(index, length));
linf = std::max(linf, cur_linf);
l2 += cur_l2;
}
}
// l_2 accumulates squared differences; take the root here.
return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 distance between two real-valued buffers.
// The input buffer has strides istride and idist elements between batches;
// the output buffer has strides ostride and odist elements between batches.
// Points exceeding linf_cutoff are appended to linf_failures as
// (batch, index) pairs.
template <typename Tfloat, typename Tint1, typename Tint2, typename Tint3>
inline VectorNorms distance_1to1_real(const Tfloat*                           input,
                                      const Tfloat*                           output,
                                      const Tint1&                            whole_length,
                                      const size_t                            nbatch,
                                      const Tint2&                            istride,
                                      const size_t                            idist,
                                      const Tint3&                            ostride,
                                      const size_t                            odist,
                                      std::vector<std::pair<size_t, size_t>>& linf_failures,
                                      const double                            linf_cutoff,
                                      const std::vector<size_t>&              ioffset,
                                      const std::vector<size_t>&              ooffset)
{
    double     linf = 0.0;
    double     l2   = 0.0;
    std::mutex failure_mutex;

    // When layouts coincide, the input index can be reused for the output.
    const bool same_layout = istride == ostride && idist == odist;
    size_t     in_base     = 0;
    size_t     out_base    = 0;
    const auto partitions  = partition_rowmajor(whole_length);
    for(size_t batch = 0; batch < nbatch; ++batch, in_base += idist, out_base += odist)
    {
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
        for(size_t part = 0; part < partitions.size(); ++part)
        {
            double     part_linf = 0.0;
            double     part_l2   = 0.0;
            auto       index     = partitions[part].first;
            const auto length    = partitions[part].second;
            do
            {
                const auto idx = compute_index(index, istride, in_base);
                const auto odx = same_layout ? idx : compute_index(index, ostride, out_base);
                const double diff
                    = std::abs(output[odx + ooffset[0]] - input[idx + ioffset[0]]);
                part_linf = std::max(diff, part_linf);
                if(part_linf > linf_cutoff)
                {
                    failure_mutex.lock();
                    linf_failures.push_back(std::make_pair(batch, idx));
                    failure_mutex.unlock();
                }
                part_l2 += diff * diff;
            } while(increment_rowmajor(index, length));
            linf = std::max(linf, part_linf);
            l2 += part_l2;
        }
    }
    return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 distance between two buffers with strides
// istride and idist elements between batches versus strides ostride and
// odist elements between batches.  The input is complex-interleaved; the
// output is complex-planar (output0 holds real parts, output1 imaginary
// parts).
template <typename Tval, typename Tint1, typename T2, typename T3>
inline VectorNorms distance_1to2(const std::complex<Tval>*               input,
                                 const Tval*                             output0,
                                 const Tval*                             output1,
                                 const Tint1&                            whole_length,
                                 const size_t                            nbatch,
                                 const T2&                               istride,
                                 const size_t                            idist,
                                 const T3&                               ostride,
                                 const size_t                            odist,
                                 std::vector<std::pair<size_t, size_t>>& linf_failures,
                                 const double                            linf_cutoff,
                                 const std::vector<size_t>&              ioffset,
                                 const std::vector<size_t>&              ooffset)
{
    double     linf = 0.0;
    double     l2   = 0.0;
    std::mutex failure_mutex;

    // Reuse the input index for the output when the layouts coincide.
    const bool same_layout = istride == ostride && idist == odist;
    size_t     in_base     = 0;
    size_t     out_base    = 0;
    const auto partitions  = partition_rowmajor(whole_length);
    for(size_t batch = 0; batch < nbatch; ++batch, in_base += idist, out_base += odist)
    {
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
        for(size_t part = 0; part < partitions.size(); ++part)
        {
            double     part_linf = 0.0;
            double     part_l2   = 0.0;
            auto       index     = partitions[part].first;
            const auto length    = partitions[part].second;
            do
            {
                const auto idx = compute_index(index, istride, in_base);
                const auto odx = same_layout ? idx : compute_index(index, ostride, out_base);
                // Real components live in output0:
                const double rdiff
                    = std::abs(output0[odx + ooffset[0]] - input[idx + ioffset[0]].real());
                part_linf = std::max(rdiff, part_linf);
                if(part_linf > linf_cutoff)
                {
                    failure_mutex.lock();
                    linf_failures.push_back(std::make_pair(batch, idx));
                    failure_mutex.unlock();
                }
                part_l2 += rdiff * rdiff;
                // Imaginary components live in output1:
                const double idiff
                    = std::abs(output1[odx + ooffset[1]] - input[idx + ioffset[0]].imag());
                part_linf = std::max(idiff, part_linf);
                if(part_linf > linf_cutoff)
                {
                    failure_mutex.lock();
                    linf_failures.push_back(std::make_pair(batch, idx));
                    failure_mutex.unlock();
                }
                part_l2 += idiff * idiff;
            } while(increment_rowmajor(index, length));
            linf = std::max(linf, part_linf);
            l2 += part_l2;
        }
    }
    return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 distance between two buffers of dimension
// length, with data layouts given by itype, otype, and precision.  Points
// whose L-infinity difference exceeds linf_cutoff are recorded in
// linf_failures as (batch, index) pairs.
template <typename Tallocator1,
          typename Tallocator2,
          typename Tint1,
          typename Tint2,
          typename Tint3>
inline VectorNorms distance(const std::vector<std::vector<char, Tallocator1>>& input,
                            const std::vector<std::vector<char, Tallocator2>>& output,
                            const Tint1&                                       length,
                            const size_t                                       nbatch,
                            const rocfft_precision                             precision,
                            const rocfft_array_type                            itype,
                            const Tint2&                                       istride,
                            const size_t                                       idist,
                            const rocfft_array_type                            otype,
                            const Tint3&                                       ostride,
                            const size_t                                       odist,
                            std::vector<std::pair<size_t, size_t>>&            linf_failures,
                            const double                                       linf_cutoff,
                            const std::vector<size_t>&                         ioffset,
                            const std::vector<size_t>&                         ooffset)
{
    VectorNorms dist;
    if(itype == otype)
    {
        switch(itype)
        {
        case rocfft_array_type_complex_interleaved:
        case rocfft_array_type_hermitian_interleaved:
            switch(precision)
            {
            case rocfft_precision_single:
                dist = distance_1to1_complex(
                    reinterpret_cast<const std::complex<float>*>(input[0].data()),
                    reinterpret_cast<const std::complex<float>*>(output[0].data()),
                    length,
                    nbatch,
                    istride,
                    idist,
                    ostride,
                    odist,
                    linf_failures,
                    linf_cutoff,
                    ioffset,
                    ooffset);
                break;
            case rocfft_precision_double:
                dist = distance_1to1_complex(
                    reinterpret_cast<const std::complex<double>*>(input[0].data()),
                    reinterpret_cast<const std::complex<double>*>(output[0].data()),
                    length,
                    nbatch,
                    istride,
                    idist,
                    ostride,
                    odist,
                    linf_failures,
                    linf_cutoff,
                    ioffset,
                    ooffset);
                break;
            }
            // Square so partial L-2 norms can be accumulated uniformly; the
            // square root is taken once at the end.
            dist.l_2 *= dist.l_2;
            break;
        case rocfft_array_type_real:
        case rocfft_array_type_complex_planar:
        case rocfft_array_type_hermitian_planar:
            // NB: size_t index avoids a signed/unsigned comparison against
            // input.size().
            for(size_t idx = 0; idx < input.size(); ++idx)
            {
                VectorNorms d;
                switch(precision)
                {
                case rocfft_precision_single:
                    d = distance_1to1_real(reinterpret_cast<const float*>(input[idx].data()),
                                           reinterpret_cast<const float*>(output[idx].data()),
                                           length,
                                           nbatch,
                                           istride,
                                           idist,
                                           ostride,
                                           odist,
                                           linf_failures,
                                           linf_cutoff,
                                           ioffset,
                                           ooffset);
                    break;
                case rocfft_precision_double:
                    d = distance_1to1_real(reinterpret_cast<const double*>(input[idx].data()),
                                           reinterpret_cast<const double*>(output[idx].data()),
                                           length,
                                           nbatch,
                                           istride,
                                           idist,
                                           ostride,
                                           odist,
                                           linf_failures,
                                           linf_cutoff,
                                           ioffset,
                                           ooffset);
                    break;
                }
                dist.l_inf = std::max(d.l_inf, dist.l_inf);
                dist.l_2 += d.l_2 * d.l_2;
            }
            break;
        default:
            throw std::runtime_error("Invalid input and output types.");
            break;
        }
    }
    else if((itype == rocfft_array_type_complex_interleaved
             && otype == rocfft_array_type_complex_planar)
            || (itype == rocfft_array_type_hermitian_interleaved
                && otype == rocfft_array_type_hermitian_planar))
    {
        switch(precision)
        {
        case rocfft_precision_single:
            dist = distance_1to2(reinterpret_cast<const std::complex<float>*>(input[0].data()),
                                 reinterpret_cast<const float*>(output[0].data()),
                                 reinterpret_cast<const float*>(output[1].data()),
                                 length,
                                 nbatch,
                                 istride,
                                 idist,
                                 ostride,
                                 odist,
                                 linf_failures,
                                 linf_cutoff,
                                 ioffset,
                                 ooffset);
            break;
        case rocfft_precision_double:
            dist = distance_1to2(reinterpret_cast<const std::complex<double>*>(input[0].data()),
                                 reinterpret_cast<const double*>(output[0].data()),
                                 reinterpret_cast<const double*>(output[1].data()),
                                 length,
                                 nbatch,
                                 istride,
                                 idist,
                                 ostride,
                                 odist,
                                 linf_failures,
                                 linf_cutoff,
                                 ioffset,
                                 ooffset);
            break;
        }
        dist.l_2 *= dist.l_2;
    }
    else if((itype == rocfft_array_type_complex_planar
             && otype == rocfft_array_type_complex_interleaved)
            || (itype == rocfft_array_type_hermitian_planar
                && otype == rocfft_array_type_hermitian_interleaved))
    {
        // The roles are reversed here: the interleaved data is the output
        // buffer and the planar halves are the input buffer, so strides,
        // distances AND offsets must all be swapped when calling
        // distance_1to2 (previously the offsets were not swapped).
        switch(precision)
        {
        case rocfft_precision_single:
            dist = distance_1to2(reinterpret_cast<const std::complex<float>*>(output[0].data()),
                                 reinterpret_cast<const float*>(input[0].data()),
                                 reinterpret_cast<const float*>(input[1].data()),
                                 length,
                                 nbatch,
                                 ostride,
                                 odist,
                                 istride,
                                 idist,
                                 linf_failures,
                                 linf_cutoff,
                                 ooffset,
                                 ioffset);
            break;
        case rocfft_precision_double:
            dist = distance_1to2(reinterpret_cast<const std::complex<double>*>(output[0].data()),
                                 reinterpret_cast<const double*>(input[0].data()),
                                 reinterpret_cast<const double*>(input[1].data()),
                                 length,
                                 nbatch,
                                 ostride,
                                 odist,
                                 istride,
                                 idist,
                                 linf_failures,
                                 linf_cutoff,
                                 ooffset,
                                 ioffset);
            break;
        }
        dist.l_2 *= dist.l_2;
    }
    else
    {
        throw std::runtime_error("Invalid input and output types.");
    }
    dist.l_2 = sqrt(dist.l_2);
    return dist;
}
// Unroll an arbitrary-dimension distance computation into the 1-, 2- and
// 3-dimensional specializations.
template <typename Tallocator1,
          typename Tallocator2,
          typename Tint1,
          typename Tint2,
          typename Tint3>
inline VectorNorms distance(const std::vector<std::vector<char, Tallocator1>>& input,
                            const std::vector<std::vector<char, Tallocator2>>& output,
                            const std::vector<Tint1>&                          length,
                            const size_t                                       nbatch,
                            const rocfft_precision                             precision,
                            const rocfft_array_type                            itype,
                            const std::vector<Tint2>&                          istride,
                            const size_t                                       idist,
                            const rocfft_array_type                            otype,
                            const std::vector<Tint3>&                          ostride,
                            const size_t                                       odist,
                            std::vector<std::pair<size_t, size_t>>&            linf_failures,
                            const double                                       linf_cutoff,
                            const std::vector<size_t>&                         ioffset,
                            const std::vector<size_t>&                         ooffset)
{
    const auto dim = length.size();
    if(dim == 1)
        return distance(input,
                        output,
                        length[0],
                        nbatch,
                        precision,
                        itype,
                        istride[0],
                        idist,
                        otype,
                        ostride[0],
                        odist,
                        linf_failures,
                        linf_cutoff,
                        ioffset,
                        ooffset);
    if(dim == 2)
        return distance(input,
                        output,
                        std::make_tuple(length[0], length[1]),
                        nbatch,
                        precision,
                        itype,
                        std::make_tuple(istride[0], istride[1]),
                        idist,
                        otype,
                        std::make_tuple(ostride[0], ostride[1]),
                        odist,
                        linf_failures,
                        linf_cutoff,
                        ioffset,
                        ooffset);
    if(dim == 3)
        return distance(input,
                        output,
                        std::make_tuple(length[0], length[1], length[2]),
                        nbatch,
                        precision,
                        itype,
                        std::make_tuple(istride[0], istride[1], istride[2]),
                        idist,
                        otype,
                        std::make_tuple(ostride[0], ostride[1], ostride[2]),
                        odist,
                        linf_failures,
                        linf_cutoff,
                        ioffset,
                        ooffset);
    // Dimensions above 3 are not supported by this test harness.
    abort();
}
// Compute the L-infinity and L-2 norm of a complex-valued buffer with
// strides istride and idist elements between batches.
template <typename Tcomplex, typename T1, typename T2>
inline VectorNorms norm_complex(const Tcomplex*            input,
                                const T1&                  whole_length,
                                const size_t               nbatch,
                                const T2&                  istride,
                                const size_t               idist,
                                const std::vector<size_t>& offset)
{
    double linf = 0.0;
    double l2   = 0.0;

    size_t     batch_base = 0;
    const auto partitions = partition_rowmajor(whole_length);
    for(size_t batch = 0; batch < nbatch; ++batch, batch_base += idist)
    {
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
        for(size_t part = 0; part < partitions.size(); ++part)
        {
            double     part_linf = 0.0;
            double     part_l2   = 0.0;
            auto       index     = partitions[part].first;
            const auto length    = partitions[part].second;
            do
            {
                const auto   pos  = compute_index(index, istride, batch_base) + offset[0];
                const double rval = std::abs(input[pos].real());
                part_linf         = std::max(rval, part_linf);
                part_l2 += rval * rval;
                const double ival = std::abs(input[pos].imag());
                part_linf         = std::max(ival, part_linf);
                part_l2 += ival * ival;
            } while(increment_rowmajor(index, length));
            linf = std::max(linf, part_linf);
            l2 += part_l2;
        }
    }
    return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 norm of a real-valued buffer with strides
// istride and idist elements between batches.
template <typename Tfloat, typename T1, typename T2>
inline VectorNorms norm_real(const Tfloat*              input,
                             const T1&                  whole_length,
                             const size_t               nbatch,
                             const T2&                  istride,
                             const size_t               idist,
                             const std::vector<size_t>& offset)
{
    double linf = 0.0;
    double l2   = 0.0;

    size_t     batch_base = 0;
    const auto partitions = partition_rowmajor(whole_length);
    for(size_t batch = 0; batch < nbatch; ++batch, batch_base += idist)
    {
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
        for(size_t part = 0; part < partitions.size(); ++part)
        {
            double     part_linf = 0.0;
            double     part_l2   = 0.0;
            auto       index     = partitions[part].first;
            const auto length    = partitions[part].second;
            do
            {
                const auto   pos = compute_index(index, istride, batch_base) + offset[0];
                const double val = std::abs(input[pos]);
                part_linf        = std::max(val, part_linf);
                part_l2 += val * val;
            } while(increment_rowmajor(index, length));
            linf = std::max(linf, part_linf);
            l2 += part_l2;
        }
    }
    return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 norm of a buffer with strides istride and
// idist elements between batches.  The data format is given by precision and
// itype; planar/real data accumulates norms across all component buffers.
template <typename Tallocator1, typename T1, typename T2>
inline VectorNorms norm(const std::vector<std::vector<char, Tallocator1>>& input,
                        const T1&                                          length,
                        const size_t                                       nbatch,
                        const rocfft_precision                             precision,
                        const rocfft_array_type                            itype,
                        const T2&                                          istride,
                        const size_t                                       idist,
                        const std::vector<size_t>&                         offset)
{
    VectorNorms norm;
    switch(itype)
    {
    case rocfft_array_type_complex_interleaved:
    case rocfft_array_type_hermitian_interleaved:
        switch(precision)
        {
        case rocfft_precision_single:
            norm = norm_complex(reinterpret_cast<const std::complex<float>*>(input[0].data()),
                                length,
                                nbatch,
                                istride,
                                idist,
                                offset);
            break;
        case rocfft_precision_double:
            norm = norm_complex(reinterpret_cast<const std::complex<double>*>(input[0].data()),
                                length,
                                nbatch,
                                istride,
                                idist,
                                offset);
            break;
        }
        // Square so partial L-2 norms accumulate uniformly; the square root
        // is taken once at the end.
        norm.l_2 *= norm.l_2;
        break;
    case rocfft_array_type_real:
    case rocfft_array_type_complex_planar:
    case rocfft_array_type_hermitian_planar:
        // NB: size_t index avoids a signed/unsigned comparison against
        // input.size().
        for(size_t idx = 0; idx < input.size(); ++idx)
        {
            VectorNorms n;
            switch(precision)
            {
            case rocfft_precision_single:
                n = norm_real(reinterpret_cast<const float*>(input[idx].data()),
                              length,
                              nbatch,
                              istride,
                              idist,
                              offset);
                break;
            case rocfft_precision_double:
                n = norm_real(reinterpret_cast<const double*>(input[idx].data()),
                              length,
                              nbatch,
                              istride,
                              idist,
                              offset);
                break;
            }
            norm.l_inf = std::max(n.l_inf, norm.l_inf);
            norm.l_2 += n.l_2 * n.l_2;
        }
        break;
    default:
        throw std::runtime_error("Invalid data type");
        break;
    }
    norm.l_2 = sqrt(norm.l_2);
    return norm;
}
// Unroll an arbitrary-dimension norm call into the 1-, 2- and 3-dimensional
// specializations.
template <typename Tallocator1, typename T1, typename T2>
inline VectorNorms norm(const std::vector<std::vector<char, Tallocator1>>& input,
                        const std::vector<T1>&                             length,
                        const size_t                                       nbatch,
                        const rocfft_precision                             precision,
                        const rocfft_array_type                            type,
                        const std::vector<T2>&                             stride,
                        const size_t                                       dist,
                        const std::vector<size_t>&                         offset)
{
    const auto dim = length.size();
    if(dim == 1)
        return norm(input, length[0], nbatch, precision, type, stride[0], dist, offset);
    if(dim == 2)
        return norm(input,
                    std::make_tuple(length[0], length[1]),
                    nbatch,
                    precision,
                    type,
                    std::make_tuple(stride[0], stride[1]),
                    dist,
                    offset);
    if(dim == 3)
        return norm(input,
                    std::make_tuple(length[0], length[1], length[2]),
                    nbatch,
                    precision,
                    type,
                    std::make_tuple(stride[0], stride[1], stride[2]),
                    dist,
                    offset);
    // Dimensions above 3 are not supported by this test harness.
    abort();
}
// Given a buffer of complex values stored in a vector of chars (or two
// vectors in the case of planar format), impose Hermitian symmetry so the
// data is a valid spectrum of a real-valued signal.
// NB: length is the dimensions of the FFT, not the data layout dimensions.
template <typename Tfloat, typename Tallocator, typename Tsize>
inline void impose_hermitian_symmetry(std::vector<std::vector<char, Tallocator>>& vals,
                                      const std::vector<Tsize>& length,
                                      const std::vector<Tsize>& istride,
                                      const Tsize               idist,
                                      const Tsize               nbatch)
{
    switch(vals.size())
    {
    case 1:
    {
        // Complex interleaved data
        for(Tsize ibatch = 0; ibatch < nbatch; ++ibatch)
        {
            auto data = ((std::complex<Tfloat>*)vals[0].data()) + ibatch * idist;
            switch(length.size())
            {
            case 3:
                // Nyquist points must be real-valued:
                if(length[2] % 2 == 0)
                {
                    data[istride[2] * (length[2] / 2)].imag(0.0);
                }
                if(length[0] % 2 == 0 && length[2] % 2 == 0)
                {
                    data[istride[0] * (length[0] / 2) + istride[2] * (length[2] / 2)].imag(0.0);
                }
                if(length[1] % 2 == 0 && length[2] % 2 == 0)
                {
                    data[istride[1] * (length[1] / 2) + istride[2] * (length[2] / 2)].imag(0.0);
                }
                if(length[0] % 2 == 0 && length[1] % 2 == 0 && length[2] % 2 == 0)
                {
                    // clang format off
                    data[istride[0] * (length[0] / 2) + istride[1] * (length[1] / 2)
                         + istride[2] * (length[2] / 2)]
                        .imag(0.0);
                    // clang format off
                }
                // y-axis:
                for(Tsize j = 1; j < (length[1] + 1) / 2; ++j)
                {
                    data[istride[1] * (length[1] - j)] = std::conj(data[istride[1] * j]);
                }
                if(length[0] % 2 == 0)
                {
                    // y-axis at x-nyquist
                    for(Tsize j = 1; j < (length[1] + 1) / 2; ++j)
                    {
                        // clang format off
                        data[istride[0] * (length[0] / 2) + istride[1] * (length[1] - j)]
                            = std::conj(data[istride[0] * (length[0] / 2) + istride[1] * j]);
                        // clang format on
                    }
                }
                // x-axis:
                for(Tsize i = 1; i < (length[0] + 1) / 2; ++i)
                {
                    data[istride[0] * (length[0] - i)] = std::conj(data[istride[0] * i]);
                }
                if(length[1] % 2 == 0)
                {
                    // x-axis at y-nyquist
                    for(Tsize i = 1; i < (length[0] + 1) / 2; ++i)
                    {
                        // clang format off
                        data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)]
                            = std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)]);
                        // clang format on
                    }
                }
                // x-y plane:
                for(Tsize i = 1; i < (length[0] + 1) / 2; ++i)
                {
                    for(Tsize j = 1; j < length[1]; ++j)
                    {
                        // clang format off
                        data[istride[0] * (length[0] - i) + istride[1] * (length[1] - j)]
                            = std::conj(data[istride[0] * i + istride[1] * j]);
                        // clang format on
                    }
                }
                if(length[2] % 2 == 0)
                {
                    // x-axis at z-nyquist
                    for(Tsize i = 1; i < (length[0] + 1) / 2; ++i)
                    {
                        data[istride[0] * (length[0] - i) + istride[2] * (length[2] / 2)]
                            = std::conj(data[istride[0] * i + istride[2] * (length[2] / 2)]);
                    }
                    if(length[1] % 2 == 0)
                    {
                        // x-axis at yz-nyquist
                        // NB: this loop previously duplicated the z-nyquist
                        // loop above, omitting the y-nyquist stride term;
                        // it now addresses the yz-nyquist line as the
                        // comment promises.
                        for(Tsize i = 1; i < (length[0] + 1) / 2; ++i)
                        {
                            // clang format off
                            data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)
                                 + istride[2] * (length[2] / 2)]
                                = std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)
                                                 + istride[2] * (length[2] / 2)]);
                            // clang format on
                        }
                    }
                    // y-axis: at z-nyquist
                    for(Tsize j = 1; j < (length[1] + 1) / 2; ++j)
                    {
                        data[istride[1] * (length[1] - j) + istride[2] * (length[2] / 2)]
                            = std::conj(data[istride[1] * j + istride[2] * (length[2] / 2)]);
                    }
                    if(length[0] % 2 == 0)
                    {
                        // y-axis: at xz-nyquist
                        for(Tsize j = 1; j < (length[1] + 1) / 2; ++j)
                        {
                            // clang format off
                            data[istride[0] * (length[0] / 2) + istride[1] * (length[1] - j)
                                 + istride[2] * (length[2] / 2)]
                                = std::conj(data[istride[0] * (length[0] / 2) + istride[1] * j
                                                 + istride[2] * (length[2] / 2)]);
                            // clang format on
                        }
                    }
                    // x-y plane: at z-nyquist
                    for(Tsize i = 1; i < (length[0] + 1) / 2; ++i)
                    {
                        for(Tsize j = 1; j < length[1]; ++j)
                        {
                            // clang format off
                            data[istride[0] * (length[0] - i) + istride[1] * (length[1] - j)
                                 + istride[2] * (length[2] / 2)]
                                = std::conj(data[istride[0] * i + istride[1] * j
                                                 + istride[2] * (length[2] / 2)]);
                            // clang format on
                        }
                    }
                }
                // fall-through
            case 2:
                if(length[1] % 2 == 0)
                {
                    data[istride[1] * (length[1] / 2)].imag(0.0);
                }
                if(length[0] % 2 == 0 && length[1] % 2 == 0)
                {
                    data[istride[0] * (length[0] / 2) + istride[1] * (length[1] / 2)].imag(0.0);
                }
                for(Tsize i = 1; i < (length[0] + 1) / 2; ++i)
                {
                    data[istride[0] * (length[0] - i)] = std::conj(data[istride[0] * i]);
                }
                if(length[1] % 2 == 0)
                {
                    for(Tsize i = 1; i < (length[0] + 1) / 2; ++i)
                    {
                        data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)]
                            = std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)]);
                    }
                }
                // fall-through
            case 1:
                // The DC term and (for even lengths) the x-nyquist term must
                // be real-valued.
                data[0].imag(0.0);
                if(length[0] % 2 == 0)
                {
                    data[istride[0] * (length[0] / 2)].imag(0.0);
                }
                break;
            default:
                throw std::runtime_error("Invalid dimension for imposeHermitianSymmetry");
                break;
            }
        }
        break;
    }
    case 2:
    {
        // Complex planar data: only the imaginary component buffer needs
        // zeroing at the DC/nyquist points.
        // NOTE(review): 1-D planar only zeroes these imaginary parts;
        // conjugate symmetry of the remaining elements is not imposed here.
        for(Tsize ibatch = 0; ibatch < nbatch; ++ibatch)
        {
            auto idata = ((Tfloat*)vals[1].data()) + ibatch * idist;
            switch(length.size())
            {
            case 3:
                throw std::runtime_error("Not implemented");
                // FIXME: implement
            case 2:
                throw std::runtime_error("Not implemented");
                // FIXME: implement
            case 1:
                idata[0] = 0.0;
                if(length[0] % 2 == 0)
                {
                    idata[istride[0] * (length[0] / 2)] = 0.0;
                }
                break;
            default:
                throw std::runtime_error("Invalid dimension for imposeHermitianSymmetry");
                break;
            }
        }
        break;
    }
    default:
        throw std::runtime_error("Invalid data type");
        break;
    }
}
// Given an array type and transform length, strides, etc., load random
// floats in [0,1] into the input array of floats/doubles or complex
// floats/doubles, which is stored in a vector of chars (or two vectors in
// the case of planar format).  The generator is seeded from the element's
// linear index, so the data is deterministic for a given layout.
// NB: lengths are the memory lengths (i.e. not the transform parameters).
template <typename Tfloat, typename Tallocator, typename Tint1>
inline void set_input(std::vector<std::vector<char, Tallocator>>& input,
                      const rocfft_array_type                     itype,
                      const Tint1&                                whole_length,
                      const Tint1&                                istride,
                      const size_t                                idist,
                      const size_t                                nbatch)
{
    switch(itype)
    {
    case rocfft_array_type_complex_interleaved:
    case rocfft_array_type_hermitian_interleaved:
    {
        auto   idata      = (std::complex<Tfloat>*)input[0].data();
        size_t i_base     = 0;
        auto   partitions = partition_rowmajor(whole_length);
        // NB: size_t batch counter avoids a signed/unsigned comparison.
        for(size_t b = 0; b < nbatch; b++, i_base += idist)
        {
#pragma omp parallel for num_threads(partitions.size())
            for(size_t part = 0; part < partitions.size(); ++part)
            {
                auto         index  = partitions[part].first;
                const auto   length = partitions[part].second;
                std::mt19937 gen(compute_index(index, istride, i_base));
                do
                {
                    const auto   i = compute_index(index, istride, i_base);
                    const Tfloat x = (Tfloat)gen() / (Tfloat)gen.max();
                    const Tfloat y = (Tfloat)gen() / (Tfloat)gen.max();
                    const std::complex<Tfloat> val(x, y);
                    idata[i] = val;
                } while(increment_rowmajor(index, length));
            }
        }
        break;
    }
    case rocfft_array_type_complex_planar:
    case rocfft_array_type_hermitian_planar:
    {
        auto   ireal      = (Tfloat*)input[0].data();
        auto   iimag      = (Tfloat*)input[1].data();
        size_t i_base     = 0;
        auto   partitions = partition_rowmajor(whole_length);
        for(size_t b = 0; b < nbatch; b++, i_base += idist)
        {
#pragma omp parallel for num_threads(partitions.size())
            for(size_t part = 0; part < partitions.size(); ++part)
            {
                auto         index  = partitions[part].first;
                const auto   length = partitions[part].second;
                std::mt19937 gen(compute_index(index, istride, i_base));
                do
                {
                    const auto i = compute_index(index, istride, i_base);
                    // NB: draw into named temporaries; passing two gen()
                    // calls directly to the complex constructor has
                    // unspecified evaluation order, which could make planar
                    // data differ from interleaved data across compilers.
                    const Tfloat x = (Tfloat)gen() / (Tfloat)gen.max();
                    const Tfloat y = (Tfloat)gen() / (Tfloat)gen.max();
                    ireal[i] = x;
                    iimag[i] = y;
                } while(increment_rowmajor(index, length));
            }
        }
        break;
    }
    case rocfft_array_type_real:
    {
        auto   idata      = (Tfloat*)input[0].data();
        size_t i_base     = 0;
        auto   partitions = partition_rowmajor(whole_length);
        for(size_t b = 0; b < nbatch; b++, i_base += idist)
        {
#pragma omp parallel for num_threads(partitions.size())
            for(size_t part = 0; part < partitions.size(); ++part)
            {
                auto         index  = partitions[part].first;
                const auto   length = partitions[part].second;
                std::mt19937 gen(compute_index(index, istride, i_base));
                do
                {
                    const auto   i   = compute_index(index, istride, i_base);
                    const Tfloat val = (Tfloat)gen() / (Tfloat)gen.max();
                    idata[i] = val;
                } while(increment_rowmajor(index, length));
            }
        }
        break;
    }
    default:
        throw std::runtime_error("Input layout format not yet supported");
        break;
    }
}
// Unroll set_input over dimensions 1, 2 and 3.
template <typename Tfloat, typename Tallocator>
inline void set_input(std::vector<std::vector<char, Tallocator>>& input,
                      const rocfft_array_type                     itype,
                      const std::vector<size_t>&                  length,
                      const std::vector<size_t>&                  istride,
                      const size_t                                idist,
                      const size_t                                nbatch)
{
    const auto dim = length.size();
    if(dim == 1)
    {
        set_input<Tfloat>(input, itype, length[0], istride[0], idist, nbatch);
    }
    else if(dim == 2)
    {
        set_input<Tfloat>(input,
                          itype,
                          std::make_tuple(length[0], length[1]),
                          std::make_tuple(istride[0], istride[1]),
                          idist,
                          nbatch);
    }
    else if(dim == 3)
    {
        set_input<Tfloat>(input,
                          itype,
                          std::make_tuple(length[0], length[1], length[2]),
                          std::make_tuple(istride[0], istride[1], istride[2]),
                          idist,
                          nbatch);
    }
    else
    {
        // Dimensions above 3 are not supported by this test harness.
        abort();
    }
}
// Compute the input distance between batches for a transform, given the
// placement, transform type, and input data layout (row-major).
template <typename Tsize>
inline size_t set_idist(const rocfft_result_placement place,
                        const rocfft_transform_type   transformType,
                        const std::vector<Tsize>&     length,
                        const std::vector<Tsize>&     istride)
{
    const Tsize dim = length.size();

    // In-place 1D real-to-complex transforms need extra padding.
    if(transformType == rocfft_transform_type_real_forward && dim == 1
       && place == rocfft_placement_inplace)
        return 2 * (length[0] / 2 + 1) * istride[0];

    // 1D complex-to-real transforms read a half-spectrum.
    if(transformType == rocfft_transform_type_real_inverse && dim == 1)
        return (length[0] / 2 + 1) * istride[0];

    const bool half_spectrum = (transformType == rocfft_transform_type_real_inverse);
    Tsize      dist          = half_spectrum ? (length[dim - 1] / 2 + 1) * istride[dim - 1]
                                             : length[dim - 1] * istride[dim - 1];
    for(Tsize i = 0; i + 1 < dim; ++i)
        dist = std::max(dist, length[i] * istride[i]);
    return dist;
}
// Compute the output distance between batches for a transform, given the
// placement, transform type, and output data layout (row-major).
template <typename Tsize>
inline size_t set_odist(const rocfft_result_placement place,
                        const rocfft_transform_type   transformType,
                        const std::vector<Tsize>&     length,
                        const std::vector<Tsize>&     ostride)
{
    const Tsize dim = length.size();

    // In-place 1D complex-to-real transforms need extra padding.
    if(transformType == rocfft_transform_type_real_inverse && dim == 1
       && place == rocfft_placement_inplace)
        return 2 * (length[0] / 2 + 1) * ostride[0];

    // 1D real-to-complex transforms write a half-spectrum.
    if(transformType == rocfft_transform_type_real_forward && dim == 1)
        return (length[0] / 2 + 1) * ostride[0];

    const bool half_spectrum = (transformType == rocfft_transform_type_real_forward);
    Tsize      dist          = half_spectrum ? (length[dim - 1] / 2 + 1) * ostride[dim - 1]
                                             : length[dim - 1] * ostride[dim - 1];
    for(Tsize i = 0; i + 1 < dim; ++i)
        dist = std::max(dist, length[i] * ostride[i]);
    return dist;
}
// Given a data type and precision, the distance between batches, and the
// batch size, allocate the required host buffer(s): one buffer per entry of
// size (two for planar formats, one otherwise), each sized in bytes via
// var_size.
template <typename Allocator = std::allocator<char>>
inline std::vector<std::vector<char, Allocator>> allocate_host_buffer(
    const rocfft_precision precision, const rocfft_array_type type, const std::vector<size_t>& size)
{
    std::vector<std::vector<char, Allocator>> buffers(size.size());
    // NB: size_t index avoids a signed/unsigned comparison with size.size().
    for(size_t i = 0; i < size.size(); ++i)
    {
        buffers[i].resize(size[i] * var_size<size_t>(precision, type));
    }
    return buffers;
}
// Given a data type and dimensions, fill the buffer, imposing Hermitian
// symmetry if necessary.
// NB: length is the logical size of the FFT, and not necessarily the data
// dimensions.
template <typename Allocator = std::allocator<char>>
inline std::vector<std::vector<char, Allocator>> compute_input(const rocfft_params& params)
{
    auto input = allocate_host_buffer<Allocator>(params.precision, params.itype, params.isize);

    // Start from an all-zero buffer.
    for(auto& buf : input)
        std::fill(buf.begin(), buf.end(), 0.0);

    // Fill with pseudo-random data at the requested precision.
    if(params.precision == rocfft_precision_double)
        set_input<double>(
            input, params.itype, params.ilength(), params.istride, params.idist, params.nbatch);
    else if(params.precision == rocfft_precision_single)
        set_input<float>(
            input, params.itype, params.ilength(), params.istride, params.idist, params.nbatch);

    // Hermitian input data must actually be Hermitian-symmetric.
    if(params.itype == rocfft_array_type_hermitian_interleaved
       || params.itype == rocfft_array_type_hermitian_planar)
    {
        if(params.precision == rocfft_precision_double)
            impose_hermitian_symmetry<double>(
                input, params.length, params.istride, params.idist, params.nbatch);
        else if(params.precision == rocfft_precision_single)
            impose_hermitian_symmetry<float>(
                input, params.length, params.istride, params.idist, params.nbatch);
    }
    return input;
}
// Check that the input and output array types are valid and mutually
// consistent for the given transform type and placement.  Throws
// std::runtime_error on any invalid combination.
inline void check_iotypes(const rocfft_result_placement place,
                          const rocfft_transform_type   transformType,
                          const rocfft_array_type       itype,
                          const rocfft_array_type       otype)
{
    switch(itype)
    {
    case rocfft_array_type_complex_interleaved:
    case rocfft_array_type_complex_planar:
    case rocfft_array_type_hermitian_interleaved:
    case rocfft_array_type_hermitian_planar:
    case rocfft_array_type_real:
        break;
    default:
        throw std::runtime_error("Invalid Input array type format");
    }
    switch(otype)
    {
    case rocfft_array_type_complex_interleaved:
    case rocfft_array_type_complex_planar:
    case rocfft_array_type_hermitian_interleaved:
    case rocfft_array_type_hermitian_planar:
    case rocfft_array_type_real:
        break;
    default:
        // NB: this validates the output type; the message previously said
        // "Input" by mistake.
        throw std::runtime_error("Invalid Output array type format");
    }
    // Check that format choices are supported
    if(transformType != rocfft_transform_type_real_forward
       && transformType != rocfft_transform_type_real_inverse)
    {
        if(place == rocfft_placement_inplace && itype != otype)
        {
            throw std::runtime_error(
                "In-place transforms must have identical input and output types");
        }
    }
    bool okformat = true;
    switch(itype)
    {
    case rocfft_array_type_complex_interleaved:
    case rocfft_array_type_complex_planar:
        okformat = (otype == rocfft_array_type_complex_interleaved
                    || otype == rocfft_array_type_complex_planar);
        break;
    case rocfft_array_type_hermitian_interleaved:
    case rocfft_array_type_hermitian_planar:
        okformat = otype == rocfft_array_type_real;
        break;
    case rocfft_array_type_real:
        okformat = (otype == rocfft_array_type_hermitian_interleaved
                    || otype == rocfft_array_type_hermitian_planar);
        break;
    default:
        throw std::runtime_error("Invalid Input array type format");
    }
    switch(otype)
    {
    case rocfft_array_type_complex_interleaved:
    case rocfft_array_type_complex_planar:
    case rocfft_array_type_hermitian_interleaved:
    case rocfft_array_type_hermitian_planar:
    case rocfft_array_type_real:
        break;
    default:
        okformat = false;
    }
    if(!okformat)
    {
        throw std::runtime_error("Invalid combination of Input/Output array type formats");
    }
}
// Check that the input and output types are consistent. If they are unset, assign
// default values based on the transform type.
inline void check_set_iotypes(const rocfft_result_placement place,
const rocfft_transform_type transformType,
rocfft_array_type& itype,
rocfft_array_type& otype)
{
if(itype == rocfft_array_type_unset)
{
switch(transformType)
{
case rocfft_transform_type_complex_forward:
case rocfft_transform_type_complex_inverse:
itype = rocfft_array_type_complex_interleaved;
break;
case rocfft_transform_type_real_forward:
itype = rocfft_array_type_real;
break;
case rocfft_transform_type_real_inverse:
itype = rocfft_array_type_hermitian_interleaved;
break;
default:
throw std::runtime_error("Invalid transform type");
}
}
if(otype == rocfft_array_type_unset)
{
switch(transformType)
{
case rocfft_transform_type_complex_forward:
case rocfft_transform_type_complex_inverse:
otype = rocfft_array_type_complex_interleaved;
break;
case rocfft_transform_type_real_forward:
otype = rocfft_array_type_hermitian_interleaved;
break;
case rocfft_transform_type_real_inverse:
otype = rocfft_array_type_real;
break;
default:
throw std::runtime_error("Invalid transform type");
}
}
check_iotypes(place, transformType, itype, otype);
}
#endif
|
backprop.c | /*
libdeep - a library for deep learning
Copyright (C) 2013-2017 Bob Mottram <bob@freedombone.net>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE HOLDERS OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "backprop.h"
/**
 * @brief Initialise a backprop neural net
 * @param net Backprop neural net object
 * @param no_of_inputs The number of input units
 * @param no_of_hiddens The number of units in each hidden layer
 * @param hidden_layers The number of hidden layers
 * @param no_of_outputs The number of output units
 * @param random_seed The random number generator seed
 * @returns zero on success, a distinct negative code (-1..-11) identifying
 *          which allocation or neuron initialisation failed
 */
int bp_init(bp * net,
            int no_of_inputs,
            int no_of_hiddens,
            int hidden_layers,
            int no_of_outputs,
            unsigned int * random_seed)
{
    bp_neuron * n;

    /* default training hyperparameters */
    net->learning_rate = 0.2f;
    net->noise = 0.0f;
    net->random_seed = *random_seed;
    /* errors start as "unknown" until the first backprop pass */
    net->backprop_error = DEEPLEARN_UNKNOWN_ERROR;
    net->backprop_error_average = DEEPLEARN_UNKNOWN_ERROR;
    net->backprop_error_total = DEEPLEARN_UNKNOWN_ERROR;
    net->itterations = 0;
    net->pruning_cycle = 0;
    net->pruning_rate = 0.1f;
    net->dropout_percent = 20;

    /* allocate the array of input unit pointers */
    net->no_of_inputs = no_of_inputs;
    NEURON_ARRAY_ALLOC(net->inputs, no_of_inputs);
    if (!net->inputs)
        return -1;

    net->no_of_hiddens = no_of_hiddens;
    net->no_of_outputs = no_of_outputs;
    net->hidden_layers = hidden_layers;
    /* one pointer array per hidden layer */
    NEURON_LAYERS_ALLOC(net->hiddens, hidden_layers);
    if (!net->hiddens)
        return -2;
    COUNTDOWN(l, hidden_layers) {
        NEURON_ARRAY_ALLOC(net->hiddens[l], HIDDENS_IN_LAYER(net,l));
        if (!net->hiddens[l])
            return -3;
    }
    NEURON_ARRAY_ALLOC(net->outputs, no_of_outputs);
    if (!net->outputs)
        return -4;

    /* create inputs */
    COUNTDOWN(i, net->no_of_inputs) {
        NEURONALLOC(net->inputs[i]);
        if (!net->inputs[i])
            return -5;
        if (bp_neuron_init(net->inputs[i], 1, random_seed) != 0)
            return -6;
    }

    /* create hiddens */
    COUNTUP(l, hidden_layers) {
        COUNTUP(i, HIDDENS_IN_LAYER(net,l)) {
            net->hiddens[l][i] = (bp_neuron*)malloc(sizeof(bp_neuron));
            if (!net->hiddens[l][i])
                return -7;
            n = net->hiddens[l][i];
            if (l == 0) {
                /* first hidden layer takes its inputs from the input layer */
                if (bp_neuron_init(n, no_of_inputs, random_seed) != 0)
                    return -8;
                /* connect to input layer */
                COUNTDOWN(j, net->no_of_inputs)
                    bp_neuron_add_connection(n, j, net->inputs[j]);
            }
            else {
                /* deeper layers take inputs from the previous hidden layer */
                if (bp_neuron_init(n, HIDDENS_IN_LAYER(net,l-1),
                                   random_seed) != 0)
                    return -9;
                /* connect to previous hidden layer */
                COUNTDOWN(j, HIDDENS_IN_LAYER(net,l-1))
                    bp_neuron_add_connection(n, j, net->hiddens[l-1][j]);
            }
        }
    }

    /* create outputs, fully connected to the last hidden layer */
    COUNTDOWN(i, net->no_of_outputs) {
        NEURONALLOC(net->outputs[i]);
        if (!net->outputs[i])
            return -10;
        n = net->outputs[i];
        if (bp_neuron_init(n, HIDDENS_IN_LAYER(net,hidden_layers-1),
                           random_seed) != 0)
            return -11;
        COUNTDOWN(j, HIDDENS_IN_LAYER(net,hidden_layers-1))
            bp_neuron_add_connection(n, j, net->hiddens[hidden_layers-1][j]);
    }
    /* NOTE(review): on a mid-way failure the partially built net is not
       freed here; presumably the caller is expected to call bp_free — confirm */
    return 0;
}
/**
 * @brief Deallocate the memory for a backprop neural net object
 * @param net Backprop neural net object
 */
void bp_free(bp * net)
{
    /* free the input units and their containing array */
    COUNTDOWN(i, net->no_of_inputs) {
        bp_neuron_free(net->inputs[i]);
        free(net->inputs[i]);
        net->inputs[i] = 0;  /* avoid a dangling pointer */
    }
    free(net->inputs);

    /* free every hidden layer: units first, then the layer array */
    COUNTDOWN(l, net->hidden_layers) {
        COUNTDOWN(i, HIDDENS_IN_LAYER(net,l)) {
            bp_neuron_free(net->hiddens[l][i]);
            free(net->hiddens[l][i]);
            net->hiddens[l][i] = 0;
        }
        free(net->hiddens[l]);
        net->hiddens[l] = 0;
    }
    free(net->hiddens);

    /* free the output units and their containing array */
    COUNTDOWN(i, net->no_of_outputs) {
        bp_neuron_free(net->outputs[i]);
        free(net->outputs[i]);
        net->outputs[i] = 0;
    }
    free(net->outputs);
}
/**
 * @brief Propagates the current inputs through the layers of the network
 * @param net Backprop neural net object
 * @param learning Non-zero if learning (enables dropout)
 */
void bp_feed_forward(bp * net, int learning)
{
    unsigned int drop_percent = 0;

    /* dropout is only applied during training */
    if (learning != 0)
        drop_percent = (unsigned int)(net->dropout_percent*100);
    /* NOTE(review): dropout_percent looks like a 0..100 percentage elsewhere
       (bp_dropouts divides by 100); the *100 here suggests
       bp_neuron_feedForward expects a value scaled out of 10000 — confirm
       against bp_neuron_feedForward */

    /* for each hidden layer, input side towards output side */
    COUNTUP(l, net->hidden_layers) {
        /* For each unit within the layer */
#pragma omp parallel for schedule(static) num_threads(DEEPLEARN_THREADS)
        COUNTDOWN(i, HIDDENS_IN_LAYER(net,l))
            bp_neuron_feedForward(net->hiddens[l][i],
                                  net->noise, drop_percent,
                                  &net->random_seed);
    }

    /* for each unit in the output layer */
    COUNTDOWN(i, net->no_of_outputs)
        bp_neuron_feedForward(net->outputs[i], net->noise, drop_percent,
                              &net->random_seed);
}
/**
 * @brief Propagates the current inputs through a given number of
 *        layers of the network
 * @param net Backprop neural net object
 * @param layers The number of layers to propagate through; a value
 *        greater than the number of hidden layers also feeds the
 *        output layer
 */
void bp_feed_forward_layers(bp * net, int layers)
{
    /* NOTE(review): unlike bp_feed_forward, dropout is applied
       unconditionally here (no learning flag) — confirm intended */
    unsigned int drop_percent =
        (unsigned int)(net->dropout_percent*100);

    /* for each hidden layer */
    COUNTUP(l, layers) {
        /* if this layer is a hidden layer */
        if (l < net->hidden_layers) {
            /* For each unit within the layer */
#pragma omp parallel for schedule(static) num_threads(DEEPLEARN_THREADS)
            COUNTDOWN(i, HIDDENS_IN_LAYER(net,l))
                bp_neuron_feedForward(net->hiddens[l][i],
                                      net->noise, drop_percent,
                                      &net->random_seed);
        }
        else {
            /* For each unit within the output layer */
#pragma omp parallel for schedule(static) num_threads(DEEPLEARN_THREADS)
            COUNTDOWN(i, net->no_of_outputs)
                bp_neuron_feedForward(net->outputs[i],
                                      net->noise, drop_percent,
                                      &net->random_seed);
        }
    }
}
/**
 * @brief back-propogate errors from the output layer towards the input layer
 * @param net Backprop neural net object
 * @param current_hidden_layer The hidden layer currently being trained;
 *        layers below (current_hidden_layer - 1) are left untouched,
 *        which supports layer-wise pre-training
 */
void bp_backprop(bp * net, int current_hidden_layer)
{
    int neuron_count=0;
    int start_hidden_layer = current_hidden_layer-1;
    float errorPercent=0;

    /* clear all previous backprop errors */
    COUNTDOWN(i, net->no_of_inputs)
        net->inputs[i]->backprop_error = 0;

    /* for every hidden layer at or above the start layer */
    if (start_hidden_layer < 0)
        start_hidden_layer = 0;
    FOR(l, start_hidden_layer, net->hidden_layers) {
        /* For each unit within the layer */
        COUNTDOWN(i, HIDDENS_IN_LAYER(net,l))
            net->hiddens[l][i]->backprop_error = 0;
    }

    /* now back-propogate the error from the output units */
    net->backprop_error_total = 0;
    /* for every output unit */
#pragma omp parallel for schedule(static) num_threads(DEEPLEARN_THREADS)
    COUNTDOWN(i, net->no_of_outputs) {
        bp_neuron_backprop(net->outputs[i]);
    }
    /* accumulate serially, outside the parallel region, to avoid races */
    COUNTDOWN(i, net->no_of_outputs) {
        /* update the total error which is used to assess
           network performance */
        net->backprop_error_total += net->outputs[i]->backprop_error;
        errorPercent += fabs(net->outputs[i]->backprop_error);
    }
    neuron_count += net->no_of_outputs;

    /* convert summed error to an overall percentage */
    errorPercent = errorPercent * 100 /
        (NEURON_RANGE*net->no_of_outputs);

    /* error on the output units */
    net->backprop_error = fabs(net->backprop_error_total / net->no_of_outputs);

    /* update the running average (exponential moving average with
       a 0.001 update weight) */
    if (net->backprop_error_average == DEEPLEARN_UNKNOWN_ERROR) {
        net->backprop_error_average = net->backprop_error;
        net->backprop_error_percent = errorPercent;
    }
    else {
        net->backprop_error_average =
            (net->backprop_error_average*0.999f) +
            (net->backprop_error*0.001f);
        net->backprop_error_percent =
            (net->backprop_error_percent*0.999f) +
            (errorPercent*0.001f);
    }

    /* back-propogate through the hidden layers, top-most first */
    for (int l = net->hidden_layers-1; l >= start_hidden_layer; l--) {
        /* for every unit in the hidden layer */
#pragma omp parallel for schedule(static) num_threads(DEEPLEARN_THREADS)
        COUNTDOWN(i, HIDDENS_IN_LAYER(net,l)) {
            bp_neuron_backprop(net->hiddens[l][i]);
        }
        COUNTDOWN(i, HIDDENS_IN_LAYER(net,l)) {
            /* update the total error which is used to assess
               network performance */
            net->backprop_error_total += net->hiddens[l][i]->backprop_error;
        }
        neuron_count += HIDDENS_IN_LAYER(net,l);
    }

    /* overall average error */
    net->backprop_error_total =
        fabs(net->backprop_error_total / neuron_count);

    /* increment the number of training itterations */
    if (net->itterations < UINT_MAX)
        net->itterations++;
}
/**
 * @brief Reprojects the input layer from a given hidden layer neuron
 *        This is like feedforward in reverse, and allows you
 *        to visualise what a hidden layer neuron is representing
 * @param net Backprop neural net object
 * @param layer The hidden layer within which the neuron resides
 * @param neuron_index The hidden layer index of the neuron to be reprojected
 */
void bp_reproject(bp * net, int layer, int neuron_index)
{
    bp_neuron * n;

    /* clear all previous reprojected values on the inputs */
    COUNTDOWN(i, net->no_of_inputs)
        net->inputs[i]->value_reprojected = 0;

    /* clear reprojected values on every hidden layer below the target */
    COUNTDOWN(l, layer) {
        /* For each unit within the layer */
        COUNTDOWN(i, HIDDENS_IN_LAYER(net,l))
            net->hiddens[l][i]->value_reprojected = 0;
    }

    /* set the chosen neuron fully active and push its weights back one layer */
    n = net->hiddens[layer][neuron_index];
    n->value_reprojected = NEURON_HIGH;
    bp_neuron_reproject(n);

    if (layer > 0) {
        /* apply the sigmoid function in the previous layer,
           as with feedforward */
        COUNTDOWN(i, HIDDENS_IN_LAYER(net,layer-1)) {
            n = net->hiddens[layer-1][i];
            n->value_reprojected =
                AF(n->value_reprojected);
        }
    }

    /* reproject through the remaining hidden layers towards the inputs */
    for (int l = layer-1; l > 0; l--) {
        /* for every unit in the hidden layer */
        COUNTDOWN(i, HIDDENS_IN_LAYER(net,l))
            bp_neuron_reproject(net->hiddens[l][i]);
        /* apply the sigmoid function in the previous layer,
           as with feedforward */
        COUNTDOWN(i, HIDDENS_IN_LAYER(net,l-1)) {
            n = net->hiddens[l-1][i];
            n->value_reprojected =
                AF(n->value_reprojected);
        }
    }
}
/**
 * @brief Adjust connection weights and bias values
 * @param net Backprop neural net object
 * @param current_hidden_layer The hidden layer currently being trained;
 *        layers below (current_hidden_layer - 1) keep their weights frozen
 */
void bp_learn(bp * net, int current_hidden_layer)
{
    int start_hidden_layer = current_hidden_layer-1;

    /* update each hidden layer at or above the start layer */
    if (start_hidden_layer < 0)
        start_hidden_layer = 0;
    FOR(l, start_hidden_layer, net->hidden_layers) {
#pragma omp parallel for schedule(static) num_threads(DEEPLEARN_THREADS)
        COUNTDOWN(i, HIDDENS_IN_LAYER(net,l))
            bp_neuron_learn(net->hiddens[l][i], net->learning_rate);
    }

    /* update the output layer */
#pragma omp parallel for schedule(static) num_threads(DEEPLEARN_THREADS)
    COUNTDOWN(i, net->no_of_outputs)
        bp_neuron_learn(net->outputs[i], net->learning_rate);

    /* perform periodic pruning of weights so that there is a cycle
       of growth and pruning */
    if (net->pruning_cycle != 0) {
        if (net->itterations % net->pruning_cycle == 0) {
            if (net->itterations > 0) {
                bp_prune_weights(net, net->pruning_rate);
            }
        }
    }
}
/**
 * @brief Set the value of an input
 * @param net Backprop neural net object
 * @param index The index number of the input unit
 * @param value The value to set the unit to in the range 0.0 to 1.0
 */
void bp_set_input(bp * net, int index, float value)
{
    bp_neuron * unit = net->inputs[index];
    unit->value = value;
}
/**
 * @brief Normalises the input values
 *        Rescales the input unit values linearly so that they span
 *        NEURON_LOW..NEURON_LOW+NEURON_RANGE
 * @param net Backprop neural net object
 */
void bp_normalise_inputs(bp * net)
{
    /* find the min/max of the current input values */
    float max = 0, min = 1;
    COUNTDOWN(i, net->no_of_inputs) {
        if (net->inputs[i]->value > max)
            max = net->inputs[i]->value;
        if (net->inputs[i]->value < min)
            min = net->inputs[i]->value;
    }
    float range = max - min;
    /* avoid dividing by a near-zero range when all inputs are equal */
    if (range > 0.00001f) {
        COUNTDOWN(i, net->no_of_inputs)
            net->inputs[i]->value =
                NEURON_LOW +
                ((net->inputs[i]->value-min)*NEURON_RANGE/range);
    }
}
/**
 * @brief Sets the inputs to a text string
 * @param net Backprop neural net object
 * @param text The text string to encode into the input units
 */
void bp_set_input_text(bp * net, char * text)
{
    /* encode the whole string into the input layer as binary values */
    size_t length = strlen(text);
    enc_text_to_binary(text, net->inputs, net->no_of_inputs, 0, length);
}
/**
 * @brief Set the inputs of the network from a patch within an image.
 *        It is assumed that the image is mono (1 byte per pixel)
 * @param net Backprop neural net object
 * @param img Array storing the image
 * @param image_width Width of the image in pixels
 * @param image_height Height of the image in pixels
 * @param tx Top left x coordinate of the patch within the image
 * @param ty Top left y coordinate of the patch within the image
 * @returns zero on success
 */
int bp_inputs_from_image_patch(bp * net,
                               unsigned char img[],
                               int image_width, int image_height,
                               int tx, int ty)
{
    int i = 0, idx;

    /* The patch size is calculated from the number of inputs
       of the neural net. It's assumed to be square. */
    int patch_size = (int)sqrt(net->no_of_inputs);

    /* make sure that the patch fits within the number of inputs */
    /* NOTE(review): since patch_size = floor(sqrt(no_of_inputs)),
       patch_size*patch_size can never exceed no_of_inputs, so this
       guard appears unreachable — confirm the intended check */
    if (patch_size*patch_size > net->no_of_inputs)
        return 1;

    /* set the inputs from the patch, clipping at the image borders */
    FOR(py, ty, ty+patch_size) {
        if (py >= image_height) break;
        FOR(px, tx, tx+patch_size) {
            if (px >= image_width) break;
            /* array index within the image */
            idx = (py*image_width) + px;
            /* set the input value within the range */
            bp_set_input(net, i,
                         NEURON_LOW +
                         (img[idx]*NEURON_RANGE/255.0f));
            i++;
        }
    }
    return 0;
}
/**
 * @brief Set the inputs of the network from an image.
 *        It is assumed that the image is mono (1 byte per pixel)
 * @param net Backprop neural net object
 * @param img Array storing the image
 * @param image_width Width of the image in pixels
 * @param image_height Height of the image in pixels
 * @returns zero on success, 1 if the image size does not match the
 *          number of input units
 */
int bp_inputs_from_image(bp * net,
                         unsigned char img[],
                         int image_width, int image_height)
{
    /* check that the number of inputs is the same as the
       number of pixels */
    if (net->no_of_inputs != image_width*image_height)
        return 1;

    /* set the input values within the range, one pixel per input unit */
    COUNTDOWN(i, image_width*image_height)
        bp_set_input(net, i,
                     NEURON_LOW + (img[i]*NEURON_RANGE/255.0f));
    return 0;
}
/**
 * @brief Plots weight matrices within an image
 * @param net Backprop neural net object
 * @param filename Filename of the image to save as
 * @param image_width Width of the image in pixels
 * @param image_height Height of the image in pixels
 * @param input_image_width When displaying all inputs as an image this
          is the number of inputs across. Set this to zero for the
          inputs image to be square.
 * @returns zero on success, -1 if the image buffer could not be allocated
 */
int bp_plot_weights(bp * net,
                    char * filename,
                    int image_width, int image_height,
                    int input_image_width)
{
    int neurons_x, neurons_y, ty, by, h, ix, iy;
    int wx, wy, inputs_x, inputs_y, n, i, unit, no_of_neurons;
    int no_of_weights,wdth, max_unit;
    float neuronx, neurony, dw, db, min_bias, max_bias;
    float min_activation, max_activation, da;
    bp_neuron ** neurons, * curr_neuron;
    unsigned char * img;

    /* allocate memory for the image (3 bytes per pixel, RGB) */
    UCHARALLOC(img, image_width*image_height*3);
    if (!img)
        return -1;

    /* clear the image with a white background.
       BUGFIX: the previous code used the character constant '\255',
       which is an OCTAL escape equal to decimal 173 (a grey), not 255 */
    memset((void*)img, 255,
           image_width*image_height*3*sizeof(unsigned char));

    /* dimension of the neurons matrix for each layer */
    neurons_x = (int)sqrt(net->no_of_hiddens);
    neurons_y = (net->no_of_hiddens/neurons_x);

    /* dimensions of the weight matrix */
    if (input_image_width <= 0)
        inputs_x = (int)sqrt(net->no_of_inputs);
    else
        inputs_x = input_image_width;
    inputs_y = (net->no_of_inputs/inputs_x);
    no_of_weights = net->no_of_inputs;

    /* plot the inputs in the top band of the image */
    ty = 0;
    by = image_height/(net->hidden_layers+3);
    h = (by-ty)*95/100;
    wdth = h;
    if (wdth>=image_width) wdth=image_width;
    COUNTUP(y, h) {
        iy = y*inputs_y/h;
        COUNTUP(x, wdth) {
            ix = x*inputs_x/wdth;
            unit = (iy*inputs_x) + ix;
            if (unit < net->no_of_inputs) {
                n = (y*image_width + x)*3;
                img[n] = (unsigned char)(net->inputs[unit]->value*255);
                img[n+1] = img[n];
                img[n+2] = img[n];
            }
        }
    }

    /* one horizontal band per hidden layer, plus one for the outputs */
    COUNTUP(layer, net->hidden_layers+1) {
        /* vertical top and bottom coordinates */
        ty = (layer+1)*image_height/(net->hidden_layers+3);
        by = (layer+2)*image_height/(net->hidden_layers+3);
        h = (by-ty)*95/100;

        /* reset ranges */
        min_bias = 9999999.0f;
        max_bias = -9999999.0f;
        min_activation = 9999999.0f;
        max_activation = -9999999.0f;

        /* number of patches across and down for the final layer */
        if (layer == net->hidden_layers) {
            neurons_x = (int)sqrt(net->no_of_outputs);
            neurons_y = (net->no_of_outputs/neurons_x);
            neurons = net->outputs;
            no_of_neurons = net->no_of_outputs;
            max_unit = net->no_of_outputs;
        }
        else {
            neurons_x = (int)sqrt(HIDDENS_IN_LAYER(net,layer));
            neurons_y = (HIDDENS_IN_LAYER(net,layer)/neurons_x);
            neurons = net->hiddens[layer];
            no_of_neurons = HIDDENS_IN_LAYER(net,layer);
            max_unit = HIDDENS_IN_LAYER(net,layer);
        }

        /* get the bias and activation ranges within this layer */
        COUNTUP(y, max_unit) {
            if (neurons[y]->bias < min_bias) min_bias = neurons[y]->bias;
            if (neurons[y]->bias > max_bias) max_bias = neurons[y]->bias;
            if (neurons[y]->value < min_activation)
                min_activation = neurons[y]->value;
            if (neurons[y]->value > max_activation)
                max_activation = neurons[y]->value;
        }

        /* update ranges.
           NOTE(review): db or da can be zero if all biases/activations in a
           layer are equal, which makes the divisions below produce undefined
           pixel values — confirm whether a guard like the dw check is needed */
        db = max_bias - min_bias;
        da = max_activation - min_activation;

        /* for every pixel within the region */
        FOR(y, ty, by) {
            neurony = (y-ty)*neurons_y/(float)h;
            /* y coordinate within the weights */
            wy = (neurony - (int)neurony)*inputs_y;
            COUNTUP(x, image_width) {
                neuronx = x*neurons_x/(float)image_width;
                /* x coordinate within the weights */
                wx = (neuronx - (int)neuronx)*inputs_x;
                /* coordinate within the image */
                n = ((y * image_width) + x)*3;
                /* weight index */
                i = (wy*inputs_x) + wx;
                if (i < no_of_weights) {
                    /* neuron index */
                    unit = ((int)neurony*neurons_x) + (int)neuronx;
                    if (unit < no_of_neurons) {
                        curr_neuron = neurons[unit];
                        dw = curr_neuron->max_weight -
                            curr_neuron->min_weight;
                        if (dw > 0.0001f) {
                            /* red = weight, green = bias, blue = activation,
                               each normalised to its layer range */
                            img[n] =
                                (int)((curr_neuron->weights[i] -
                                       curr_neuron->min_weight)*255/dw);
                            img[n+1] =
                                (int)((curr_neuron->bias - min_bias)*255/db);
                            img[n+2] =
                                (int)((curr_neuron->value - min_activation)*
                                      255/da);
                        }
                        else {
                            img[n] =
                                (int)(curr_neuron->weights[i]*255);
                            img[n+1] = img[n];
                            img[n+2] = img[n];
                        }
                    }
                }
            }
        }
        if (layer < net->hidden_layers) {
            /* dimensions of the weight matrix for the next layer */
            inputs_x = (int)sqrt(HIDDENS_IN_LAYER(net,layer));
            inputs_y = (HIDDENS_IN_LAYER(net,layer)/inputs_x);
            no_of_weights = HIDDENS_IN_LAYER(net,layer);
        }
    }

    /* plot the output unit activations in the bottom band */
    ty = (net->hidden_layers+2)*image_height/(net->hidden_layers+3);
    by = (net->hidden_layers+3)*image_height/(net->hidden_layers+3);
    h = (by-ty)*95/100;
    inputs_x = (int)sqrt(net->no_of_outputs);
    inputs_y = (net->no_of_outputs/inputs_x);
    wdth = h;
    if (wdth >= image_width) wdth = image_width;
    COUNTUP(y, h) {
        iy = y*inputs_y/h;
        COUNTUP(x, wdth) {
            ix = x*inputs_x/wdth;
            unit = (iy*inputs_x) + ix;
            if (unit < net->no_of_outputs) {
                n = ((ty+y)*image_width + x)*3;
                img[n] = (unsigned char)(net->outputs[unit]->value*255);
                img[n+1] = img[n];
                img[n+2] = img[n];
            }
        }
    }

    /* write the image to file */
    deeplearn_write_png_file(filename,
                             (unsigned int)image_width,
                             (unsigned int)image_height,
                             24, img);

    /* free the image memory */
    free(img);
    return 0;
}
/**
 * @brief Returns the value of one of the input units
 * @param net Backprop neural net object
 * @param index Index of the input unit
 * @return value in the range 0.0 to 1.0
 */
float bp_get_input(bp * net, int index)
{
    bp_neuron * unit = net->inputs[index];
    return unit->value;
}
/**
 * @brief Sets the desired (training target) value of one of the output units
 * @param net Backprop neural net object
 * @param index Index of the output unit
 * @param value The value to set the output to in the range 0.0 to 1.0
 */
void bp_set_output(bp * net, int index, float value)
{
    bp_neuron * unit = net->outputs[index];
    unit->desired_value = value;
}
/**
 * @brief Gets the value of one of the hidden units
 * @param net Backprop neural net object
 * @param layer Index number of the hidden layer
 * @param index Index of the unit within the given hidden layer
 * @return value in the range 0.0 to 1.0
 */
float bp_get_hidden(bp * net, int layer, int index)
{
    bp_neuron * unit = net->hiddens[layer][index];
    return unit->value;
}
/**
 * @brief Gets the value of one of the output units
 * @param net Backprop neural net object
 * @param index Index of the unit within the output layer
 * @return value in the range 0.0 to 1.0
 */
float bp_get_output(bp * net, int index)
{
    bp_neuron * unit = net->outputs[index];
    return unit->value;
}
/**
 * @brief Gets the desired (training target) value of one of the output units
 * @param net Backprop neural net object
 * @param index Index of the unit within the output layer
 * @return Desired value in the range 0.0 to 1.0
 */
float bp_get_desired(bp * net, int index)
{
    bp_neuron * unit = net->outputs[index];
    return unit->desired_value;
}
/**
 * @brief Exclusion flags indicate that a unit has temporarily dropped out.
 *        This clears the all the exclusion flags
 * @param net Backprop neural net object
 */
static void bp_clear_dropouts(bp * net)
{
    /* if no dropouts then don't continue */
    if (net->dropout_percent == 0) return;

    /* clear the exclusion flag on every hidden unit */
    COUNTDOWN(l, net->hidden_layers) {
        COUNTDOWN(i, HIDDENS_IN_LAYER(net,l))
            net->hiddens[l][i]->excluded = 0;
    }
}
/**
 * @brief Returns the average weight change for a given layer
 * @param net Backprop neural net object
 * @param layer_index Index of the layer
 * @returns average weight change magnitude, scaled by 1e7 for readability
 */
float bp_weight_gradient_mean(bp * net, int layer_index)
{
    float total_weight_change = 0;
    int no_of_neurons = HIDDENS_IN_LAYER(net, layer_index);
    /* all neurons in the layer have the same number of incoming weights */
    int inputs = net->hiddens[layer_index][0]->no_of_inputs;

    /* sum the magnitudes of the last weight changes across the layer */
    COUNTDOWN(i, no_of_neurons) {
        bp_neuron * n = net->hiddens[layer_index][i];
        COUNTDOWN(w, inputs)
            total_weight_change += fabs(n->last_weight_change[w]);
    }
    return total_weight_change * 10000000 / (float)(inputs * no_of_neurons);
}
/**
 * @brief Returns a weight histogram with values in the range 0 -> 1000
 * @param net Backprop neural net object
 * @param histogram Histogram array
 * @param buckets Number of histogram buckets
 * @param max_value The maximum weight magnitude; weights at or above this
 *        are not counted
 */
void bp_weight_histogram(bp * net,
                         unsigned int histogram[], int buckets,
                         float max_value)
{
    /* start with all buckets empty */
    UINTCLEAR(histogram, buckets);

    /* bucket every hidden-layer weight magnitude */
    COUNTDOWN(l, net->hidden_layers) {
        int no_of_neurons = HIDDENS_IN_LAYER(net, l);
        int inputs = net->hiddens[l][0]->no_of_inputs;
        COUNTDOWN(i, no_of_neurons) {
            bp_neuron * n = net->hiddens[l][i];
            COUNTDOWN(w, inputs) {
                float value = fabs(n->weights[w]);
                if (value < max_value) {
                    histogram[(int)(value * buckets / max_value)]++;
                }
            }
        }
    }

    /* normalize so the tallest bucket becomes 1000
       (max starts at 1 to avoid division by zero) */
    unsigned int max = 1;
    COUNTDOWN(i, buckets) {
        if (histogram[i] > max)
            max = histogram[i];
    }
    COUNTDOWN(i, buckets) {
        histogram[i] = histogram[i] * 1000 / max;
    }
}
/**
 * @brief Sets small weights to zero
 * @param net Backprop neural net object
 * @param threshold Pruning threshold in the range 0.0 -> 1.0,
 *        interpreted as a fraction of the mean weight magnitude
 * @returns The percent of weights pruned
 */
int bp_prune_weights(bp * net, float threshold)
{
    float mean = 0;
    unsigned int hits = 0;
    unsigned int pruned = 0;

    /* first pass: mean weight magnitude over hidden layers */
    COUNTDOWN(l, net->hidden_layers) {
        int no_of_neurons = HIDDENS_IN_LAYER(net, l);
        int inputs = net->hiddens[l][0]->no_of_inputs;
        hits += no_of_neurons*inputs;
        COUNTDOWN(i, no_of_neurons) {
            bp_neuron * n = net->hiddens[l][i];
            COUNTDOWN(w, inputs) {
                mean += fabs(n->weights[w]);
            }
        }
    }
    /* include the output layer weights in the mean */
    COUNTDOWN(i, net->no_of_outputs) {
        bp_neuron * n = net->outputs[i];
        COUNTDOWN(w, n->no_of_inputs) {
            mean += fabs(n->weights[w]);
            hits++;
        }
    }
    if (hits == 0)
        return 0;
    mean /= (float)hits;

    /* convert the fractional threshold into an absolute magnitude */
    threshold = mean * threshold;

    /* second pass: set weights to zero if they are below the
       pruning threshold */
    COUNTDOWN(l, net->hidden_layers) {
        int no_of_neurons = HIDDENS_IN_LAYER(net, l);
        int inputs = net->hiddens[l][0]->no_of_inputs;
        COUNTDOWN(i, no_of_neurons) {
            bp_neuron * n = net->hiddens[l][i];
            COUNTDOWN(w, inputs) {
                if (fabs(n->weights[w]) < threshold) {
                    n->weights[w] = 0;
                    pruned++;
                }
            }
        }
    }
    COUNTDOWN(i, net->no_of_outputs) {
        bp_neuron * n = net->outputs[i];
        COUNTDOWN(w, n->no_of_inputs) {
            if (fabs(n->weights[w]) < threshold) {
                n->weights[w] = 0;
                pruned++;
            }
        }
    }
    return (int)(pruned * 100 / hits);
}
/**
 * @brief Returns the standard deviation of the weight change for a given layer
 * @param net Backprop neural net object
 * @param layer_index Index of the layer
 * @returns mean percentage deviation of the last weight changes from their
 *          average (not a true standard deviation)
 */
float bp_weight_gradient_std(bp * net, int layer_index)
{
    float mean_weight_change = 0;
    float total_deviation = 0;
    int no_of_neurons = HIDDENS_IN_LAYER(net, layer_index);
    int inputs = net->hiddens[layer_index][0]->no_of_inputs;

    /* calculate the average weight change (signed, not magnitude) */
    COUNTDOWN(i, no_of_neurons) {
        bp_neuron * n = net->hiddens[layer_index][i];
        COUNTDOWN(w, inputs) {
            mean_weight_change += n->last_weight_change[w];
        }
    }
    mean_weight_change /= (no_of_neurons * inputs);

    /* sum of percentage deviation from the average weight magnitude;
       skipped when the mean is effectively zero to avoid dividing by it */
    if (fabs(mean_weight_change) > 0.0000000001f) {
        COUNTDOWN(i, no_of_neurons) {
            bp_neuron * n = net->hiddens[layer_index][i];
            COUNTDOWN(w, inputs) {
                total_deviation +=
                    fabs((n->last_weight_change[w] - mean_weight_change)/mean_weight_change);
            }
        }
    }
    return total_deviation * 100 / (no_of_neurons * inputs);
}
/**
 * @brief Randomly sets exclusion flags to cause units to drop out
 *        Note that the same unit may be picked more than once, so the
 *        actual number of dropped units can be slightly lower
 * @param net Backprop neural net object
 */
static void bp_dropouts(bp * net)
{
    int no_of_dropouts, hidden_units=0;

    if (net->dropout_percent == 0) return;

    /* total number of hidden units */
    COUNTDOWN(l, net->hidden_layers)
        hidden_units += HIDDENS_IN_LAYER(net,l);

    /* total number of dropouts */
    no_of_dropouts = net->dropout_percent*hidden_units/100;

    /* set the exclusion flags on randomly chosen hidden units */
    COUNTDOWN(n, no_of_dropouts) {
        int l = rand_num(&net->random_seed)%net->hidden_layers;
        int i = rand_num(&net->random_seed)%HIDDENS_IN_LAYER(net,l);
        net->hiddens[l][i]->excluded = 1;
    }
}
/**
 * @brief Update the neural net during training.
 *        Performs one full training step: pick dropouts, feed forward,
 *        back-propagate the error, adjust weights, then clear dropouts.
 *        The order of these calls matters.
 * @param net Backprop neural net object
 * @param current_hidden_layer The hidden layer currently being trained
 *        (passed through to bp_backprop and bp_learn)
 */
void bp_update(bp * net, int current_hidden_layer)
{
    bp_dropouts(net);
    bp_feed_forward(net, 1);
    bp_backprop(net, current_hidden_layer);
    bp_learn(net, current_hidden_layer);
    bp_clear_dropouts(net);
}
/**
 * @brief Save a neural network to file
 * @param fp File pointer
 * @param net Backprop neural net object
 * @return zero on success, a distinct negative code identifying the
 *         field whose write failed
 */
int bp_save(FILE * fp, bp * net)
{
    /* scalar header fields, written in a fixed order which
       bp_load must read back identically */
    if (UINTWRITE(net->itterations) == 0)
        return -1;
    if (UINTWRITE(net->pruning_cycle) == 0)
        return -2;
    if (FLOATWRITE(net->pruning_rate) == 0)
        return -3;
    if (INTWRITE(net->no_of_inputs) == 0)
        return -4;
    if (INTWRITE(net->no_of_hiddens) == 0)
        return -5;
    if (INTWRITE(net->no_of_outputs) == 0)
        return -6;
    if (INTWRITE(net->hidden_layers) == 0)
        return -7;
    if (FLOATWRITE(net->learning_rate) == 0)
        return -8;
    if (FLOATWRITE(net->noise) == 0)
        return -9;
    if (FLOATWRITE(net->backprop_error_average) == 0)
        return -10;
    if (FLOATWRITE(net->dropout_percent) == 0)
        return -11;
    if (UINTWRITE(net->random_seed) == 0)
        return -12;

    /* hidden layer neurons, then output neurons
       NOTE(review): these bp_neuron_save return values are not
       checked, unlike the scalar writes above — confirm intended */
    COUNTUP(l, net->hidden_layers) {
        COUNTUP(i, HIDDENS_IN_LAYER(net,l))
            bp_neuron_save(fp,net->hiddens[l][i]);
    }
    COUNTUP(i, net->no_of_outputs)
        bp_neuron_save(fp,net->outputs[i]);
    return 0;
}
/**
 * @brief Load a network from file
 * @param fp File pointer
 * @param net Backprop neural net object (initialised by this call)
 * @returns zero on success, a distinct negative code identifying the
 *          field whose read failed
 */
int bp_load(FILE * fp, bp * net)
{
    int no_of_inputs=0, no_of_hiddens=0, no_of_outputs=0;
    int hidden_layers=0;
    float learning_rate=0, noise=0, backprop_error_average=0;
    float dropout_percent=0,pruning_rate=0;
    unsigned int itterations=0;
    unsigned int pruning_cycle=0;
    unsigned int random_seed=0;

    /* read the scalar header fields in the same order bp_save wrote them */
    if (UINTREAD(itterations) == 0)
        return -1;
    if (UINTREAD(pruning_cycle) == 0)
        return -2;
    if (FLOATREAD(pruning_rate) == 0)
        return -3;
    if (INTREAD(no_of_inputs) == 0)
        return -4;
    if (INTREAD(no_of_hiddens) == 0)
        return -5;
    if (INTREAD(no_of_outputs) == 0)
        return -6;
    if (INTREAD(hidden_layers) == 0)
        return -7;
    if (FLOATREAD(learning_rate) == 0)
        return -8;
    if (FLOATREAD(noise) == 0)
        return -9;
    if (FLOATREAD(backprop_error_average) == 0)
        return -10;
    if (FLOATREAD(dropout_percent) == 0)
        return -11;
    if (UINTREAD(random_seed) == 0)
        return -12;

    /* build the network structure before loading the neuron weights */
    if (bp_init(net, no_of_inputs, no_of_hiddens,
                hidden_layers, no_of_outputs,
                &random_seed) != 0)
        return -13;

    COUNTUP(l, net->hidden_layers) {
        COUNTUP(i, HIDDENS_IN_LAYER(net,l)) {
            if (bp_neuron_load(fp,net->hiddens[l][i]) != 0)
                return -14;
        }
    }
    COUNTUP(i, net->no_of_outputs) {
        if (bp_neuron_load(fp,net->outputs[i]) != 0)
            return -15;
    }

    /* restore the scalar fields that bp_init set to defaults */
    net->learning_rate = learning_rate;
    net->noise = noise;
    net->backprop_error_average = backprop_error_average;
    /* current and total error are only approximated by the saved average */
    net->backprop_error = backprop_error_average;
    net->backprop_error_total = backprop_error_average;
    net->itterations = itterations;
    net->dropout_percent = dropout_percent;
    net->pruning_cycle = pruning_cycle;
    net->pruning_rate = pruning_rate;
    return 0;
}
/**
 * @brief compares two networks and returns a greater than
 *        zero value if they are the same
 * @param net1 The first backprop neural net object
 * @param net2 The second backprop neural net object
 * @returns 1 if the networks match, a distinct negative code
 *          identifying the first mismatching field otherwise
 */
int bp_compare(bp * net1, bp * net2)
{
    int retval;

    /* compare structural parameters and hyperparameters first,
       since they are cheapest to check */
    if (net1->no_of_inputs != net2->no_of_inputs)
        return -1;
    if (net1->no_of_hiddens != net2->no_of_hiddens)
        return -2;
    if (net1->no_of_outputs != net2->no_of_outputs)
        return -3;
    if (net1->hidden_layers != net2->hidden_layers)
        return -4;
    if (net1->learning_rate != net2->learning_rate)
        return -5;
    if (net1->pruning_cycle != net2->pruning_cycle)
        return -6;
    if (net1->pruning_rate != net2->pruning_rate)
        return -7;
    if (net1->noise != net2->noise)
        return -8;

    /* compare every neuron in the hidden layers */
    COUNTDOWN(l, net1->hidden_layers) {
        COUNTDOWN(i, HIDDENS_IN_LAYER(net1,l)) {
            retval =
                bp_neuron_compare(net1->hiddens[l][i],
                                  net2->hiddens[l][i]);
            if (retval == 0)
                return -9;
        }
    }
    /* compare every output neuron */
    COUNTDOWN(i, net1->no_of_outputs) {
        retval = bp_neuron_compare(net1->outputs[i], net2->outputs[i]);
        if (retval == 0)
            return -10;
    }
    if (net1->itterations != net2->itterations)
        return -11;
    if (net1->backprop_error_average != net2->backprop_error_average)
        return -12;
    if (net1->dropout_percent!= net2->dropout_percent)
        return -13;
    return 1;
}
/**
 * @brief Extract the classification string from the filename.
 *        This assumes a filename of the type class.instance.extension
 * @param filename The filename to examine
 * @param classification The returned classification (caller-supplied
 *        buffer, must be large enough for the class name plus NUL)
 */
void bp_get_classification_from_filename(char * filename,
                                         char * classification)
{
    /* skip any directory components: start just after the last '/'.
       (Replaces a loop which re-evaluated strlen on every iteration
       and compared a signed index against the unsigned strlen result.) */
    char * separator = strrchr(filename, '/');
    const char * start = (separator != NULL) ? separator + 1 : filename;

    /* copy characters up to the first '.', '-' or '_' delimiter */
    int j = 0;
    while ((start[j] != 0) &&
           (start[j] != '.') &&
           (start[j] != '-') &&
           (start[j] != '_')) {
        classification[j] = start[j];
        j++;
    }
    /* add a string terminator */
    classification[j] = 0;
}
/**
 * @brief Takes a set of classification text descriptions (labels) for
 *        each instance within a training or test set and produces an
 *        array of classification numbers corresponding to the text
 *        descriptions. It's easier for the system to deal with
 *        classification numbers rather than text descriptions.
 * @param no_of_instances The number of instances in the training or test set
 * @param instance_classification Text Description for each instance
 * @param numbers Array of numbers corresponding to each instance
 * @returns zero on success, negative on allocation failure
 */
int bp_classifications_to_numbers(int no_of_instances,
                                  char ** instance_classification,
                                  int * numbers)
{
    int j;
    int unique_ctr = 0;
    char ** unique_classification;

    /* allocate memory for a list of unique descriptions (labels);
       worst case every instance has a distinct label */
    CHARPTRALLOC(unique_classification, no_of_instances);
    if (!unique_classification)
        return -1;

    /* create a list of unique classification names */
    COUNTUP(i, no_of_instances) {
        /* for every class number assigned so far */
        for (j = 0; j < unique_ctr; j++) {
            /* is this instance description (label) the same as a previous
               instance description ? */
            if (strcmp(instance_classification[i],
                       unique_classification[j])==0) {
                /* assign the same class number and finish the search */
                numbers[i] = j;
                break;
            }
        }
        /* if this instance has a description which has not been used before
           (the linear search above fell through without matching) */
        if (j == unique_ctr) {
            /* store the description */
            CHARALLOC(unique_classification[unique_ctr],
                      1+strlen(instance_classification[i]));
            if (!unique_classification[unique_ctr]) {
                /* clean up everything allocated so far before failing */
                COUNTDOWN(i, unique_ctr)
                    free(unique_classification[i]);
                free(unique_classification);
                return -2;
            }
            sprintf(unique_classification[unique_ctr],
                    "%s", instance_classification[i]);
            /* store the classification number */
            numbers[i] = unique_ctr;
            /* increment the number of classes */
            unique_ctr++;
        }
    }

    /* free memory which was used to store descriptions */
    COUNTDOWN(i, unique_ctr)
        free(unique_classification[i]);
    free(unique_classification);
    return 0;
}
|
omp_task_private.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
/* Checks that private(sum) gives every task its own copy of sum */
/* Verifies the OpenMP `private` clause on task constructs: each task must
   receive its own copy of `sum`, so concurrently executing tasks cannot
   corrupt each other's partial sums. Returns 1 on success, 0 on failure. */
int test_omp_task_private()
{
  int i;
  int known_sum;                /* expected value of 0+1+...+LOOPCOUNT */
  int sum = 0;                  /* privatized per task by the clause below */
  int result = 0; /* counts the wrong sums from tasks */
  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
  #pragma omp parallel
  {
    /* one thread creates the tasks; the team executes them concurrently */
    #pragma omp single
    {
      for (i = 0; i < NUM_TASKS; i++) {
        #pragma omp task private(sum) shared(result, known_sum)
        {
          int j;
          //if sum is private, initialize to 0
          sum = 0;
          for (j = 0; j <= LOOPCOUNT; j++) {
            /* flush widens the race window: if sum were actually shared,
               other tasks' writes would become visible and break the sum */
            #pragma omp flush
            sum += j;
          }
          /* check if calculated sum was right */
          if (sum != known_sum) {
            /* result is shared: serialize the increment */
            #pragma omp critical
            result++;
          }
        } /* end of omp task */
      } /* end of for */
    } /* end of single */
  } /* end of parallel*/
  return (result == 0);
}
/* Runs the task-private check REPETITIONS times; the exit status is the
   number of failing repetitions (0 means every repetition passed). */
int main()
{
  int failures = 0;
  for (int rep = 0; rep < REPETITIONS; rep++)
    failures += test_omp_task_private() ? 0 : 1;
  return failures;
}
|
core_ztrssq.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c d s
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
#include <math.h>
/******************************************************************************/
// This computation also shows up in plasma_core_zsyssq() and can be factored out.
// LAPACK does real and imag components separately in zlassq.
/* Folds |value|^2 into the running pair (scale, sumsq) so that the total
   equals scale^2 * sumsq without overflow, as in LAPACK's zlassq. */
static inline void ssq(plasma_complex64_t value, double *scale, double *sumsq)
{
    double mag = cabs(value);
    // A zero entry contributes nothing. NaN still propagates: for NaN,
    // mag == 0.0 is false, so the update below runs and poisons sumsq.
    if (mag == 0.0)
        return;
    if (*scale < mag) {
        // New largest magnitude: rescale the accumulated sum to it.
        double r = *scale/mag;
        *sumsq = 1.0 + *sumsq*(r*r);
        *scale = mag;
    }
    else {
        // Magnitude within current scale: add its scaled square.
        double r = mag/(*scale);
        *sumsq = *sumsq + r*r;
    }
}
/******************************************************************************/
/* Accumulates the scaled sum of squares of the stored triangle (or
   trapezoid) of the m-by-n tile A into (scale, sumsq), column-major with
   leading dimension lda. uplo selects the upper or lower part; when
   diag == PlasmaUnit the diagonal entries are taken as implicit ones.
   Declared weak so an optimized build can override it at link time. */
__attribute__((weak))
void plasma_core_ztrssq(plasma_enum_t uplo, plasma_enum_t diag,
                        int m, int n,
                        const plasma_complex64_t *A, int lda,
                        double *scale, double *sumsq)
{
    if (uplo == PlasmaUpper) {
        if (diag == PlasmaNonUnit) {
            /* column j stores rows 0..min(j, m-1); row 0 is handled
               separately, then the rest of the column */
            for (int j = 0; j < n; j++) {
                ssq(A[lda*j], scale, sumsq);
                for (int i = 1; i < imin(j+1, m); i++) {
                    ssq(A[lda*j+i], scale, sumsq);
                }
            }
        }
        else { // PlasmaUnit
            int j;
            /* columns intersecting the diagonal: count an implicit 1.0
               for the diagonal plus the strictly-upper entries above it */
            for (j = 0; j < imin(n, m); j++) {
                ssq(1.0, scale, sumsq);
                for (int i = 0; i < j; i++) {
                    ssq(A[lda*j+i], scale, sumsq);
                }
            }
            /* trapezoidal case (n > m): remaining columns are full */
            for (; j < n; j++) {
                ssq(A[lda*j], scale, sumsq);
                for (int i = 1; i < m; i++) {
                    ssq(A[lda*j+i], scale, sumsq);
                }
            }
        }
    }
    else { // PlasmaLower
        if (diag == PlasmaNonUnit) {
            /* column j stores rows j..m-1, diagonal entry first */
            for (int j = 0; j < imin(n, m); j++) {
                ssq(A[lda*j+j], scale, sumsq);
                for (int i = j+1; i < m; i++) {
                    ssq(A[lda*j+i], scale, sumsq);
                }
            }
        }
        else { // PlasmaUnit
            /* implicit 1.0 on the diagonal, then the strictly-lower part */
            for (int j = 0; j < imin(n, m); j++) {
                ssq(1.0, scale, sumsq);
                for (int i = j+1; i < m; i++) {
                    ssq(A[lda*j+i], scale, sumsq);
                }
            }
        }
    }
}
/******************************************************************************/
/* OpenMP task wrapper around plasma_core_ztrssq(): initializes the output
   pair to the LAPACK starting values (scale=0, sumsq=1) and accumulates the
   tile's sum of squares. Skips the work (but still satisfies the task
   dependences) when the sequence has already recorded a failure. */
void plasma_core_omp_ztrssq(plasma_enum_t uplo, plasma_enum_t diag,
                            int m, int n,
                            const plasma_complex64_t *A, int lda,
                            double *scale, double *sumsq,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    /* NOTE(review): scale and sumsq each point at a single double, yet the
       depend clauses name length-n array sections; dependence matching only
       uses the base address here, but confirm the intended extent. */
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:scale[0:n]) \
                     depend(out:sumsq[0:n])
    {
        if (sequence->status == PlasmaSuccess) {
            *scale = 0.0;
            *sumsq = 1.0;
            plasma_core_ztrssq(uplo, diag, m, n, A, lda, scale, sumsq);
        }
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.